mirror of
https://github.com/telemt/telemt.git
synced 2026-04-14 17:14:09 +03:00
Compare commits
548 Commits
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
c43de1bd2a | ||
|
|
101efe45b7 | ||
|
|
11df61c6ac | ||
|
|
08684bcbd2 | ||
|
|
744fb4425f | ||
|
|
80cb1bc221 | ||
|
|
8461556b02 | ||
|
|
cfd516edf3 | ||
|
|
803c2c0492 | ||
|
|
b762bd029f | ||
|
|
761679d306 | ||
|
|
41668b153d | ||
|
|
1d2f88ad29 | ||
|
|
80917f5abc | ||
|
|
dc61d300ab | ||
|
|
ae16080de5 | ||
|
|
b8ca1fc166 | ||
|
|
f9986944df | ||
|
|
cb877c2bc3 | ||
|
|
4426082c17 | ||
|
|
22097f8c7c | ||
|
|
1450af60a0 | ||
|
|
f1cc8d65f2 | ||
|
|
ec7e808daf | ||
|
|
e4b7e23e76 | ||
|
|
8b92b80b4a | ||
|
|
f7868aa00f | ||
|
|
655a08fa5c | ||
|
|
8bc432db49 | ||
|
|
a40d6929e5 | ||
|
|
8db566dbe9 | ||
|
|
bb71de0230 | ||
|
|
62a258f8e3 | ||
|
|
c868eaae74 | ||
|
|
8e1860f912 | ||
|
|
814bef9d99 | ||
|
|
3ceda15073 | ||
|
|
a3a6ea2880 | ||
|
|
24156b5067 | ||
|
|
a1dfa5b11d | ||
|
|
800356c751 | ||
|
|
1546b012a6 | ||
|
|
e6b77af931 | ||
|
|
8cfaab9320 | ||
|
|
2d69b9d0ae | ||
|
|
41c2b4de65 | ||
|
|
0a5e8a09fd | ||
|
|
2f9fddfa6f | ||
|
|
6f4356f72a | ||
|
|
0c3c9009a9 | ||
|
|
0475844701 | ||
|
|
1abf9bd05c | ||
|
|
6f17d4d231 | ||
|
|
bf30e93284 | ||
|
|
91be148b72 | ||
|
|
e46d2cfc52 | ||
|
|
d4cda6d546 | ||
|
|
e35d69c61f | ||
|
|
a353a94175 | ||
|
|
b856250b2c | ||
|
|
97d1476ded | ||
|
|
cde14fc1bf | ||
|
|
5723d50d0b | ||
|
|
3eb384e02a | ||
|
|
c960e0e245 | ||
|
|
6fc188f0c4 | ||
|
|
5c9fea5850 | ||
|
|
3011a9ef6d | ||
|
|
7b570be5b3 | ||
|
|
0461bc65c6 | ||
|
|
ead23608f0 | ||
|
|
cf82b637d2 | ||
|
|
2e8bfa1101 | ||
|
|
d091b0b251 | ||
|
|
56fc6c4896 | ||
|
|
042d4fd612 | ||
|
|
bbc69f945e | ||
|
|
03c9a2588f | ||
|
|
9de8b2f0bf | ||
|
|
76eb8634a4 | ||
|
|
4e5b67bae8 | ||
|
|
bb2f3b24ac | ||
|
|
73f218b62a | ||
|
|
9cbc625b9b | ||
|
|
13ff3af1db | ||
|
|
d3f32b5568 | ||
|
|
77f717e3d1 | ||
|
|
db3e246390 | ||
|
|
388e14d01f | ||
|
|
b74ba38d40 | ||
|
|
269fce839f | ||
|
|
5a4072c964 | ||
|
|
2d3c2807ab | ||
|
|
50ae16ddf7 | ||
|
|
de5c26b7d7 | ||
|
|
a95678988a | ||
|
|
b17482ede3 | ||
|
|
a059de9191 | ||
|
|
e7e763888b | ||
|
|
c0a3e43aa8 | ||
|
|
4c32370b25 | ||
|
|
a6c298b633 | ||
|
|
e7a1d26e6e | ||
|
|
b91c6cb339 | ||
|
|
e676633dcd | ||
|
|
c4e7f54cbe | ||
|
|
f85205d48d | ||
|
|
d767ec02ee | ||
|
|
51835c33f2 | ||
|
|
88a4c652b6 | ||
|
|
ea2d964502 | ||
|
|
bd7218c39c | ||
|
|
3055637571 | ||
|
|
19b84b9d73 | ||
|
|
165a1ede57 | ||
|
|
6ead8b1922 | ||
|
|
63aa1038c0 | ||
|
|
4473826303 | ||
|
|
d7bbb376c9 | ||
|
|
7a8f946029 | ||
|
|
f2e6dc1774 | ||
|
|
54d65dd124 | ||
|
|
24594e648e | ||
|
|
e8b38ea860 | ||
|
|
b14c2b0a9b | ||
|
|
c1ee43fbac | ||
|
|
c8632de5b6 | ||
|
|
b930ea1ec5 | ||
|
|
3b86a883b9 | ||
|
|
5933b5e821 | ||
|
|
8188fedf6a | ||
|
|
f3598cf309 | ||
|
|
f2335c211c | ||
|
|
246ca11b88 | ||
|
|
bb355e916f | ||
|
|
777b15b1da | ||
|
|
8814854ae4 | ||
|
|
44c65f9c60 | ||
|
|
1260217be9 | ||
|
|
ebd37932c5 | ||
|
|
43d7e6e991 | ||
|
|
0eca535955 | ||
|
|
3abde52de8 | ||
|
|
801f670827 | ||
|
|
99ba2f7bbc | ||
|
|
1689b8a5dc | ||
|
|
babd902d95 | ||
|
|
e14dd07220 | ||
|
|
d93a4fbd53 | ||
|
|
2798039ab8 | ||
|
|
9dce748679 | ||
|
|
79093679ab | ||
|
|
35a8f5b2e5 | ||
|
|
456c433875 | ||
|
|
8f1ffe8c25 | ||
|
|
342b0119dd | ||
|
|
2605929b93 | ||
|
|
36814b6355 | ||
|
|
269ba537ad | ||
|
|
5c0eb6dbe8 | ||
|
|
a78c3e3ebd | ||
|
|
a4b70405b8 | ||
|
|
3afc3e1775 | ||
|
|
512bee6a8d | ||
|
|
66867d3f5b | ||
|
|
db36945293 | ||
|
|
5c5fdcb124 | ||
|
|
0ded366199 | ||
|
|
84a34cea3d | ||
|
|
7dc3c3666d | ||
|
|
dd07fa9453 | ||
|
|
bb1a372ac4 | ||
|
|
6661401a34 | ||
|
|
cd65fb432b | ||
|
|
caf0717789 | ||
|
|
4a610d83a3 | ||
|
|
aba4205dcc | ||
|
|
ef9b7b1492 | ||
|
|
d112f15b90 | ||
|
|
b55b264345 | ||
|
|
f61d25ebe0 | ||
|
|
ed4d1167dd | ||
|
|
dc6948cf39 | ||
|
|
4f11aa0772 | ||
|
|
6ea8ba25c4 | ||
|
|
e40361b171 | ||
|
|
1c6c73beda | ||
|
|
3f3bf5bbd2 | ||
|
|
ec793f3065 | ||
|
|
e83d366518 | ||
|
|
5a4209fe00 | ||
|
|
e7daf51193 | ||
|
|
754e4db8a9 | ||
|
|
7416829e89 | ||
|
|
c07b600acb | ||
|
|
7b44496706 | ||
|
|
67dc1e8d18 | ||
|
|
ad8ada33c9 | ||
|
|
bbb201b433 | ||
|
|
8d1faece60 | ||
|
|
a603505f90 | ||
|
|
f8c42c324f | ||
|
|
dd8ef4d996 | ||
|
|
e6ad9e4c7f | ||
|
|
2a01ca2d6f | ||
|
|
dc3363aa0d | ||
|
|
f655924323 | ||
|
|
05c066c676 | ||
|
|
1e000c2e7e | ||
|
|
fa17e719f6 | ||
|
|
ae3ced8e7c | ||
|
|
3279f6d46a | ||
|
|
6f9aef7bb4 | ||
|
|
049db1196f | ||
|
|
c8ffc23cf7 | ||
|
|
f230f2ce0e | ||
|
|
bdac6e3480 | ||
|
|
a4e9746dc7 | ||
|
|
c47495d671 | ||
|
|
44376b5652 | ||
|
|
5ae3a90d5e | ||
|
|
c7cf37898b | ||
|
|
901a0b7c23 | ||
|
|
03891db0c9 | ||
|
|
89e5668c7e | ||
|
|
1935455256 | ||
|
|
20e205189c | ||
|
|
1544e3fcff | ||
|
|
85295a9961 | ||
|
|
a54f807a45 | ||
|
|
31f6258c47 | ||
|
|
062464175e | ||
|
|
a5983c17d3 | ||
|
|
def42f0baa | ||
|
|
30ba41eb47 | ||
|
|
42f946f29e | ||
|
|
c53d7951b5 | ||
|
|
f36e264093 | ||
|
|
a3bdf64353 | ||
|
|
2aa7ea5137 | ||
|
|
462c927da6 | ||
|
|
cb87b2eac3 | ||
|
|
3739f38440 | ||
|
|
97d4a1c5c8 | ||
|
|
c2443e6f1a | ||
|
|
a7cffb547e | ||
|
|
f0c37f233e | ||
|
|
8e96039a1c | ||
|
|
36b360dfb6 | ||
|
|
60953bcc2c | ||
|
|
2c06288b40 | ||
|
|
0284b9f9e3 | ||
|
|
4e3f42dce3 | ||
|
|
5dd0c47f14 | ||
|
|
50a827e7fd | ||
|
|
d81140ccec | ||
|
|
4739083f57 | ||
|
|
c540a6657f | ||
|
|
4808a30185 | ||
|
|
1357f3cc4c | ||
|
|
d9aa6f4956 | ||
|
|
37a31c13cb | ||
|
|
35bca7d4cc | ||
|
|
f39d317d93 | ||
|
|
d4d93aabf5 | ||
|
|
c9271d9083 | ||
|
|
4f55d08c51 | ||
|
|
9c9ba4becd | ||
|
|
93caab1aec | ||
|
|
0c6bb3a641 | ||
|
|
b2e15327fe | ||
|
|
2e8be87ccf | ||
|
|
d78360982c | ||
|
|
bd0cefdb12 | ||
|
|
e2ed1eb286 | ||
|
|
822bcbf7a5 | ||
|
|
b25ec97a43 | ||
|
|
a74def9561 | ||
|
|
8821e38013 | ||
|
|
a1caebbe6f | ||
|
|
e0d821c6b6 | ||
|
|
205fc88718 | ||
|
|
95c1306166 | ||
|
|
e1ef192c10 | ||
|
|
ee4d15fed6 | ||
|
|
0040e9b6da | ||
|
|
2c10560795 | ||
|
|
5eff38eb82 | ||
|
|
b6206a6dfe | ||
|
|
e4a50f9286 | ||
|
|
213ce4555a | ||
|
|
5a16e68487 | ||
|
|
6ffbc51fb0 | ||
|
|
4d8a5ca174 | ||
|
|
0ae67db492 | ||
|
|
c4f77814ee | ||
|
|
dcab19a64f | ||
|
|
f10ca192fa | ||
|
|
92972ab6bf | ||
|
|
c351e08c43 | ||
|
|
e29855c8c6 | ||
|
|
3634fbd7e8 | ||
|
|
bb29797bfb | ||
|
|
3d5af3d248 | ||
|
|
2d7df3da6c | ||
|
|
4abc0e5134 | ||
|
|
4028579068 | ||
|
|
58f26ba8a7 | ||
|
|
2be3e4ab7f | ||
|
|
3d43ff6e57 | ||
|
|
1294da586f | ||
|
|
ac0698b772 | ||
|
|
2bd9036908 | ||
|
|
dda31b3d2f | ||
|
|
7d5e1cb9e8 | ||
|
|
56e38e8d00 | ||
|
|
4677b43c6e | ||
|
|
4ddbb97908 | ||
|
|
8b0b47145d | ||
|
|
f7e3ddcdb6 | ||
|
|
af5cff3304 | ||
|
|
cb9144bdb3 | ||
|
|
fa82634faf | ||
|
|
37b1a0289e | ||
|
|
9be33bcf93 | ||
|
|
bc9f691284 | ||
|
|
58e5605f39 | ||
|
|
75a654c766 | ||
|
|
2b058f7df7 | ||
|
|
01af2999bb | ||
|
|
c12d27f08a | ||
|
|
5e3408e80b | ||
|
|
052110618d | ||
|
|
47b8f0f656 | ||
|
|
67b2e25e39 | ||
|
|
9a08b541ed | ||
|
|
04379b4374 | ||
|
|
5cfb05b1f4 | ||
|
|
aa68ce531e | ||
|
|
d4ce304a37 | ||
|
|
8a579d9bda | ||
|
|
70cc6f22aa | ||
|
|
1674ba36b2 | ||
|
|
0c1a5c24d5 | ||
|
|
5df08300e2 | ||
|
|
543a87e166 | ||
|
|
519c8d276b | ||
|
|
4dc733d3e3 | ||
|
|
4506f38bfb | ||
|
|
b9a33c14bb | ||
|
|
50caeb1803 | ||
|
|
e57a93880b | ||
|
|
dbfc43395e | ||
|
|
89923dbaa2 | ||
|
|
780fafa604 | ||
|
|
a15f74a6f9 | ||
|
|
690635d904 | ||
|
|
d1372c5c1b | ||
|
|
5073248911 | ||
|
|
ae72e6f356 | ||
|
|
b8da986fd5 | ||
|
|
dd270258bf | ||
|
|
40dc6a39c1 | ||
|
|
8b5cbb7b4b | ||
|
|
0e476c71a5 | ||
|
|
be24b47300 | ||
|
|
8cd719da3f | ||
|
|
959d385015 | ||
|
|
6fa01d4c36 | ||
|
|
a383f3f1a3 | ||
|
|
7635aad1cb | ||
|
|
b315e84136 | ||
|
|
1d8de09a32 | ||
|
|
d2db9b8cf9 | ||
|
|
796279343e | ||
|
|
fabb3c45f1 | ||
|
|
161af51558 | ||
|
|
100ef0fa28 | ||
|
|
8994c27714 | ||
|
|
b950987229 | ||
|
|
a09b597fab | ||
|
|
c920dc6381 | ||
|
|
f4418d2d50 | ||
|
|
5ab3170f69 | ||
|
|
76fa06fa2e | ||
|
|
3a997fcf71 | ||
|
|
4b49b1b4f0 | ||
|
|
97926b05e8 | ||
|
|
6de17ae830 | ||
|
|
4c94f73546 | ||
|
|
d99df37ac5 | ||
|
|
d0f253b49b | ||
|
|
ef2ed3daa0 | ||
|
|
fc52cad109 | ||
|
|
98f365be44 | ||
|
|
b6c3cae2ad | ||
|
|
5f7fb15dd8 | ||
|
|
3a89f16332 | ||
|
|
aa3fcfbbe1 | ||
|
|
a616775f6d | ||
|
|
633af93b19 | ||
|
|
b41257f54e | ||
|
|
76b28aea74 | ||
|
|
aa315f5d72 | ||
|
|
c28b82a618 | ||
|
|
e7bdc80956 | ||
|
|
d641137537 | ||
|
|
4fd22b3219 | ||
|
|
fca0e3f619 | ||
|
|
9401c46727 | ||
|
|
6b3697ee87 | ||
|
|
c08160600e | ||
|
|
cd5c60ce1e | ||
|
|
ae1c97e27a | ||
|
|
cfee7de66b | ||
|
|
c942c492ad | ||
|
|
0e4be43b2b | ||
|
|
7eb2b60855 | ||
|
|
373ae3281e | ||
|
|
178630e3bf | ||
|
|
67f307cd43 | ||
|
|
ca2eaa9ead | ||
|
|
3c78daea0c | ||
|
|
d2baa8e721 | ||
|
|
a0cf4b4713 | ||
|
|
1bd249b0a9 | ||
|
|
2f47ec5797 | ||
|
|
80f3661b8e | ||
|
|
32eeb4a98c | ||
|
|
a74cc14ed9 | ||
|
|
5f77f83b48 | ||
|
|
d543dbca92 | ||
|
|
02f9d59f5a | ||
|
|
7b745bc7bc | ||
|
|
5ac0ef1ffd | ||
|
|
e1f3efb619 | ||
|
|
508eea0131 | ||
|
|
9e7f80b9b3 | ||
|
|
ee2def2e62 | ||
|
|
258191ab87 | ||
|
|
27e6dec018 | ||
|
|
26323dbebf | ||
|
|
484137793f | ||
|
|
24713feddc | ||
|
|
93f58524d1 | ||
|
|
0ff2e95e49 | ||
|
|
89222e7123 | ||
|
|
2468ee15e7 | ||
|
|
3440aa9fcd | ||
|
|
ce9698d39b | ||
|
|
ddfe7c5cfa | ||
|
|
01893f3712 | ||
|
|
8ae741ec72 | ||
|
|
6856466cef | ||
|
|
68292fbd26 | ||
|
|
e90c42ae68 | ||
|
|
9f9a5dce0d | ||
|
|
6739cd8d01 | ||
|
|
6cc8d9cb00 | ||
|
|
ce375b62e4 | ||
|
|
95971ac62c | ||
|
|
4ea2226dcd | ||
|
|
d752a440e5 | ||
|
|
5ce2ee2dae | ||
|
|
6fd9f0595d | ||
|
|
fcdd8a9796 | ||
|
|
640468d4e7 | ||
|
|
02fe89f7d0 | ||
|
|
24df865503 | ||
|
|
e9f8c79498 | ||
|
|
24ff75701e | ||
|
|
4221230969 | ||
|
|
d87196c105 | ||
|
|
da89415961 | ||
|
|
2d98ebf3c3 | ||
|
|
fb5e9947bd | ||
|
|
2ea85c00d3 | ||
|
|
2a3b6b917f | ||
|
|
83ed9065b0 | ||
|
|
44b825edf5 | ||
|
|
487e95a66e | ||
|
|
c465c200c4 | ||
|
|
d7716ad875 | ||
|
|
edce194948 | ||
|
|
13fdff750d | ||
|
|
bdcf110c87 | ||
|
|
dd12997744 | ||
|
|
fc160913bf | ||
|
|
92c22ef16d | ||
|
|
aff22d0855 | ||
|
|
b3d3bca15a | ||
|
|
92f38392eb | ||
|
|
30ef8df1b3 | ||
|
|
2e174adf16 | ||
|
|
4e803b1412 | ||
|
|
9b174318ce | ||
|
|
99edcbe818 | ||
|
|
ef7dc2b80f | ||
|
|
691607f269 | ||
|
|
55561a23bc | ||
|
|
f32c34f126 | ||
|
|
8f3bdaec2c | ||
|
|
69b02caf77 | ||
|
|
3854955069 | ||
|
|
9b84fc7a5b | ||
|
|
e7cb9238dc | ||
|
|
0e2cbe6178 | ||
|
|
cd076aeeeb | ||
|
|
d683faf922 | ||
|
|
0494f8ac8b | ||
|
|
48ce59900e | ||
|
|
84e95fd229 | ||
|
|
a80be78345 | ||
|
|
64130dd02e | ||
|
|
d62a6e0417 | ||
|
|
3260746785 | ||
|
|
8066ea2163 | ||
|
|
813f1df63e | ||
|
|
09bdafa718 | ||
|
|
fb0f75df43 | ||
|
|
39255df549 | ||
|
|
456495fd62 | ||
|
|
83cadc0bf3 | ||
|
|
0b1a8cd3f8 | ||
|
|
565b4ee923 | ||
|
|
7a9c1e79c2 | ||
|
|
02c6af4912 | ||
|
|
8ba4dea59f | ||
|
|
ccfda10713 | ||
|
|
bd1327592e | ||
|
|
30b22fe2bf | ||
|
|
651f257a5d | ||
|
|
a9209fd3c7 | ||
|
|
4ae4ca8ca8 | ||
|
|
8be1ddc0d8 | ||
|
|
b55fa5ec8f | ||
|
|
16c6ce850e | ||
|
|
12251e730f | ||
|
|
925b10f9fc | ||
|
|
306b653318 | ||
|
|
8791a52b7e | ||
|
|
0d9470a840 | ||
|
|
0d320c20e0 | ||
|
|
9b3ba2e1c6 | ||
|
|
dbadbf0221 | ||
|
|
173624c838 | ||
|
|
de2047adf2 | ||
|
|
5df2fe9f97 |
15
.cargo/deny.toml
Normal file
15
.cargo/deny.toml
Normal file
@@ -0,0 +1,15 @@
|
||||
[bans]
|
||||
multiple-versions = "deny"
|
||||
wildcards = "allow"
|
||||
highlight = "all"
|
||||
|
||||
# Explicitly flag the weak cryptography so the agent is forced to justify its existence
|
||||
[[bans.skip]]
|
||||
name = "md-5"
|
||||
version = "*"
|
||||
reason = "MUST VERIFY: Only allowed for legacy checksums, never for security."
|
||||
|
||||
[[bans.skip]]
|
||||
name = "sha1"
|
||||
version = "*"
|
||||
reason = "MUST VERIFY: Only allowed for backwards compatibility."
|
||||
8
.dockerignore
Normal file
8
.dockerignore
Normal file
@@ -0,0 +1,8 @@
|
||||
.git
|
||||
.github
|
||||
target
|
||||
.kilocode
|
||||
cache
|
||||
tlsfront
|
||||
*.tar
|
||||
*.tar.gz
|
||||
9
.github/codeql/codeql-config.yml
vendored
9
.github/codeql/codeql-config.yml
vendored
@@ -7,7 +7,16 @@ queries:
|
||||
- uses: security-and-quality
|
||||
- uses: ./.github/codeql/queries
|
||||
|
||||
paths-ignore:
|
||||
- "**/tests/**"
|
||||
- "**/test/**"
|
||||
- "**/*_test.rs"
|
||||
- "**/*/tests.rs"
|
||||
query-filters:
|
||||
- exclude:
|
||||
tags:
|
||||
- test
|
||||
|
||||
- exclude:
|
||||
id:
|
||||
- rust/unwrap-on-option
|
||||
|
||||
135
.github/instructions/rust_rules.instructions.md
vendored
Normal file
135
.github/instructions/rust_rules.instructions.md
vendored
Normal file
@@ -0,0 +1,135 @@
|
||||
---
|
||||
description: 'Rust programming language coding conventions and best practices'
|
||||
applyTo: '**/*.rs'
|
||||
---
|
||||
|
||||
# Rust Coding Conventions and Best Practices
|
||||
|
||||
Follow idiomatic Rust practices and community standards when writing Rust code.
|
||||
|
||||
These instructions are based on [The Rust Book](https://doc.rust-lang.org/book/), [Rust API Guidelines](https://rust-lang.github.io/api-guidelines/), [RFC 430 naming conventions](https://github.com/rust-lang/rfcs/blob/master/text/0430-finalizing-naming-conventions.md), and the broader Rust community at [users.rust-lang.org](https://users.rust-lang.org).
|
||||
|
||||
## General Instructions
|
||||
|
||||
- Always prioritize readability, safety, and maintainability.
|
||||
- Use strong typing and leverage Rust's ownership system for memory safety.
|
||||
- Break down complex functions into smaller, more manageable functions.
|
||||
- For algorithm-related code, include explanations of the approach used.
|
||||
- Write code with good maintainability practices, including comments on why certain design decisions were made.
|
||||
- Handle errors gracefully using `Result<T, E>` and provide meaningful error messages.
|
||||
- For external dependencies, mention their usage and purpose in documentation.
|
||||
- Use consistent naming conventions following [RFC 430](https://github.com/rust-lang/rfcs/blob/master/text/0430-finalizing-naming-conventions.md).
|
||||
- Write idiomatic, safe, and efficient Rust code that follows the borrow checker's rules.
|
||||
- Ensure code compiles without warnings.
|
||||
|
||||
## Patterns to Follow
|
||||
|
||||
- Use modules (`mod`) and public interfaces (`pub`) to encapsulate logic.
|
||||
- Handle errors properly using `?`, `match`, or `if let`.
|
||||
- Use `serde` for serialization and `thiserror` or `anyhow` for custom errors.
|
||||
- Implement traits to abstract services or external dependencies.
|
||||
- Structure async code using `async/await` and `tokio` or `async-std`.
|
||||
- Prefer enums over flags and states for type safety.
|
||||
- Use builders for complex object creation.
|
||||
- Split binary and library code (`main.rs` vs `lib.rs`) for testability and reuse.
|
||||
- Use `rayon` for data parallelism and CPU-bound tasks.
|
||||
- Use iterators instead of index-based loops as they're often faster and safer.
|
||||
- Use `&str` instead of `String` for function parameters when you don't need ownership.
|
||||
- Prefer borrowing and zero-copy operations to avoid unnecessary allocations.
|
||||
|
||||
### Ownership, Borrowing, and Lifetimes
|
||||
|
||||
- Prefer borrowing (`&T`) over cloning unless ownership transfer is necessary.
|
||||
- Use `&mut T` when you need to modify borrowed data.
|
||||
- Explicitly annotate lifetimes when the compiler cannot infer them.
|
||||
- Use `Rc<T>` for single-threaded reference counting and `Arc<T>` for thread-safe reference counting.
|
||||
- Use `RefCell<T>` for interior mutability in single-threaded contexts and `Mutex<T>` or `RwLock<T>` for multi-threaded contexts.
|
||||
|
||||
## Patterns to Avoid
|
||||
|
||||
- Don't use `unwrap()` or `expect()` unless absolutely necessary—prefer proper error handling.
|
||||
- Avoid panics in library code—return `Result` instead.
|
||||
- Don't rely on global mutable state—use dependency injection or thread-safe containers.
|
||||
- Avoid deeply nested logic—refactor with functions or combinators.
|
||||
- Don't ignore warnings—treat them as errors during CI.
|
||||
- Avoid `unsafe` unless required and fully documented.
|
||||
- Don't overuse `clone()`, use borrowing instead of cloning unless ownership transfer is needed.
|
||||
- Avoid premature `collect()`, keep iterators lazy until you actually need the collection.
|
||||
- Avoid unnecessary allocations—prefer borrowing and zero-copy operations.
|
||||
|
||||
## Code Style and Formatting
|
||||
|
||||
- Follow the Rust Style Guide and use `rustfmt` for automatic formatting.
|
||||
- Keep lines under 100 characters when possible.
|
||||
- Place function and struct documentation immediately before the item using `///`.
|
||||
- Use `cargo clippy` to catch common mistakes and enforce best practices.
|
||||
|
||||
## Error Handling
|
||||
|
||||
- Use `Result<T, E>` for recoverable errors and `panic!` only for unrecoverable errors.
|
||||
- Prefer `?` operator over `unwrap()` or `expect()` for error propagation.
|
||||
- Create custom error types using `thiserror` or implement `std::error::Error`.
|
||||
- Use `Option<T>` for values that may or may not exist.
|
||||
- Provide meaningful error messages and context.
|
||||
- Error types should be meaningful and well-behaved (implement standard traits).
|
||||
- Validate function arguments and return appropriate errors for invalid input.
|
||||
|
||||
## API Design Guidelines
|
||||
|
||||
### Common Traits Implementation
|
||||
Eagerly implement common traits where appropriate:
|
||||
- `Copy`, `Clone`, `Eq`, `PartialEq`, `Ord`, `PartialOrd`, `Hash`, `Debug`, `Display`, `Default`
|
||||
- Use standard conversion traits: `From`, `AsRef`, `AsMut`
|
||||
- Collections should implement `FromIterator` and `Extend`
|
||||
- Note: `Send` and `Sync` are auto-implemented by the compiler when safe; avoid manual implementation unless using `unsafe` code
|
||||
|
||||
### Type Safety and Predictability
|
||||
- Use newtypes to provide static distinctions
|
||||
- Arguments should convey meaning through types; prefer specific types over generic `bool` parameters
|
||||
- Use `Option<T>` appropriately for truly optional values
|
||||
- Functions with a clear receiver should be methods
|
||||
- Only smart pointers should implement `Deref` and `DerefMut`
|
||||
|
||||
### Future Proofing
|
||||
- Use sealed traits to protect against downstream implementations
|
||||
- Structs should have private fields
|
||||
- Functions should validate their arguments
|
||||
- All public types must implement `Debug`
|
||||
|
||||
## Testing and Documentation
|
||||
|
||||
- Write comprehensive unit tests using `#[cfg(test)]` modules and `#[test]` annotations.
|
||||
- Use test modules alongside the code they test (`mod tests { ... }`).
|
||||
- Write integration tests in `tests/` directory with descriptive filenames.
|
||||
- Write clear and concise comments for each function, struct, enum, and complex logic.
|
||||
- Ensure functions have descriptive names and include comprehensive documentation.
|
||||
- Document all public APIs with rustdoc (`///` comments) following the [API Guidelines](https://rust-lang.github.io/api-guidelines/).
|
||||
- Use `#[doc(hidden)]` to hide implementation details from public documentation.
|
||||
- Document error conditions, panic scenarios, and safety considerations.
|
||||
- Examples should use `?` operator, not `unwrap()` or deprecated `try!` macro.
|
||||
|
||||
## Project Organization
|
||||
|
||||
- Use semantic versioning in `Cargo.toml`.
|
||||
- Include comprehensive metadata: `description`, `license`, `repository`, `keywords`, `categories`.
|
||||
- Use feature flags for optional functionality.
|
||||
- Organize code into modules using `mod.rs` or named files.
|
||||
- Keep `main.rs` or `lib.rs` minimal - move logic to modules.
|
||||
|
||||
## Quality Checklist
|
||||
|
||||
Before publishing or reviewing Rust code, ensure:
|
||||
|
||||
### Core Requirements
|
||||
- [ ] **Naming**: Follows RFC 430 naming conventions
|
||||
- [ ] **Traits**: Implements `Debug`, `Clone`, `PartialEq` where appropriate
|
||||
- [ ] **Error Handling**: Uses `Result<T, E>` and provides meaningful error types
|
||||
- [ ] **Documentation**: All public items have rustdoc comments with examples
|
||||
- [ ] **Testing**: Comprehensive test coverage including edge cases
|
||||
|
||||
### Safety and Quality
|
||||
- [ ] **Safety**: No unnecessary `unsafe` code, proper error handling
|
||||
- [ ] **Performance**: Efficient use of iterators, minimal allocations
|
||||
- [ ] **API Design**: Functions are predictable, flexible, and type-safe
|
||||
- [ ] **Future Proofing**: Private fields in structs, sealed traits where appropriate
|
||||
- [ ] **Tooling**: Code passes `cargo fmt`, `cargo clippy`, and `cargo test`
|
||||
162
.github/instructions/self-explanatory-code-commenting.instructions.md
vendored
Normal file
162
.github/instructions/self-explanatory-code-commenting.instructions.md
vendored
Normal file
@@ -0,0 +1,162 @@
|
||||
---
|
||||
description: 'Guidelines for GitHub Copilot to write comments to achieve self-explanatory code with less comments. Examples are in JavaScript but it should work on any language that has comments.'
|
||||
applyTo: '**'
|
||||
---
|
||||
|
||||
# Self-explanatory Code Commenting Instructions
|
||||
|
||||
## Core Principle
|
||||
**Write code that speaks for itself. Comment only when necessary to explain WHY, not WHAT.**
|
||||
We do not need comments most of the time.
|
||||
|
||||
## Commenting Guidelines
|
||||
|
||||
### ❌ AVOID These Comment Types
|
||||
|
||||
**Obvious Comments**
|
||||
```javascript
|
||||
// Bad: States the obvious
|
||||
let counter = 0; // Initialize counter to zero
|
||||
counter++; // Increment counter by one
|
||||
```
|
||||
|
||||
**Redundant Comments**
|
||||
```javascript
|
||||
// Bad: Comment repeats the code
|
||||
function getUserName() {
|
||||
return user.name; // Return the user's name
|
||||
}
|
||||
```
|
||||
|
||||
**Outdated Comments**
|
||||
```javascript
|
||||
// Bad: Comment doesn't match the code
|
||||
// Calculate tax at 5% rate
|
||||
const tax = price * 0.08; // Actually 8%
|
||||
```
|
||||
|
||||
### ✅ WRITE These Comment Types
|
||||
|
||||
**Complex Business Logic**
|
||||
```javascript
|
||||
// Good: Explains WHY this specific calculation
|
||||
// Apply progressive tax brackets: 10% up to 10k, 20% above
|
||||
const tax = calculateProgressiveTax(income, [0.10, 0.20], [10000]);
|
||||
```
|
||||
|
||||
**Non-obvious Algorithms**
|
||||
```javascript
|
||||
// Good: Explains the algorithm choice
|
||||
// Using Floyd-Warshall for all-pairs shortest paths
|
||||
// because we need distances between all nodes
|
||||
for (let k = 0; k < vertices; k++) {
|
||||
for (let i = 0; i < vertices; i++) {
|
||||
for (let j = 0; j < vertices; j++) {
|
||||
// ... implementation
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
**Regex Patterns**
|
||||
```javascript
|
||||
// Good: Explains what the regex matches
|
||||
// Match email format: username@domain.extension
|
||||
const emailPattern = /^[a-zA-Z0-9._%+-]+@[a-zA-Z0-9.-]+\.[a-zA-Z]{2,}$/;
|
||||
```
|
||||
|
||||
**API Constraints or Gotchas**
|
||||
```javascript
|
||||
// Good: Explains external constraint
|
||||
// GitHub API rate limit: 5000 requests/hour for authenticated users
|
||||
await rateLimiter.wait();
|
||||
const response = await fetch(githubApiUrl);
|
||||
```
|
||||
|
||||
## Decision Framework
|
||||
|
||||
Before writing a comment, ask:
|
||||
1. **Is the code self-explanatory?** → No comment needed
|
||||
2. **Would a better variable/function name eliminate the need?** → Refactor instead
|
||||
3. **Does this explain WHY, not WHAT?** → Good comment
|
||||
4. **Will this help future maintainers?** → Good comment
|
||||
|
||||
## Special Cases for Comments
|
||||
|
||||
### Public APIs
|
||||
```javascript
|
||||
/**
|
||||
* Calculate compound interest using the standard formula.
|
||||
*
|
||||
* @param {number} principal - Initial amount invested
|
||||
* @param {number} rate - Annual interest rate (as decimal, e.g., 0.05 for 5%)
|
||||
* @param {number} time - Time period in years
|
||||
* @param {number} compoundFrequency - How many times per year interest compounds (default: 1)
|
||||
* @returns {number} Final amount after compound interest
|
||||
*/
|
||||
function calculateCompoundInterest(principal, rate, time, compoundFrequency = 1) {
|
||||
// ... implementation
|
||||
}
|
||||
```
|
||||
|
||||
### Configuration and Constants
|
||||
```javascript
|
||||
// Good: Explains the source or reasoning
|
||||
const MAX_RETRIES = 3; // Based on network reliability studies
|
||||
const API_TIMEOUT = 5000; // AWS Lambda timeout is 15s, leaving buffer
|
||||
```
|
||||
|
||||
### Annotations
|
||||
```javascript
|
||||
// TODO: Replace with proper user authentication after security review
|
||||
// FIXME: Memory leak in production - investigate connection pooling
|
||||
// HACK: Workaround for bug in library v2.1.0 - remove after upgrade
|
||||
// NOTE: This implementation assumes UTC timezone for all calculations
|
||||
// WARNING: This function modifies the original array instead of creating a copy
|
||||
// PERF: Consider caching this result if called frequently in hot path
|
||||
// SECURITY: Validate input to prevent SQL injection before using in query
|
||||
// BUG: Edge case failure when array is empty - needs investigation
|
||||
// REFACTOR: Extract this logic into separate utility function for reusability
|
||||
// DEPRECATED: Use newApiFunction() instead - this will be removed in v3.0
|
||||
```
|
||||
|
||||
## Anti-Patterns to Avoid
|
||||
|
||||
### Dead Code Comments
|
||||
```javascript
|
||||
// Bad: Don't comment out code
|
||||
// const oldFunction = () => { ... };
|
||||
const newFunction = () => { ... };
|
||||
```
|
||||
|
||||
### Changelog Comments
|
||||
```javascript
|
||||
// Bad: Don't maintain history in comments
|
||||
// Modified by John on 2023-01-15
|
||||
// Fixed bug reported by Sarah on 2023-02-03
|
||||
function processData() {
|
||||
// ... implementation
|
||||
}
|
||||
```
|
||||
|
||||
### Divider Comments
|
||||
```javascript
|
||||
// Bad: Don't use decorative comments
|
||||
//=====================================
|
||||
// UTILITY FUNCTIONS
|
||||
//=====================================
|
||||
```
|
||||
|
||||
## Quality Checklist
|
||||
|
||||
Before committing, ensure your comments:
|
||||
- [ ] Explain WHY, not WHAT
|
||||
- [ ] Are grammatically correct and clear
|
||||
- [ ] Will remain accurate as code evolves
|
||||
- [ ] Add genuine value to code understanding
|
||||
- [ ] Are placed appropriately (above the code they describe)
|
||||
- [ ] Use proper spelling and professional language
|
||||
|
||||
## Summary
|
||||
|
||||
Remember: **The best comment is the one you don't need to write because the code is self-documenting.**
|
||||
39
.github/workflows/build.yml
vendored
Normal file
39
.github/workflows/build.yml
vendored
Normal file
@@ -0,0 +1,39 @@
|
||||
name: Build
|
||||
|
||||
on:
|
||||
push:
|
||||
branches: [ "*" ]
|
||||
pull_request:
|
||||
branches: [ "*" ]
|
||||
|
||||
env:
|
||||
CARGO_TERM_COLOR: always
|
||||
|
||||
jobs:
|
||||
build:
|
||||
name: Build
|
||||
runs-on: ubuntu-latest
|
||||
|
||||
permissions:
|
||||
contents: read
|
||||
|
||||
steps:
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@v4
|
||||
|
||||
- name: Install latest stable Rust toolchain
|
||||
uses: dtolnay/rust-toolchain@stable
|
||||
|
||||
- name: Cache cargo registry & build artifacts
|
||||
uses: actions/cache@v4
|
||||
with:
|
||||
path: |
|
||||
~/.cargo/registry
|
||||
~/.cargo/git
|
||||
target
|
||||
key: ${{ runner.os }}-cargo-${{ hashFiles('**/Cargo.lock') }}
|
||||
restore-keys: |
|
||||
${{ runner.os }}-cargo-
|
||||
|
||||
- name: Build Release
|
||||
run: cargo build --release --verbose
|
||||
414
.github/workflows/release.yml
vendored
414
.github/workflows/release.yml
vendored
@@ -5,37 +5,87 @@ on:
|
||||
tags:
|
||||
- '[0-9]+.[0-9]+.[0-9]+'
|
||||
workflow_dispatch:
|
||||
inputs:
|
||||
tag:
|
||||
description: 'Release tag (example: 3.3.15)'
|
||||
required: true
|
||||
type: string
|
||||
|
||||
concurrency:
|
||||
group: release-${{ github.ref_name }}-${{ github.event.inputs.tag || 'auto' }}
|
||||
cancel-in-progress: true
|
||||
|
||||
permissions:
|
||||
contents: read
|
||||
packages: write
|
||||
|
||||
env:
|
||||
CARGO_TERM_COLOR: always
|
||||
BINARY_NAME: telemt
|
||||
|
||||
jobs:
|
||||
build:
|
||||
name: Build ${{ matrix.target }}
|
||||
prepare:
|
||||
name: Prepare
|
||||
runs-on: ubuntu-latest
|
||||
permissions:
|
||||
contents: read
|
||||
|
||||
outputs:
|
||||
version: ${{ steps.vars.outputs.version }}
|
||||
prerelease: ${{ steps.vars.outputs.prerelease }}
|
||||
|
||||
steps:
|
||||
- name: Resolve version
|
||||
id: vars
|
||||
shell: bash
|
||||
run: |
|
||||
set -euo pipefail
|
||||
|
||||
if [ "${GITHUB_EVENT_NAME}" = "workflow_dispatch" ]; then
|
||||
VERSION="${{ github.event.inputs.tag }}"
|
||||
else
|
||||
VERSION="${GITHUB_REF#refs/tags/}"
|
||||
fi
|
||||
|
||||
VERSION="${VERSION#refs/tags/}"
|
||||
|
||||
if [ -z "${VERSION}" ]; then
|
||||
echo "Release version is empty" >&2
|
||||
exit 1
|
||||
fi
|
||||
|
||||
if [[ "${VERSION}" == *-* ]]; then
|
||||
PRERELEASE=true
|
||||
else
|
||||
PRERELEASE=false
|
||||
fi
|
||||
|
||||
echo "version=${VERSION}" >> "${GITHUB_OUTPUT}"
|
||||
echo "prerelease=${PRERELEASE}" >> "${GITHUB_OUTPUT}"
|
||||
|
||||
# ==========================
|
||||
# GNU / glibc
|
||||
# ==========================
|
||||
build-gnu:
|
||||
name: GNU ${{ matrix.asset }}
|
||||
runs-on: ubuntu-latest
|
||||
needs: prepare
|
||||
|
||||
container:
|
||||
image: rust:slim-bookworm
|
||||
|
||||
strategy:
|
||||
fail-fast: false
|
||||
matrix:
|
||||
include:
|
||||
- target: x86_64-unknown-linux-gnu
|
||||
artifact_name: telemt
|
||||
asset_name: telemt-x86_64-linux-gnu
|
||||
asset: telemt-x86_64-linux-gnu
|
||||
cpu: baseline
|
||||
|
||||
- target: x86_64-unknown-linux-gnu
|
||||
asset: telemt-x86_64-v3-linux-gnu
|
||||
cpu: v3
|
||||
|
||||
- target: aarch64-unknown-linux-gnu
|
||||
artifact_name: telemt
|
||||
asset_name: telemt-aarch64-linux-gnu
|
||||
- target: x86_64-unknown-linux-musl
|
||||
artifact_name: telemt
|
||||
asset_name: telemt-x86_64-linux-musl
|
||||
- target: aarch64-unknown-linux-musl
|
||||
artifact_name: telemt
|
||||
asset_name: telemt-aarch64-linux-musl
|
||||
asset: telemt-aarch64-linux-gnu
|
||||
cpu: generic
|
||||
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
@@ -43,47 +93,245 @@ jobs:
|
||||
- uses: dtolnay/rust-toolchain@v1
|
||||
with:
|
||||
toolchain: stable
|
||||
targets: ${{ matrix.target }}
|
||||
targets: |
|
||||
x86_64-unknown-linux-gnu
|
||||
aarch64-unknown-linux-gnu
|
||||
|
||||
- name: Install cross-compilation tools
|
||||
- name: Install deps
|
||||
run: |
|
||||
sudo apt-get update
|
||||
sudo apt-get install -y gcc-aarch64-linux-gnu
|
||||
apt-get update
|
||||
apt-get install -y \
|
||||
build-essential \
|
||||
clang \
|
||||
lld \
|
||||
pkg-config \
|
||||
gcc-aarch64-linux-gnu \
|
||||
g++-aarch64-linux-gnu
|
||||
|
||||
- uses: actions/cache@v4
|
||||
with:
|
||||
path: |
|
||||
~/.cargo/registry
|
||||
~/.cargo/git
|
||||
/usr/local/cargo/registry
|
||||
/usr/local/cargo/git
|
||||
target
|
||||
key: ${{ runner.os }}-${{ matrix.target }}-cargo-${{ hashFiles('**/Cargo.lock') }}
|
||||
key: gnu-${{ matrix.asset }}-${{ hashFiles('**/Cargo.lock') }}
|
||||
restore-keys: |
|
||||
${{ runner.os }}-${{ matrix.target }}-cargo-
|
||||
gnu-${{ matrix.asset }}-
|
||||
gnu-
|
||||
|
||||
- name: Install cross
|
||||
run: cargo install cross --git https://github.com/cross-rs/cross
|
||||
|
||||
- name: Build Release
|
||||
env:
|
||||
RUSTFLAGS: ${{ contains(matrix.target, 'musl') && '-C target-feature=+crt-static' || '' }}
|
||||
run: cross build --release --target ${{ matrix.target }}
|
||||
|
||||
- name: Package binary
|
||||
- name: Build
|
||||
shell: bash
|
||||
run: |
|
||||
cd target/${{ matrix.target }}/release
|
||||
tar -czvf ${{ matrix.asset_name }}.tar.gz ${{ matrix.artifact_name }}
|
||||
sha256sum ${{ matrix.asset_name }}.tar.gz > ${{ matrix.asset_name }}.sha256
|
||||
set -euo pipefail
|
||||
|
||||
if [ "${{ matrix.target }}" = "aarch64-unknown-linux-gnu" ]; then
|
||||
export CC=aarch64-linux-gnu-gcc
|
||||
export CXX=aarch64-linux-gnu-g++
|
||||
export RUSTFLAGS="-C linker=aarch64-linux-gnu-gcc -C lto=fat -C panic=abort"
|
||||
else
|
||||
export CC=clang
|
||||
export CXX=clang++
|
||||
|
||||
if [ "${{ matrix.cpu }}" = "v3" ]; then
|
||||
CPU_FLAGS="-C target-cpu=x86-64-v3"
|
||||
else
|
||||
CPU_FLAGS="-C target-cpu=x86-64"
|
||||
fi
|
||||
|
||||
export RUSTFLAGS="-C linker=clang -C link-arg=-fuse-ld=lld -C lto=fat -C panic=abort ${CPU_FLAGS}"
|
||||
fi
|
||||
|
||||
cargo build --release --target ${{ matrix.target }} -j "$(nproc)"
|
||||
|
||||
- name: Package
|
||||
shell: bash
|
||||
run: |
|
||||
set -euo pipefail
|
||||
|
||||
mkdir -p dist
|
||||
cp "target/${{ matrix.target }}/release/${{ env.BINARY_NAME }}" dist/telemt
|
||||
|
||||
cd dist
|
||||
tar -czf "${{ matrix.asset }}.tar.gz" \
|
||||
--owner=0 --group=0 --numeric-owner \
|
||||
telemt
|
||||
|
||||
sha256sum "${{ matrix.asset }}.tar.gz" > "${{ matrix.asset }}.tar.gz.sha256"
|
||||
|
||||
- uses: actions/upload-artifact@v4
|
||||
with:
|
||||
name: ${{ matrix.asset_name }}
|
||||
path: |
|
||||
target/${{ matrix.target }}/release/${{ matrix.asset_name }}.tar.gz
|
||||
target/${{ matrix.target }}/release/${{ matrix.asset_name }}.sha256
|
||||
name: ${{ matrix.asset }}
|
||||
path: dist/*
|
||||
|
||||
build-docker-image:
|
||||
needs: build
|
||||
# ==========================
|
||||
# MUSL
|
||||
# ==========================
|
||||
build-musl:
|
||||
name: MUSL ${{ matrix.asset }}
|
||||
runs-on: ubuntu-latest
|
||||
needs: prepare
|
||||
|
||||
container:
|
||||
image: rust:slim-bookworm
|
||||
|
||||
strategy:
|
||||
fail-fast: false
|
||||
matrix:
|
||||
include:
|
||||
- target: x86_64-unknown-linux-musl
|
||||
asset: telemt-x86_64-linux-musl
|
||||
cpu: baseline
|
||||
|
||||
- target: x86_64-unknown-linux-musl
|
||||
asset: telemt-x86_64-v3-linux-musl
|
||||
cpu: v3
|
||||
|
||||
- target: aarch64-unknown-linux-musl
|
||||
asset: telemt-aarch64-linux-musl
|
||||
cpu: generic
|
||||
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
|
||||
- name: Install deps
|
||||
run: |
|
||||
apt-get update
|
||||
apt-get install -y \
|
||||
musl-tools \
|
||||
pkg-config \
|
||||
curl
|
||||
|
||||
- uses: actions/cache@v4
|
||||
if: matrix.target == 'aarch64-unknown-linux-musl'
|
||||
with:
|
||||
path: ~/.musl-aarch64
|
||||
key: musl-toolchain-aarch64-v1
|
||||
|
||||
- name: Install aarch64 musl toolchain
|
||||
if: matrix.target == 'aarch64-unknown-linux-musl'
|
||||
shell: bash
|
||||
run: |
|
||||
set -euo pipefail
|
||||
|
||||
TOOLCHAIN_DIR="$HOME/.musl-aarch64"
|
||||
ARCHIVE="aarch64-linux-musl-cross.tgz"
|
||||
URL="https://github.com/telemt/telemt/releases/download/toolchains/${ARCHIVE}"
|
||||
|
||||
if [ -x "${TOOLCHAIN_DIR}/bin/aarch64-linux-musl-gcc" ]; then
|
||||
echo "MUSL toolchain cached"
|
||||
else
|
||||
curl -fL \
|
||||
--retry 5 \
|
||||
--retry-delay 3 \
|
||||
--connect-timeout 10 \
|
||||
--max-time 120 \
|
||||
-o "${ARCHIVE}" "${URL}"
|
||||
|
||||
mkdir -p "${TOOLCHAIN_DIR}"
|
||||
tar -xzf "${ARCHIVE}" --strip-components=1 -C "${TOOLCHAIN_DIR}"
|
||||
fi
|
||||
|
||||
echo "${TOOLCHAIN_DIR}/bin" >> "${GITHUB_PATH}"
|
||||
|
||||
- name: Add rust target
|
||||
run: rustup target add ${{ matrix.target }}
|
||||
|
||||
- uses: actions/cache@v4
|
||||
with:
|
||||
path: |
|
||||
/usr/local/cargo/registry
|
||||
/usr/local/cargo/git
|
||||
target
|
||||
key: musl-${{ matrix.asset }}-${{ hashFiles('**/Cargo.lock') }}
|
||||
restore-keys: |
|
||||
musl-${{ matrix.asset }}-
|
||||
musl-
|
||||
|
||||
- name: Build
|
||||
shell: bash
|
||||
run: |
|
||||
set -euo pipefail
|
||||
|
||||
if [ "${{ matrix.target }}" = "aarch64-unknown-linux-musl" ]; then
|
||||
export CC=aarch64-linux-musl-gcc
|
||||
export CC_aarch64_unknown_linux_musl=aarch64-linux-musl-gcc
|
||||
export RUSTFLAGS="-C target-feature=+crt-static -C linker=aarch64-linux-musl-gcc -C lto=fat -C panic=abort"
|
||||
else
|
||||
export CC=musl-gcc
|
||||
export CC_x86_64_unknown_linux_musl=musl-gcc
|
||||
|
||||
if [ "${{ matrix.cpu }}" = "v3" ]; then
|
||||
CPU_FLAGS="-C target-cpu=x86-64-v3"
|
||||
else
|
||||
CPU_FLAGS="-C target-cpu=x86-64"
|
||||
fi
|
||||
|
||||
export RUSTFLAGS="-C target-feature=+crt-static -C lto=fat -C panic=abort ${CPU_FLAGS}"
|
||||
fi
|
||||
|
||||
cargo build --release --target ${{ matrix.target }} -j "$(nproc)"
|
||||
|
||||
- name: Package
|
||||
shell: bash
|
||||
run: |
|
||||
set -euo pipefail
|
||||
|
||||
mkdir -p dist
|
||||
cp "target/${{ matrix.target }}/release/${{ env.BINARY_NAME }}" dist/telemt
|
||||
|
||||
cd dist
|
||||
tar -czf "${{ matrix.asset }}.tar.gz" \
|
||||
--owner=0 --group=0 --numeric-owner \
|
||||
telemt
|
||||
|
||||
sha256sum "${{ matrix.asset }}.tar.gz" > "${{ matrix.asset }}.tar.gz.sha256"
|
||||
|
||||
- uses: actions/upload-artifact@v4
|
||||
with:
|
||||
name: ${{ matrix.asset }}
|
||||
path: dist/*
|
||||
|
||||
# ==========================
|
||||
# Release
|
||||
# ==========================
|
||||
release:
|
||||
name: Release
|
||||
runs-on: ubuntu-latest
|
||||
needs: [prepare, build-gnu, build-musl]
|
||||
|
||||
permissions:
|
||||
contents: write
|
||||
|
||||
steps:
|
||||
- uses: actions/download-artifact@v4
|
||||
with:
|
||||
path: artifacts
|
||||
|
||||
- name: Flatten artifacts
|
||||
shell: bash
|
||||
run: |
|
||||
set -euo pipefail
|
||||
mkdir -p dist
|
||||
find artifacts -type f -exec cp {} dist/ \;
|
||||
|
||||
- name: Create GitHub Release
|
||||
uses: softprops/action-gh-release@v2
|
||||
with:
|
||||
tag_name: ${{ needs.prepare.outputs.version }}
|
||||
target_commitish: ${{ github.sha }}
|
||||
files: dist/*
|
||||
generate_release_notes: true
|
||||
prerelease: ${{ needs.prepare.outputs.prerelease == 'true' }}
|
||||
overwrite_files: true
|
||||
|
||||
# ==========================
|
||||
# Docker
|
||||
# ==========================
|
||||
docker:
|
||||
name: Docker
|
||||
runs-on: ubuntu-latest
|
||||
needs: [prepare, release]
|
||||
|
||||
permissions:
|
||||
contents: read
|
||||
packages: write
|
||||
@@ -92,48 +340,66 @@ jobs:
|
||||
- uses: actions/checkout@v4
|
||||
|
||||
- uses: docker/setup-qemu-action@v3
|
||||
|
||||
- uses: docker/setup-buildx-action@v3
|
||||
|
||||
- name: Login to GHCR
|
||||
uses: docker/login-action@v3
|
||||
- uses: docker/login-action@v3
|
||||
with:
|
||||
registry: ghcr.io
|
||||
username: ${{ github.actor }}
|
||||
password: ${{ secrets.GITHUB_TOKEN }}
|
||||
|
||||
- name: Extract version
|
||||
id: vars
|
||||
run: echo "VERSION=${GITHUB_REF#refs/tags/}" >> $GITHUB_OUTPUT
|
||||
- name: Probe release assets
|
||||
shell: bash
|
||||
env:
|
||||
VERSION: ${{ needs.prepare.outputs.version }}
|
||||
run: |
|
||||
set -euo pipefail
|
||||
|
||||
- name: Build and push
|
||||
for asset in \
|
||||
telemt-x86_64-linux-musl.tar.gz \
|
||||
telemt-x86_64-linux-musl.tar.gz.sha256 \
|
||||
telemt-aarch64-linux-musl.tar.gz \
|
||||
telemt-aarch64-linux-musl.tar.gz.sha256
|
||||
do
|
||||
curl -fsIL \
|
||||
--retry 10 \
|
||||
--retry-delay 3 \
|
||||
"https://github.com/${GITHUB_REPOSITORY}/releases/download/${VERSION}/${asset}" \
|
||||
> /dev/null
|
||||
done
|
||||
|
||||
- name: Compute image tags
|
||||
id: meta
|
||||
shell: bash
|
||||
env:
|
||||
VERSION: ${{ needs.prepare.outputs.version }}
|
||||
run: |
|
||||
set -euo pipefail
|
||||
|
||||
IMAGE="$(echo "ghcr.io/${GITHUB_REPOSITORY}" | tr '[:upper:]' '[:lower:]')"
|
||||
TAGS="${IMAGE}:${VERSION}"
|
||||
|
||||
if [[ "${VERSION}" != *-* ]]; then
|
||||
TAGS="${TAGS}"$'\n'"${IMAGE}:latest"
|
||||
fi
|
||||
|
||||
{
|
||||
echo "tags<<EOF"
|
||||
printf '%s\n' "${TAGS}"
|
||||
echo "EOF"
|
||||
} >> "${GITHUB_OUTPUT}"
|
||||
|
||||
- name: Build & Push
|
||||
uses: docker/build-push-action@v6
|
||||
with:
|
||||
context: .
|
||||
push: true
|
||||
tags: |
|
||||
ghcr.io/${{ github.repository }}:${{ steps.vars.outputs.VERSION }}
|
||||
ghcr.io/${{ github.repository }}:latest
|
||||
|
||||
release:
|
||||
name: Create Release
|
||||
needs: build
|
||||
runs-on: ubuntu-latest
|
||||
permissions:
|
||||
contents: write
|
||||
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
with:
|
||||
fetch-depth: 0
|
||||
|
||||
- uses: actions/download-artifact@v4
|
||||
with:
|
||||
path: artifacts
|
||||
|
||||
- name: Create Release
|
||||
uses: softprops/action-gh-release@v2
|
||||
with:
|
||||
files: artifacts/**/*
|
||||
generate_release_notes: true
|
||||
draft: false
|
||||
prerelease: ${{ contains(github.ref, '-rc') || contains(github.ref, '-beta') || contains(github.ref, '-alpha') }}
|
||||
pull: true
|
||||
platforms: linux/amd64,linux/arm64
|
||||
tags: ${{ steps.meta.outputs.tags }}
|
||||
build-args: |
|
||||
TELEMT_REPOSITORY=${{ github.repository }}
|
||||
TELEMT_VERSION=${{ needs.prepare.outputs.version }}
|
||||
cache-from: type=gha
|
||||
cache-to: type=gha,mode=max
|
||||
|
||||
54
.github/workflows/rust.yml
vendored
54
.github/workflows/rust.yml
vendored
@@ -1,54 +0,0 @@
|
||||
name: Rust
|
||||
|
||||
on:
|
||||
push:
|
||||
branches: [ "*" ]
|
||||
pull_request:
|
||||
branches: [ "*" ]
|
||||
|
||||
env:
|
||||
CARGO_TERM_COLOR: always
|
||||
|
||||
jobs:
|
||||
build:
|
||||
name: Build
|
||||
runs-on: ubuntu-latest
|
||||
|
||||
permissions:
|
||||
contents: read
|
||||
actions: write
|
||||
checks: write
|
||||
|
||||
steps:
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@v4
|
||||
|
||||
- name: Install latest stable Rust toolchain
|
||||
uses: dtolnay/rust-toolchain@stable
|
||||
with:
|
||||
components: rustfmt, clippy
|
||||
|
||||
- name: Cache cargo registry & build artifacts
|
||||
uses: actions/cache@v4
|
||||
with:
|
||||
path: |
|
||||
~/.cargo/registry
|
||||
~/.cargo/git
|
||||
target
|
||||
key: ${{ runner.os }}-cargo-${{ hashFiles('**/Cargo.lock') }}
|
||||
restore-keys: |
|
||||
${{ runner.os }}-cargo-
|
||||
|
||||
- name: Build Release
|
||||
run: cargo build --release --verbose
|
||||
|
||||
- name: Run tests
|
||||
run: cargo test --verbose
|
||||
|
||||
# clippy dont fail on warnings because of active development of telemt
|
||||
# and many warnings
|
||||
- name: Run clippy
|
||||
run: cargo clippy -- --cap-lints warn
|
||||
|
||||
- name: Check for unused dependencies
|
||||
run: cargo udeps || true
|
||||
139
.github/workflows/test.yml
vendored
Normal file
139
.github/workflows/test.yml
vendored
Normal file
@@ -0,0 +1,139 @@
|
||||
name: Check
|
||||
|
||||
on:
|
||||
push:
|
||||
branches: [ "*" ]
|
||||
pull_request:
|
||||
branches: [ "*" ]
|
||||
|
||||
env:
|
||||
CARGO_TERM_COLOR: always
|
||||
|
||||
concurrency:
|
||||
group: test-${{ github.ref }}
|
||||
cancel-in-progress: true
|
||||
|
||||
jobs:
|
||||
# ==========================
|
||||
# Formatting
|
||||
# ==========================
|
||||
fmt:
|
||||
name: Fmt
|
||||
runs-on: ubuntu-latest
|
||||
|
||||
permissions:
|
||||
contents: read
|
||||
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
|
||||
- uses: dtolnay/rust-toolchain@stable
|
||||
with:
|
||||
components: rustfmt
|
||||
|
||||
- run: cargo fmt -- --check
|
||||
|
||||
# ==========================
|
||||
# Tests
|
||||
# ==========================
|
||||
test:
|
||||
name: Test
|
||||
runs-on: ubuntu-latest
|
||||
|
||||
permissions:
|
||||
contents: read
|
||||
actions: write
|
||||
checks: write
|
||||
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
|
||||
- uses: dtolnay/rust-toolchain@stable
|
||||
|
||||
- name: Cache cargo
|
||||
uses: actions/cache@v4
|
||||
with:
|
||||
path: |
|
||||
~/.cargo/bin
|
||||
~/.cargo/registry
|
||||
~/.cargo/git
|
||||
target
|
||||
key: ${{ runner.os }}-cargo-nextest-${{ hashFiles('**/Cargo.lock') }}
|
||||
restore-keys: |
|
||||
${{ runner.os }}-cargo-nextest-
|
||||
${{ runner.os }}-cargo-
|
||||
|
||||
- name: Install cargo-nextest
|
||||
run: cargo install --locked cargo-nextest || true
|
||||
|
||||
- name: Run tests with nextest
|
||||
run: cargo nextest run -j "$(nproc)"
|
||||
|
||||
# ==========================
|
||||
# Clippy
|
||||
# ==========================
|
||||
clippy:
|
||||
name: Clippy
|
||||
runs-on: ubuntu-latest
|
||||
|
||||
permissions:
|
||||
contents: read
|
||||
checks: write
|
||||
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
|
||||
- uses: dtolnay/rust-toolchain@stable
|
||||
with:
|
||||
components: clippy
|
||||
|
||||
- name: Cache cargo
|
||||
uses: actions/cache@v4
|
||||
with:
|
||||
path: |
|
||||
~/.cargo/registry
|
||||
~/.cargo/git
|
||||
target
|
||||
key: ${{ runner.os }}-cargo-clippy-${{ hashFiles('**/Cargo.lock') }}
|
||||
restore-keys: |
|
||||
${{ runner.os }}-cargo-clippy-
|
||||
${{ runner.os }}-cargo-
|
||||
|
||||
- name: Run clippy
|
||||
run: cargo clippy -j "$(nproc)" -- --cap-lints warn
|
||||
|
||||
# ==========================
|
||||
# Udeps
|
||||
# ==========================
|
||||
udeps:
|
||||
name: Udeps
|
||||
runs-on: ubuntu-latest
|
||||
|
||||
permissions:
|
||||
contents: read
|
||||
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
|
||||
- uses: dtolnay/rust-toolchain@stable
|
||||
with:
|
||||
components: rust-src
|
||||
|
||||
- name: Cache cargo
|
||||
uses: actions/cache@v4
|
||||
with:
|
||||
path: |
|
||||
~/.cargo/bin
|
||||
~/.cargo/registry
|
||||
~/.cargo/git
|
||||
target
|
||||
key: ${{ runner.os }}-cargo-udeps-${{ hashFiles('**/Cargo.lock') }}
|
||||
restore-keys: |
|
||||
${{ runner.os }}-cargo-udeps-
|
||||
${{ runner.os }}-cargo-
|
||||
|
||||
- name: Install cargo-udeps
|
||||
run: cargo install --locked cargo-udeps || true
|
||||
|
||||
- name: Run udeps
|
||||
run: cargo udeps -j "$(nproc)" || true
|
||||
1
.gitignore
vendored
1
.gitignore
vendored
@@ -21,3 +21,4 @@ target
|
||||
#.idea/
|
||||
|
||||
proxy-secret
|
||||
coverage-html/
|
||||
22
AGENTS.md
22
AGENTS.md
@@ -5,6 +5,22 @@ Your responses are precise, minimal, and architecturally sound. You are working
|
||||
|
||||
---
|
||||
|
||||
### Context: The Telemt Project
|
||||
|
||||
You are working on **Telemt**, a high-performance, production-grade Telegram MTProxy implementation written in Rust. It is explicitly designed to operate in highly hostile network environments and evade advanced network censorship.
|
||||
|
||||
**Adversarial Threat Model:**
|
||||
The proxy operates under constant surveillance by DPI (Deep Packet Inspection) systems and active scanners (state firewalls, mobile operator fraud controls). These entities actively probe IPs, analyze protocol handshakes, and look for known proxy signatures to block or throttle traffic.
|
||||
|
||||
**Core Architectural Pillars:**
|
||||
1. **TLS-Fronting (TLS-F) & TCP-Splitting (TCP-S):** To the outside world, Telemt looks like a standard TLS server. If a client presents a valid MTProxy key, the connection is handled internally. If a censor's scanner, web browser, or unauthorized crawler connects, Telemt seamlessly splices the TCP connection (L4) to a real, legitimate HTTPS fallback server (e.g., Nginx) without modifying the `ClientHello` or terminating the TLS handshake.
|
||||
2. **Middle-End (ME) Orchestration:** A highly concurrent, generation-based pool managing upstream connections to Telegram Datacenters (DCs). It utilizes an **Adaptive Floor** (dynamically scaling writer connections based on traffic), **Hardswaps** (zero-downtime pool reconfiguration), and **STUN/NAT** reflection mechanisms.
|
||||
3. **Strict KDF Routing:** Cryptographic Key Derivation Functions (KDF) in this protocol strictly rely on the exact pairing of Source IP/Port and Destination IP/Port. Deviations or missing port logic will silently break the MTProto handshake.
|
||||
4. **Data Plane vs. Control Plane Isolation:** The Data Plane (readers, writers, payload relay, TCP splicing) must remain strictly non-blocking, zero-allocation in hot paths, and highly resilient to network backpressure. The Control Plane (API, metrics, pool generation swaps, config reloads) orchestrates the state asynchronously without stalling the Data Plane.
|
||||
|
||||
Any modification you make must preserve Telemt's invisibility to censors, its strict memory-safety invariants, and its hot-path throughput.
|
||||
|
||||
|
||||
### 0. Priority Resolution — Scope Control
|
||||
|
||||
This section resolves conflicts between code quality enforcement and scope limitation.
|
||||
@@ -374,6 +390,12 @@ you MUST explain why existing invariants remain valid.
|
||||
- Do not modify existing tests unless the task explicitly requires it.
|
||||
- Do not weaken assertions.
|
||||
- Preserve determinism in testable components.
|
||||
- Bug-first forces the discipline of proving you understand a bug before you fix it. Tests written after a fix almost always pass trivially and catch nothing new.
|
||||
- Invariants over scenarios is the core shift. The route_mode table alone would have caught both BUG-1 and BUG-2 before they were written — "snapshot equals watch state after any transition burst" is a two-line property test that fails immediately on the current diverged-atomics code.
|
||||
- Differential/model catches logic drift over time.
|
||||
- Scheduler pressure is specifically aimed at the concurrent state bugs that keep reappearing. A single-threaded happy-path test of set_mode will never find subtle bugs; 10,000 concurrent calls will find it on the first run.
|
||||
- Mutation gate answers your original complaint directly. It measures test power. If you can remove a bounds check and nothing breaks, the suite isn't covering that branch yet — it just says so explicitly.
|
||||
- Dead parameter is a code smell rule.
|
||||
|
||||
### 15. Security Constraints
|
||||
|
||||
|
||||
212
CODE_OF_CONDUCT.md
Normal file
212
CODE_OF_CONDUCT.md
Normal file
@@ -0,0 +1,212 @@
|
||||
# Code of Conduct
|
||||
|
||||
## Purpose
|
||||
|
||||
**Telemt exists to solve technical problems.**
|
||||
|
||||
Telemt is open to contributors who want to learn, improve and build meaningful systems together.
|
||||
|
||||
It is a place for building, testing, reasoning, documenting, and improving systems.
|
||||
|
||||
Discussions that advance this work are in scope. Discussions that divert it are not.
|
||||
|
||||
Technology has consequences. Responsibility is inherent.
|
||||
|
||||
> **Zweck bestimmt die Form.**
|
||||
|
||||
> Purpose defines form.
|
||||
|
||||
---
|
||||
|
||||
## Principles
|
||||
|
||||
* **Technical over emotional**
|
||||
|
||||
Arguments are grounded in data, logs, reproducible cases, or clear reasoning.
|
||||
|
||||
* **Clarity over noise**
|
||||
|
||||
Communication is structured, concise, and relevant.
|
||||
|
||||
* **Openness with standards**
|
||||
|
||||
Participation is open. The work remains disciplined.
|
||||
|
||||
* **Independence of judgment**
|
||||
|
||||
Claims are evaluated on technical merit, not affiliation or posture.
|
||||
|
||||
* **Responsibility over capability**
|
||||
|
||||
Capability does not justify careless use.
|
||||
|
||||
* **Cooperation over friction**
|
||||
|
||||
Progress depends on coordination, mutual support, and honest review.
|
||||
|
||||
* **Good intent, rigorous method**
|
||||
|
||||
Assume good intent, but require rigor.
|
||||
|
||||
> **Aussagen gelten nach ihrer Begründung.**
|
||||
|
||||
> Claims are weighed by evidence.
|
||||
|
||||
---
|
||||
|
||||
## Expected Behavior
|
||||
|
||||
Participants are expected to:
|
||||
|
||||
* Communicate directly and respectfully
|
||||
* Support claims with evidence
|
||||
* Stay within technical scope
|
||||
* Accept critique and provide it constructively
|
||||
* Reduce noise, duplication, and ambiguity
|
||||
* Help others reach correct and reproducible outcomes
|
||||
* Act in a way that improves the system as a whole
|
||||
|
||||
Precision is learned.
|
||||
|
||||
New contributors are welcome. They are expected to grow into these standards. Existing contributors are expected to make that growth possible.
|
||||
|
||||
> **Wer behauptet, belegt.**
|
||||
|
||||
> Whoever claims, proves.
|
||||
|
||||
---
|
||||
|
||||
## Unacceptable Behavior
|
||||
|
||||
The following is not allowed:
|
||||
|
||||
* Personal attacks, insults, harassment, or intimidation
|
||||
* Repeatedly derailing discussion away from Telemt’s purpose
|
||||
* Spam, flooding, or repeated low-quality input
|
||||
* Misinformation presented as fact
|
||||
* Attempts to degrade, destabilize, or exhaust Telemt or its participants
|
||||
* Use of Telemt or its spaces to enable harm
|
||||
|
||||
Telemt is not a venue for disputes that displace technical work.
|
||||
Such discussions may be closed, removed, or redirected.
|
||||
|
||||
> **Störung ist kein Beitrag.**
|
||||
|
||||
> Disruption is not contribution.
|
||||
|
||||
---
|
||||
|
||||
## Security and Misuse
|
||||
|
||||
Telemt is intended for responsible use.
|
||||
|
||||
* Do not use it to plan, coordinate, or execute harm
|
||||
* Do not publish vulnerabilities without responsible disclosure
|
||||
* Report security issues privately where possible
|
||||
|
||||
Security is both technical and behavioral.
|
||||
|
||||
> **Verantwortung endet nicht am Code.**
|
||||
|
||||
> Responsibility does not end at the code.
|
||||
|
||||
---
|
||||
|
||||
## 6. Openness
|
||||
|
||||
Telemt is open to contributors of different backgrounds, experience levels, and working styles.
|
||||
|
||||
- Standards are public, legible, and applied to the work itself.
|
||||
- Questions are welcome. Careful disagreement is welcome. Honest correction is welcome.
|
||||
- Gatekeeping by obscurity, status signaling, or hostility is not.
|
||||
|
||||
---
|
||||
|
||||
## Scope
|
||||
|
||||
This Code of Conduct applies to all official spaces:
|
||||
|
||||
* Source repositories (issues, pull requests, discussions)
|
||||
* Documentation
|
||||
* Communication channels associated with Telemt
|
||||
|
||||
---
|
||||
|
||||
## Maintainer Stewardship
|
||||
|
||||
Maintainers are responsible for final decisions in matters of conduct, scope, and direction.
|
||||
|
||||
This responsibility is stewardship:
|
||||
- preserving continuity,
|
||||
- protecting signal,
|
||||
- maintaining standards,
|
||||
- keeping Telemt workable for others.
|
||||
|
||||
Judgment should be exercised with restraint, consistency, and institutional responsibility.
|
||||
- Not every decision requires extended debate.
|
||||
- Not every intervention requires public explanation.
|
||||
|
||||
All decisions are expected to serve the durability, clarity, and integrity of Telemt.
|
||||
|
||||
> **Ordnung ist Voraussetzung der Funktion.**
|
||||
|
||||
> Order is the precondition of function.
|
||||
|
||||
---
|
||||
|
||||
## Enforcement
|
||||
|
||||
Maintainers may act to preserve the integrity of Telemt, including by:
|
||||
|
||||
* Removing content
|
||||
* Locking discussions
|
||||
* Rejecting contributions
|
||||
* Restricting or banning participants
|
||||
|
||||
Actions are taken to maintain function, continuity, and signal quality.
|
||||
- Where possible, correction is preferred to exclusion.
|
||||
- Where necessary, exclusion is preferred to decay.
|
||||
|
||||
---
|
||||
|
||||
## Final
|
||||
|
||||
Telemt is built on discipline, structure, and shared intent.
|
||||
- Signal over noise.
|
||||
- Facts over opinion.
|
||||
- Systems over rhetoric.
|
||||
|
||||
- Work is collective.
|
||||
- Outcomes are shared.
|
||||
- Responsibility is distributed.
|
||||
|
||||
- Precision is learned.
|
||||
- Rigor is expected.
|
||||
- Help is part of the work.
|
||||
|
||||
> **Ordnung ist Voraussetzung der Freiheit.**
|
||||
|
||||
- If you contribute — contribute with care.
|
||||
- If you speak — speak with substance.
|
||||
- If you engage — engage constructively.
|
||||
|
||||
---
|
||||
|
||||
## After All
|
||||
|
||||
Systems outlive intentions.
|
||||
- What is built will be used.
|
||||
- What is released will propagate.
|
||||
- What is maintained will define the future state.
|
||||
|
||||
There is no neutral infrastructure, only infrastructure shaped well or poorly.
|
||||
|
||||
> **Jedes System trägt Verantwortung.**
|
||||
|
||||
> Every system carries responsibility.
|
||||
|
||||
- Stability requires discipline.
|
||||
- Freedom requires structure.
|
||||
- Trust requires honesty.
|
||||
|
||||
In the end: the system reflects its contributors.
|
||||
@@ -1,3 +1,8 @@
|
||||
# Issues - Rules
|
||||
## What it is not
|
||||
- NOT Question and Answer
|
||||
- NOT Helpdesk
|
||||
|
||||
# Pull Requests - Rules
|
||||
## General
|
||||
- ONLY signed and verified commits
|
||||
|
||||
1541
Cargo.lock
generated
1541
Cargo.lock
generated
File diff suppressed because it is too large
Load Diff
37
Cargo.toml
37
Cargo.toml
@@ -1,8 +1,11 @@
|
||||
[package]
|
||||
name = "telemt"
|
||||
version = "3.2.0"
|
||||
version = "3.3.31"
|
||||
edition = "2024"
|
||||
|
||||
[features]
|
||||
redteam_offline_expected_fail = []
|
||||
|
||||
[dependencies]
|
||||
# C
|
||||
libc = "0.2"
|
||||
@@ -22,16 +25,19 @@ hmac = "0.12"
|
||||
crc32fast = "1.4"
|
||||
crc32c = "0.6"
|
||||
zeroize = { version = "1.8", features = ["derive"] }
|
||||
subtle = "2.6"
|
||||
static_assertions = "1.1"
|
||||
|
||||
# Network
|
||||
socket2 = { version = "0.5", features = ["all"] }
|
||||
nix = { version = "0.28", default-features = false, features = ["net"] }
|
||||
socket2 = { version = "0.6", features = ["all"] }
|
||||
nix = { version = "0.31", default-features = false, features = ["net", "fs"] }
|
||||
shadowsocks = { version = "1.24", features = ["aead-cipher-2022"] }
|
||||
|
||||
# Serialization
|
||||
serde = { version = "1.0", features = ["derive"] }
|
||||
serde_json = "1.0"
|
||||
toml = "0.8"
|
||||
x509-parser = "0.15"
|
||||
toml = "1.0"
|
||||
x509-parser = "0.18"
|
||||
|
||||
# Utils
|
||||
bytes = "1.9"
|
||||
@@ -39,9 +45,10 @@ thiserror = "2.0"
|
||||
tracing = "0.1"
|
||||
tracing-subscriber = { version = "0.3", features = ["env-filter"] }
|
||||
parking_lot = "0.12"
|
||||
dashmap = "5.5"
|
||||
dashmap = "6.1"
|
||||
arc-swap = "1.7"
|
||||
lru = "0.16"
|
||||
rand = "0.9"
|
||||
rand = "0.10"
|
||||
chrono = { version = "0.4", features = ["serde"] }
|
||||
hex = "0.4"
|
||||
base64 = "0.22"
|
||||
@@ -50,26 +57,32 @@ regex = "1.11"
|
||||
crossbeam-queue = "0.3"
|
||||
num-bigint = "0.4"
|
||||
num-traits = "0.2"
|
||||
x25519-dalek = "2"
|
||||
anyhow = "1.0"
|
||||
|
||||
# HTTP
|
||||
reqwest = { version = "0.12", features = ["rustls-tls"], default-features = false }
|
||||
notify = { version = "6", features = ["macos_fsevent"] }
|
||||
ipnetwork = "0.20"
|
||||
reqwest = { version = "0.13", features = ["rustls"], default-features = false }
|
||||
notify = "8.2"
|
||||
ipnetwork = { version = "0.21", features = ["serde"] }
|
||||
hyper = { version = "1", features = ["server", "http1"] }
|
||||
hyper-util = { version = "0.1", features = ["tokio", "server-auto"] }
|
||||
http-body-util = "0.1"
|
||||
httpdate = "1.0"
|
||||
tokio-rustls = { version = "0.26", default-features = false, features = ["tls12"] }
|
||||
rustls = { version = "0.23", default-features = false, features = ["std", "tls12", "ring"] }
|
||||
webpki-roots = "0.26"
|
||||
webpki-roots = "1.0"
|
||||
|
||||
[dev-dependencies]
|
||||
tokio-test = "0.4"
|
||||
criterion = "0.5"
|
||||
criterion = "0.8"
|
||||
proptest = "1.4"
|
||||
futures = "0.3"
|
||||
|
||||
[[bench]]
|
||||
name = "crypto_bench"
|
||||
harness = false
|
||||
|
||||
[profile.release]
|
||||
lto = "fat"
|
||||
codegen-units = 1
|
||||
|
||||
|
||||
113
Dockerfile
113
Dockerfile
@@ -1,43 +1,98 @@
|
||||
# ==========================
|
||||
# Stage 1: Build
|
||||
# ==========================
|
||||
FROM rust:1.88-slim-bookworm AS builder
|
||||
# syntax=docker/dockerfile:1
|
||||
|
||||
RUN apt-get update && apt-get install -y --no-install-recommends \
|
||||
pkg-config \
|
||||
&& rm -rf /var/lib/apt/lists/*
|
||||
|
||||
WORKDIR /build
|
||||
|
||||
COPY Cargo.toml Cargo.lock* ./
|
||||
RUN mkdir src && echo 'fn main() {}' > src/main.rs && \
|
||||
cargo build --release 2>/dev/null || true && \
|
||||
rm -rf src
|
||||
|
||||
COPY . .
|
||||
RUN cargo build --release && strip target/release/telemt
|
||||
ARG TELEMT_REPOSITORY=telemt/telemt
|
||||
ARG TELEMT_VERSION=latest
|
||||
|
||||
# ==========================
|
||||
# Stage 2: Runtime
|
||||
# Minimal Image
|
||||
# ==========================
|
||||
FROM debian:bookworm-slim
|
||||
FROM debian:12-slim AS minimal
|
||||
|
||||
RUN apt-get update && apt-get install -y --no-install-recommends \
|
||||
ca-certificates \
|
||||
&& rm -rf /var/lib/apt/lists/*
|
||||
ARG TARGETARCH
|
||||
ARG TELEMT_REPOSITORY
|
||||
ARG TELEMT_VERSION
|
||||
|
||||
RUN useradd -r -s /usr/sbin/nologin telemt
|
||||
RUN set -eux; \
|
||||
apt-get update; \
|
||||
apt-get install -y --no-install-recommends \
|
||||
binutils \
|
||||
ca-certificates \
|
||||
curl \
|
||||
tar; \
|
||||
rm -rf /var/lib/apt/lists/*
|
||||
|
||||
RUN set -eux; \
|
||||
case "${TARGETARCH}" in \
|
||||
amd64) ASSET="telemt-x86_64-linux-musl.tar.gz" ;; \
|
||||
arm64) ASSET="telemt-aarch64-linux-musl.tar.gz" ;; \
|
||||
*) echo "Unsupported TARGETARCH: ${TARGETARCH}" >&2; exit 1 ;; \
|
||||
esac; \
|
||||
VERSION="${TELEMT_VERSION#refs/tags/}"; \
|
||||
if [ -z "${VERSION}" ] || [ "${VERSION}" = "latest" ]; then \
|
||||
BASE_URL="https://github.com/${TELEMT_REPOSITORY}/releases/latest/download"; \
|
||||
else \
|
||||
BASE_URL="https://github.com/${TELEMT_REPOSITORY}/releases/download/${VERSION}"; \
|
||||
fi; \
|
||||
curl -fL \
|
||||
--retry 5 \
|
||||
--retry-delay 3 \
|
||||
--connect-timeout 10 \
|
||||
--max-time 120 \
|
||||
-o "/tmp/${ASSET}" \
|
||||
"${BASE_URL}/${ASSET}"; \
|
||||
curl -fL \
|
||||
--retry 5 \
|
||||
--retry-delay 3 \
|
||||
--connect-timeout 10 \
|
||||
--max-time 120 \
|
||||
-o "/tmp/${ASSET}.sha256" \
|
||||
"${BASE_URL}/${ASSET}.sha256"; \
|
||||
cd /tmp; \
|
||||
sha256sum -c "${ASSET}.sha256"; \
|
||||
tar -xzf "${ASSET}" -C /tmp; \
|
||||
test -f /tmp/telemt; \
|
||||
install -m 0755 /tmp/telemt /telemt; \
|
||||
strip --strip-unneeded /telemt || true; \
|
||||
rm -f "/tmp/${ASSET}" "/tmp/${ASSET}.sha256" /tmp/telemt
|
||||
|
||||
# ==========================
|
||||
# Debug Image
|
||||
# ==========================
|
||||
FROM debian:12-slim AS debug
|
||||
|
||||
RUN set -eux; \
|
||||
apt-get update; \
|
||||
apt-get install -y --no-install-recommends \
|
||||
ca-certificates \
|
||||
tzdata \
|
||||
curl \
|
||||
iproute2 \
|
||||
busybox; \
|
||||
rm -rf /var/lib/apt/lists/*
|
||||
|
||||
WORKDIR /app
|
||||
|
||||
COPY --from=builder /build/target/release/telemt /app/telemt
|
||||
COPY --from=minimal /telemt /app/telemt
|
||||
COPY config.toml /app/config.toml
|
||||
|
||||
RUN chown -R telemt:telemt /app
|
||||
USER telemt
|
||||
|
||||
EXPOSE 443
|
||||
EXPOSE 9090
|
||||
EXPOSE 443 9090 9091
|
||||
|
||||
ENTRYPOINT ["/app/telemt"]
|
||||
CMD ["config.toml"]
|
||||
|
||||
# ==========================
|
||||
# Production Distroless on MUSL
|
||||
# ==========================
|
||||
FROM gcr.io/distroless/static-debian12 AS prod
|
||||
|
||||
WORKDIR /app
|
||||
|
||||
COPY --from=minimal /telemt /app/telemt
|
||||
COPY config.toml /app/config.toml
|
||||
|
||||
USER nonroot:nonroot
|
||||
|
||||
EXPOSE 443 9090 9091
|
||||
|
||||
ENTRYPOINT ["/app/telemt"]
|
||||
CMD ["config.toml"]
|
||||
|
||||
165
LICENSE
Normal file
165
LICENSE
Normal file
@@ -0,0 +1,165 @@
|
||||
###### TELEMT Public License 3 ######
|
||||
##### Copyright (c) 2026 Telemt #####
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this Software and associated documentation files (the "Software"),
|
||||
to use, reproduce, modify, prepare derivative works of, merge, publish,
|
||||
distribute, sublicense, and/or sell copies of the Software, and to permit
|
||||
persons to whom the Software is furnished to do so, provided that all
|
||||
copyright notices, license terms, and conditions set forth in this License
|
||||
are preserved and complied with.
|
||||
|
||||
### Official Translations
|
||||
|
||||
The canonical version of this License is the English version.
|
||||
Official translations are provided for informational purposes only
|
||||
and for convenience, and do not have legal force. In case of any
|
||||
discrepancy, the English version of this License shall prevail.
|
||||
Available versions:
|
||||
- English in Markdown: docs/LICENSE/LICENSE.md
|
||||
- German: docs/LICENSE/LICENSE.de.md
|
||||
- Russian: docs/LICENSE/LICENSE.ru.md
|
||||
|
||||
### License Versioning Policy
|
||||
|
||||
This License is version 3 of the TELEMT Public License.
|
||||
Each version of the Software is licensed under the License that
|
||||
accompanies its corresponding source code distribution.
|
||||
|
||||
Future versions of the Software may be distributed under a different
|
||||
version of the TELEMT Public License or under a different license,
|
||||
as determined by the Telemt maintainers.
|
||||
|
||||
Any such change of license applies only to the versions of the
|
||||
Software distributed with the new license and SHALL NOT retroactively
|
||||
affect any previously released versions of the Software.
|
||||
|
||||
Recipients of the Software are granted rights only under the License
|
||||
provided with the version of the Software they received.
|
||||
|
||||
Redistributions of the Software, including Modified Versions, MUST
|
||||
preserve the copyright notices, license text, and conditions of this
|
||||
License for all portions of the Software derived from Telemt.
|
||||
|
||||
Additional terms or licenses may be applied to modifications or
|
||||
additional code added by a redistributor, provided that such terms
|
||||
do not restrict or alter the rights granted under this License for
|
||||
the original Telemt Software.
|
||||
|
||||
Nothing in this section limits the rights granted under this License
|
||||
for versions of the Software already released.
|
||||
|
||||
### Definitions
|
||||
|
||||
For the purposes of this License:
|
||||
- "Software" means the Telemt software, including source code, documentation,
|
||||
and any associated files distributed under this License.
|
||||
- "Contributor" means any person or entity that submits code, patches,
|
||||
documentation, or other contributions to the Software that are accepted
|
||||
into the Software by the maintainers.
|
||||
- "Contribution" means any work of authorship intentionally submitted
|
||||
to the Software for inclusion in the Software.
|
||||
- "Modified Version" means any version of the Software that has been
|
||||
changed, adapted, extended, or otherwise modified from the original
|
||||
Software.
|
||||
- "Maintainers" means the individuals or entities responsible for
|
||||
the official Telemt project and its releases.
|
||||
|
||||
#### 1 Attribution
|
||||
|
||||
Redistributions of the Software, in source or binary form, MUST RETAIN the
|
||||
above copyright notice, this license text, and any existing attribution
|
||||
notices.
|
||||
|
||||
#### 2 Modification Notice
|
||||
|
||||
If you modify the Software, you MUST clearly state that the Software has been
|
||||
modified and include a brief description of the changes made.
|
||||
|
||||
Modified versions MUST NOT be presented as the original Telemt.
|
||||
|
||||
#### 3 Trademark and Branding
|
||||
|
||||
This license DOES NOT grant permission to use the name "Telemt",
|
||||
the Telemt logo, or any Telemt trademarks or branding.
|
||||
|
||||
Redistributed or modified versions of the Software MAY NOT use the Telemt
|
||||
name in a way that suggests endorsement or official origin without explicit
|
||||
permission from the Telemt maintainers.
|
||||
|
||||
Use of the name "Telemt" to describe a modified version of the Software
|
||||
is permitted only if the modified version is clearly identified as a
|
||||
modified or unofficial version.
|
||||
|
||||
Any distribution that could reasonably confuse users into believing that
|
||||
the software is an official Telemt release is prohibited.
|
||||
|
||||
#### 4 Binary Distribution Transparency
|
||||
|
||||
If you distribute compiled binaries of the Software,
|
||||
you are ENCOURAGED to provide access to the corresponding
|
||||
source code and build instructions where reasonably possible.
|
||||
|
||||
This helps preserve transparency and allows recipients to verify the
|
||||
integrity and reproducibility of distributed builds.
|
||||
|
||||
#### 5 Patent Grant and Defensive Termination Clause
|
||||
|
||||
Each contributor grants you a perpetual, worldwide, non-exclusive,
|
||||
no-charge, royalty-free, irrevocable patent license to make, have made,
|
||||
use, offer to sell, sell, import, and otherwise transfer the Software.
|
||||
|
||||
This patent license applies only to those patent claims necessarily
|
||||
infringed by the contributor’s contribution alone or by combination of
|
||||
their contribution with the Software.
|
||||
|
||||
If you initiate or participate in any patent litigation, including
|
||||
cross-claims or counterclaims, alleging that the Software or any
|
||||
contribution incorporated within the Software constitutes patent
|
||||
infringement, then **all rights granted to you under this license shall
|
||||
terminate immediately** as of the date such litigation is filed.
|
||||
|
||||
Additionally, if you initiate legal action alleging that the
|
||||
Software itself infringes your patent or other intellectual
|
||||
property rights, then all rights granted to you under this
|
||||
license SHALL TERMINATE automatically.
|
||||
|
||||
#### 6 Contributions
|
||||
|
||||
Unless you explicitly state otherwise, any Contribution intentionally
|
||||
submitted for inclusion in the Software shall be licensed under the terms
|
||||
of this License.
|
||||
|
||||
By submitting a Contribution, you grant the Telemt maintainers and all
|
||||
recipients of the Software the rights described in this License with
|
||||
respect to that Contribution.
|
||||
|
||||
#### 7 Network Use Attribution
|
||||
|
||||
If the Software is used to provide a publicly accessible network service,
|
||||
the operator of such service SHOULD provide attribution to Telemt in at least
|
||||
one of the following locations:
|
||||
|
||||
- service documentation
|
||||
- service description
|
||||
- an "About" or similar informational page
|
||||
- other user-visible materials reasonably associated with the service
|
||||
|
||||
Such attribution MUST NOT imply endorsement by the Telemt project or its
|
||||
maintainers.
|
||||
|
||||
#### 8 Disclaimer of Warranty and Severability Clause
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
|
||||
|
||||
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
|
||||
DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
|
||||
OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
|
||||
USE OR OTHER DEALINGS IN THE SOFTWARE
|
||||
|
||||
IF ANY PROVISION OF THIS LICENSE IS HELD TO BE INVALID OR UNENFORCEABLE,
|
||||
SUCH PROVISION SHALL BE INTERPRETED TO REFLECT THE ORIGINAL INTENT
|
||||
OF THE PARTIES AS CLOSELY AS POSSIBLE, AND THE REMAINING PROVISIONS
|
||||
SHALL REMAIN IN FULL FORCE AND EFFECT
|
||||
19
LICENSING.md
19
LICENSING.md
@@ -1,17 +1,12 @@
|
||||
# LICENSING
|
||||
## Licenses for Versions
|
||||
| Version | License |
|
||||
|---------|---------------|
|
||||
| 1.0 | NO LICENSE |
|
||||
| 1.1 | NO LICENSE |
|
||||
| 1.2 | NO LICENSE |
|
||||
| 2.0 | NO LICENSE |
|
||||
| 3.0 | TELEMT UL 1 |
|
||||
| Version ≥ | Version ≤ | License |
|
||||
|-----------|-----------|---------------|
|
||||
| 1.0 | 3.3.17 | NO LICENSE |
|
||||
| 3.3.18 | 3.4.0 | TELEMT PL 3 |
|
||||
|
||||
### License Types
|
||||
- **NO LICENSE** = ***ALL RIGHTS RESERVED***
|
||||
- **TELEMT UL1** - work in progress license for source code of `telemt`, which encourages:
|
||||
- fair use,
|
||||
- contributions,
|
||||
- distribution,
|
||||
- but requires that the authors be credited
|
||||
- **TELEMT PL** - special Telemt Public License based on Apache License 2 principles
|
||||
|
||||
## [Telemt Public License 3](https://github.com/telemt/telemt/blob/main/LICENSE)
|
||||
|
||||
271
README.md
271
README.md
@@ -2,7 +2,12 @@
|
||||
|
||||
***Löst Probleme, bevor andere überhaupt wissen, dass sie existieren*** / ***It solves problems before others even realize they exist***
|
||||
|
||||
**Telemt** is a fast, secure, and feature-rich server written in Rust: it fully implements the official Telegram proxy algo and adds many production-ready improvements such as connection pooling, replay protection, detailed statistics, masking from "prying" eyes
|
||||
**Telemt** is a fast, secure, and feature-rich server written in Rust: it fully implements the official Telegram proxy algo and adds many production-ready improvements such as:
|
||||
- [ME Pool + Reader/Writer + Registry + Refill + Adaptive Floor + Trio-State + Generation Lifecycle](https://github.com/telemt/telemt/blob/main/docs/model/MODEL.en.md)
|
||||
- [Full-covered API w/ management](https://github.com/telemt/telemt/blob/main/docs/API.md)
|
||||
- Anti-Replay on Sliding Window
|
||||
- Prometheus-format Metrics
|
||||
- TLS-Fronting and TCP-Splicing for masking from "prying" eyes
|
||||
|
||||
[**Telemt Chat in Telegram**](https://t.me/telemtrs)
|
||||
|
||||
@@ -14,18 +19,11 @@
|
||||
|
||||
### 🇷🇺 RU
|
||||
|
||||
#### Релиз 3.0.15 — 25 февраля
|
||||
#### О релизах
|
||||
|
||||
25 февраля мы выпустили версию **3.0.15**
|
||||
[3.3.27](https://github.com/telemt/telemt/releases/tag/3.3.27) даёт баланс стабильности и передового функционала, а так же последние исправления по безопасности и багам
|
||||
|
||||
Мы предполагаем, что она станет завершающей версией поколения 3.0 и уже сейчас мы рассматриваем её как **LTS-кандидата** для версии **3.1.0**!
|
||||
|
||||
После нескольких дней детального анализа особенностей работы Middle-End мы спроектировали и реализовали продуманный режим **ротации ME Writer**. Данный режим позволяет поддерживать стабильно высокую производительность в long-run сценариях без возникновения ошибок, связанных с некорректной конфигурацией прокси
|
||||
|
||||
Будем рады вашему фидбеку и предложениям по улучшению — особенно в части **статистики** и **UX**
|
||||
|
||||
Релиз:
|
||||
[3.0.15](https://github.com/telemt/telemt/releases/tag/3.0.15)
|
||||
Будем рады вашему фидбеку и предложениям по улучшению — особенно в части **API**, **статистики**, **UX**
|
||||
|
||||
---
|
||||
|
||||
@@ -42,18 +40,11 @@
|
||||
|
||||
### 🇬🇧 EN
|
||||
|
||||
#### Release 3.0.15 — February 25
|
||||
#### About releases
|
||||
|
||||
On February 25, we released version **3.0.15**
|
||||
[3.3.27](https://github.com/telemt/telemt/releases/tag/3.3.27) provides a balance of stability and advanced functionality, as well as the latest security and bug fixes
|
||||
|
||||
We expect this to become the final release of the 3.0 generation and at this point, we already see it as a strong **LTS candidate** for the upcoming **3.1.0** release!
|
||||
|
||||
After several days of deep analysis of Middle-End behavior, we designed and implemented a well-engineered **ME Writer rotation mode**. This mode enables sustained high throughput in long-run scenarios while preventing proxy misconfiguration errors
|
||||
|
||||
We are looking forward to your feedback and improvement proposals — especially regarding **statistics** and **UX**
|
||||
|
||||
Release:
|
||||
[3.0.15](https://github.com/telemt/telemt/releases/tag/3.0.15)
|
||||
We are looking forward to your feedback and improvement proposals — especially regarding **API**, **statistics**, **UX**
|
||||
|
||||
---
|
||||
|
||||
@@ -76,31 +67,6 @@ We welcome ideas, architectural feedback, and pull requests.
|
||||
|
||||
⚓ Our ***Middle-End Pool*** is fastest by design in standard scenarios, compared to other implementations of connecting to the Middle-End Proxy: not dramatically, but consistently
|
||||
|
||||
# GOTO
|
||||
- [Features](#features)
|
||||
- [Quick Start Guide](#quick-start-guide)
|
||||
- [How to use?](#how-to-use)
|
||||
- [Systemd Method](#telemt-via-systemd)
|
||||
- [Configuration](#configuration)
|
||||
- [Minimal Configuration](#minimal-configuration-for-first-start)
|
||||
- [Advanced](#advanced)
|
||||
- [Adtag](#adtag)
|
||||
- [Listening and Announce IPs](#listening-and-announce-ips)
|
||||
- [Upstream Manager](#upstream-manager)
|
||||
- [IP](#bind-on-ip)
|
||||
- [SOCKS](#socks45-as-upstream)
|
||||
- [FAQ](#faq)
|
||||
- [Recognizability for DPI + crawler](#recognizability-for-dpi-and-crawler)
|
||||
- [Telegram Calls](#telegram-calls-via-mtproxy)
|
||||
- [DPI](#how-does-dpi-see-mtproxy-tls)
|
||||
- [Whitelist on Network Level](#whitelist-on-ip)
|
||||
- [Too many open files](#too-many-open-files)
|
||||
- [Build](#build)
|
||||
- [Docker](#docker)
|
||||
- [Why Rust?](#why-rust)
|
||||
|
||||
## Features
|
||||
|
||||
- Full support for all official MTProto proxy modes:
|
||||
- Classic
|
||||
- Secure - with `dd` prefix
|
||||
@@ -111,158 +77,31 @@ We welcome ideas, architectural feedback, and pull requests.
|
||||
- Graceful shutdown on Ctrl+C
|
||||
- Extensive logging via `trace` and `debug` with `RUST_LOG` method
|
||||
|
||||
# GOTO
|
||||
- [Quick Start Guide](#quick-start-guide)
|
||||
- [FAQ](#faq)
|
||||
- [Recognizability for DPI and crawler](#recognizability-for-dpi-and-crawler)
|
||||
- [Client WITH secret-key accesses the MTProxy resource:](#client-with-secret-key-accesses-the-mtproxy-resource)
|
||||
- [Client WITHOUT secret-key gets transparent access to the specified resource:](#client-without-secret-key-gets-transparent-access-to-the-specified-resource)
|
||||
- [Telegram Calls via MTProxy](#telegram-calls-via-mtproxy)
|
||||
- [How does DPI see MTProxy TLS?](#how-does-dpi-see-mtproxy-tls)
|
||||
- [Whitelist on IP](#whitelist-on-ip)
|
||||
- [Too many open files](#too-many-open-files)
|
||||
- [Build](#build)
|
||||
- [Why Rust?](#why-rust)
|
||||
- [Issues](#issues)
|
||||
- [Roadmap](#roadmap)
|
||||
|
||||
|
||||
## Quick Start Guide
|
||||
**This software is designed for Debian-based OS: in addition to Debian, these are Ubuntu, Mint, Kali, MX and many other Linux**
|
||||
1. Download release
|
||||
```bash
|
||||
wget -qO- "https://github.com/telemt/telemt/releases/latest/download/telemt-$(uname -m)-linux-$(ldd --version 2>&1 | grep -iq musl && echo musl || echo gnu).tar.gz" | tar -xz
|
||||
```
|
||||
2. Move to Bin Folder
|
||||
```bash
|
||||
mv telemt /bin
|
||||
```
|
||||
4. Make Executable
|
||||
```bash
|
||||
chmod +x /bin/telemt
|
||||
```
|
||||
5. Go to the [How to use?](#how-to-use) section for further steps
|
||||
|
||||
## How to use?
|
||||
### Telemt via Systemd
|
||||
**This instruction "assume" that you:**
|
||||
- logged in as root or executed `su -` / `sudo su`
|
||||
- you already have an assembled and executable `telemt` in /bin folder as a result of the [Quick Start Guide](#quick-start-guide) or [Build](#build)
|
||||
|
||||
**0. Check port and generate secrets**
|
||||
|
||||
The port you have selected for use should be MISSING from the list, when:
|
||||
```bash
|
||||
netstat -lnp
|
||||
```
|
||||
|
||||
Generate 16 bytes/32 characters HEX with OpenSSL or another way:
|
||||
```bash
|
||||
openssl rand -hex 16
|
||||
```
|
||||
OR
|
||||
```bash
|
||||
xxd -l 16 -p /dev/urandom
|
||||
```
|
||||
OR
|
||||
```bash
|
||||
python3 -c 'import os; print(os.urandom(16).hex())'
|
||||
```
|
||||
|
||||
**1. Place your config to /etc/telemt.toml**
|
||||
|
||||
Open nano
|
||||
```bash
|
||||
nano /etc/telemt.toml
|
||||
```
|
||||
paste your config from [Configuration](#configuration) section
|
||||
|
||||
then Ctrl+X -> Y -> Enter to save
|
||||
|
||||
**2. Create service on /etc/systemd/system/telemt.service**
|
||||
|
||||
Open nano
|
||||
```bash
|
||||
nano /etc/systemd/system/telemt.service
|
||||
```
|
||||
paste this Systemd Module
|
||||
```bash
|
||||
[Unit]
|
||||
Description=Telemt
|
||||
After=network.target
|
||||
|
||||
[Service]
|
||||
Type=simple
|
||||
WorkingDirectory=/bin
|
||||
ExecStart=/bin/telemt /etc/telemt.toml
|
||||
Restart=on-failure
|
||||
LimitNOFILE=65536
|
||||
|
||||
[Install]
|
||||
WantedBy=multi-user.target
|
||||
```
|
||||
then Ctrl+X -> Y -> Enter to save
|
||||
|
||||
**3.** In Shell type `systemctl start telemt` - it must start with zero exit-code
|
||||
|
||||
**4.** In Shell type `systemctl status telemt` - there you can reach info about current MTProxy status
|
||||
|
||||
**5.** In Shell type `systemctl enable telemt` - then telemt will start with system startup, after the network is up
|
||||
|
||||
**6.** In Shell type `journalctl -u telemt -n -g "links" --no-pager -o cat | tac` - get the connection links
|
||||
|
||||
## Configuration
|
||||
### Minimal Configuration for First Start
|
||||
```toml
|
||||
# === General Settings ===
|
||||
[general]
|
||||
# ad_tag = "00000000000000000000000000000000"
|
||||
|
||||
[general.modes]
|
||||
classic = false
|
||||
secure = false
|
||||
tls = true
|
||||
|
||||
# === Anti-Censorship & Masking ===
|
||||
[censorship]
|
||||
tls_domain = "petrovich.ru"
|
||||
|
||||
[access.users]
|
||||
# format: "username" = "32_hex_chars_secret"
|
||||
hello = "00000000000000000000000000000000"
|
||||
|
||||
```
|
||||
### Advanced
|
||||
#### Adtag (per-user)
|
||||
To use channel advertising and usage statistics from Telegram, get an Adtag from [@mtproxybot](https://t.me/mtproxybot). Set it per user in `[access.user_ad_tags]` (32 hex chars):
|
||||
```toml
|
||||
[access.user_ad_tags]
|
||||
username1 = "11111111111111111111111111111111" # Replace with your tag from @mtproxybot
|
||||
username2 = "22222222222222222222222222222222"
|
||||
```
|
||||
#### Listening and Announce IPs
|
||||
To specify listening address and/or address in links, add to section `[[server.listeners]]` of config.toml:
|
||||
```toml
|
||||
[[server.listeners]]
|
||||
ip = "0.0.0.0" # 0.0.0.0 = all IPs; your IP = specific listening
|
||||
announce_ip = "1.2.3.4" # IP in links; comment with # if not used
|
||||
```
|
||||
#### Upstream Manager
|
||||
To specify upstream, add to section `[[upstreams]]` of config.toml:
|
||||
##### Bind on IP
|
||||
```toml
|
||||
[[upstreams]]
|
||||
type = "direct"
|
||||
weight = 1
|
||||
enabled = true
|
||||
interface = "192.168.1.100" # Change to your outgoing IP
|
||||
```
|
||||
##### SOCKS4/5 as Upstream
|
||||
- Without Auth:
|
||||
```toml
|
||||
[[upstreams]]
|
||||
type = "socks5" # Specify SOCKS4 or SOCKS5
|
||||
address = "1.2.3.4:1234" # SOCKS-server Address
|
||||
weight = 1 # Set Weight for Scenarios
|
||||
enabled = true
|
||||
```
|
||||
|
||||
- With Auth:
|
||||
```toml
|
||||
[[upstreams]]
|
||||
type = "socks5" # Specify SOCKS4 or SOCKS5
|
||||
address = "1.2.3.4:1234" # SOCKS-server Address
|
||||
username = "user" # Username for Auth on SOCKS-server
|
||||
password = "pass" # Password for Auth on SOCKS-server
|
||||
weight = 1 # Set Weight for Scenarios
|
||||
enabled = true
|
||||
```
|
||||
- [Quick Start Guide RU](docs/QUICK_START_GUIDE.ru.md)
|
||||
- [Quick Start Guide EN](docs/QUICK_START_GUIDE.en.md)
|
||||
|
||||
## FAQ
|
||||
|
||||
- [FAQ RU](docs/FAQ.ru.md)
|
||||
- [FAQ EN](docs/FAQ.en.md)
|
||||
|
||||
### Recognizability for DPI and crawler
|
||||
Since version 1.1.0.0, we have debugged masking perfectly: for all clients without "presenting" a key,
|
||||
we transparently direct traffic to the target host!
|
||||
@@ -399,6 +238,11 @@ git clone https://github.com/telemt/telemt
|
||||
cd telemt
|
||||
# Starting Release Build
|
||||
cargo build --release
|
||||
|
||||
# Low-RAM devices (1 GB, e.g. NanoPi Neo3 / Raspberry Pi Zero 2):
|
||||
# release profile uses lto = "thin" to reduce peak linker memory.
|
||||
# If your custom toolchain overrides profiles, avoid enabling fat LTO.
|
||||
|
||||
# Move to /bin
|
||||
mv ./target/release/telemt /bin
|
||||
# Make executable
|
||||
@@ -407,40 +251,11 @@ chmod +x /bin/telemt
|
||||
telemt config.toml
|
||||
```
|
||||
|
||||
## Docker
|
||||
**Quick start (Docker Compose)**
|
||||
### OpenBSD
|
||||
- Build and service setup guide: [OpenBSD Guide (EN)](docs/OPENBSD.en.md)
|
||||
- Example rc.d script: [contrib/openbsd/telemt.rcd](contrib/openbsd/telemt.rcd)
|
||||
- Status: OpenBSD sandbox hardening with `pledge(2)` and `unveil(2)` is not implemented yet.
|
||||
|
||||
1. Edit `config.toml` in repo root (at least: port, users secrets, tls_domain)
|
||||
2. Start container:
|
||||
```bash
|
||||
docker compose up -d --build
|
||||
```
|
||||
3. Check logs:
|
||||
```bash
|
||||
docker compose logs -f telemt
|
||||
```
|
||||
4. Stop:
|
||||
```bash
|
||||
docker compose down
|
||||
```
|
||||
|
||||
**Notes**
|
||||
- `docker-compose.yml` maps `./config.toml` to `/app/config.toml` (read-only)
|
||||
- By default it publishes `443:443` and runs with dropped capabilities (only `NET_BIND_SERVICE` is added)
|
||||
- If you really need host networking (usually only for some IPv6 setups) uncomment `network_mode: host`
|
||||
|
||||
**Run without Compose**
|
||||
```bash
|
||||
docker build -t telemt:local .
|
||||
docker run --name telemt --restart unless-stopped \
|
||||
-p 443:443 \
|
||||
-e RUST_LOG=info \
|
||||
-v "$PWD/config.toml:/app/config.toml:ro" \
|
||||
--read-only \
|
||||
--cap-drop ALL --cap-add NET_BIND_SERVICE \
|
||||
--ulimit nofile=65536:65536 \
|
||||
telemt:local
|
||||
```
|
||||
|
||||
## Why Rust?
|
||||
- Long-running reliability and idempotent behavior
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
// Cryptobench
|
||||
use criterion::{black_box, criterion_group, Criterion};
|
||||
use criterion::{Criterion, black_box, criterion_group};
|
||||
|
||||
fn bench_aes_ctr(c: &mut Criterion) {
|
||||
c.bench_function("aes_ctr_encrypt_64kb", |b| {
|
||||
@@ -9,4 +9,4 @@ fn bench_aes_ctr(c: &mut Criterion) {
|
||||
black_box(enc.encrypt(&data))
|
||||
})
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
697
config.full.toml
697
config.full.toml
@@ -1,697 +0,0 @@
|
||||
# ==============================================================================
|
||||
#
|
||||
# TELEMT — Advanced Rust-based Telegram MTProto Proxy
|
||||
# Full Configuration Reference
|
||||
#
|
||||
# This file is both a working config and a complete documentation.
|
||||
# Every parameter is explained. Read it top to bottom before deploying.
|
||||
#
|
||||
# Quick Start:
|
||||
# 1. Set [server].port to your desired port (443 recommended)
|
||||
# 2. Generate a secret: openssl rand -hex 16
|
||||
# 3. Put it in [access.users] under a name you choose
|
||||
# 4. Set [censorship].tls_domain to a popular unblocked HTTPS site
|
||||
# 5. Set your public IP in [general].middle_proxy_nat_ip
|
||||
# and [general.links].public_host
|
||||
# 6. Set announce IP in [[server.listeners]]
|
||||
# 7. Run Telemt. It prints a tg:// link. Send it to your users.
|
||||
#
|
||||
# Modes of Operation:
|
||||
# Direct Mode (use_middle_proxy = false)
|
||||
# Connects straight to Telegram DCs via TCP. Simple, fast, low overhead.
|
||||
# No ad_tag support. No CDN DC support (203, etc).
|
||||
#
|
||||
# Middle-Proxy Mode (use_middle_proxy = true)
|
||||
# Connects to Telegram Middle-End servers via RPC protocol.
|
||||
# Required for ad_tag monetization and CDN support.
|
||||
# Requires proxy_secret_path and a valid public IP.
|
||||
#
|
||||
# ==============================================================================
|
||||
|
||||
|
||||
# ==============================================================================
|
||||
# LEGACY TOP-LEVEL FIELDS
|
||||
# ==============================================================================
|
||||
|
||||
# Deprecated. Use [general.links].show instead.
|
||||
# Accepts "*" for all users, or an array like ["alice", "bob"].
|
||||
show_link = ["0"]
|
||||
|
||||
# Fallback Datacenter index (1-5) when a client requests an unknown DC ID.
|
||||
# DC 2 is Amsterdam (Europe), closest for most CIS users.
|
||||
# default_dc = 2
|
||||
|
||||
|
||||
# ==============================================================================
|
||||
# GENERAL SETTINGS
|
||||
# ==============================================================================
|
||||
|
||||
[general]
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# Core Protocol
|
||||
# ------------------------------------------------------------------------------
|
||||
|
||||
# Coalesce the MTProto handshake and first data payload into a single TCP packet.
|
||||
# Significantly reduces connection latency. No reason to disable.
|
||||
fast_mode = true
|
||||
|
||||
# How the proxy connects to Telegram servers.
|
||||
# false = Direct TCP to Telegram DCs (simple, low overhead)
|
||||
# true = Middle-End RPC protocol (required for ad_tag and CDN DCs)
|
||||
use_middle_proxy = true
|
||||
|
||||
# 32-char hex Ad-Tag from @MTProxybot for sponsored channel injection.
|
||||
# Only works when use_middle_proxy = true.
|
||||
# Obtain yours: message @MTProxybot on Telegram, register your proxy.
|
||||
# ad_tag = "00000000000000000000000000000000"
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# Middle-End Authentication
|
||||
# ------------------------------------------------------------------------------
|
||||
|
||||
# Path to the Telegram infrastructure AES key file.
|
||||
# Auto-downloaded from https://core.telegram.org/getProxySecret on first run.
|
||||
# This key authenticates your proxy with Middle-End servers.
|
||||
proxy_secret_path = "proxy-secret"
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# Public IP Configuration (Critical for Middle-Proxy Mode)
|
||||
# ------------------------------------------------------------------------------
|
||||
|
||||
# Your server's PUBLIC IPv4 address.
|
||||
# Middle-End servers need this for the cryptographic Key Derivation Function.
|
||||
# If your server has a direct public IP, set it here.
|
||||
# If behind NAT (AWS, Docker, etc.), this MUST be your external IP.
|
||||
# If omitted, Telemt uses STUN to auto-detect (see middle_proxy_nat_probe).
|
||||
# middle_proxy_nat_ip = "203.0.113.10"
|
||||
|
||||
# Auto-detect public IP via STUN servers defined in [network].
|
||||
# Set to false if you hardcoded middle_proxy_nat_ip above.
|
||||
# Set to true if you want automatic detection.
|
||||
middle_proxy_nat_probe = true
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# Middle-End Connection Pool
|
||||
# ------------------------------------------------------------------------------
|
||||
|
||||
# Number of persistent multiplexed RPC connections to ME servers.
|
||||
# All client traffic is routed through these "fat pipes".
|
||||
# 8 handles thousands of concurrent users comfortably.
|
||||
middle_proxy_pool_size = 8
|
||||
|
||||
# Legacy field. Connections kept initialized but idle as warm standby.
|
||||
middle_proxy_warm_standby = 16
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# Middle-End Keepalive
|
||||
# Telegram ME servers aggressively kill idle TCP connections.
|
||||
# These settings send periodic RPC_PING frames to keep pipes alive.
|
||||
# ------------------------------------------------------------------------------
|
||||
|
||||
me_keepalive_enabled = true
|
||||
|
||||
# Base interval between pings in seconds.
|
||||
me_keepalive_interval_secs = 25
|
||||
|
||||
# Random jitter added to interval to prevent all connections pinging simultaneously.
|
||||
me_keepalive_jitter_secs = 5
|
||||
|
||||
# Randomize ping payload bytes to prevent DPI from fingerprinting ping patterns.
|
||||
me_keepalive_payload_random = true
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# Client-Side Limits
|
||||
# ------------------------------------------------------------------------------
|
||||
|
||||
# Max buffered ciphertext per client (bytes) when upstream is slow.
|
||||
# Acts as backpressure to prevent memory exhaustion. 256KB is safe.
|
||||
crypto_pending_buffer = 262144
|
||||
|
||||
# Maximum single MTProto frame size from client. 16MB is protocol standard.
|
||||
max_client_frame = 16777216
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# Crypto Desynchronization Logging
|
||||
# Desync errors usually mean DPI/GFW is tampering with connections.
|
||||
# ------------------------------------------------------------------------------
|
||||
|
||||
# true = full forensics (trace ID, IP hash, hex dumps) for EVERY desync event
|
||||
# false = deduplicated logging, one entry per time window (prevents log spam)
|
||||
# Set true if you are actively debugging DPI interference.
|
||||
desync_all_full = true
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# Beobachten — Built-in Honeypot / Active Probe Tracker
|
||||
# Tracks IPs that fail handshakes or behave like TLS scanners.
|
||||
# Output file can be fed into fail2ban or iptables for auto-blocking.
|
||||
# ------------------------------------------------------------------------------
|
||||
|
||||
beobachten = true
|
||||
|
||||
# How long (minutes) to remember a suspicious IP before expiring it.
|
||||
beobachten_minutes = 30
|
||||
|
||||
# How often (seconds) to flush tracker state to disk.
|
||||
beobachten_flush_secs = 15
|
||||
|
||||
# File path for the tracker output.
|
||||
beobachten_file = "cache/beobachten.txt"
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# Hardswap — Zero-Downtime ME Pool Rotation
|
||||
# When Telegram updates ME server IPs, Hardswap creates a completely new pool,
|
||||
# waits until it is fully ready, migrates traffic, then kills the old pool.
|
||||
# Users experience zero interruption.
|
||||
# ------------------------------------------------------------------------------
|
||||
|
||||
hardswap = true
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# ME Pool Warmup Staggering
|
||||
# When creating a new pool, connections are opened one by one with delays
|
||||
# to avoid a burst of SYN packets that could trigger ISP flood protection.
|
||||
# ------------------------------------------------------------------------------
|
||||
|
||||
me_warmup_stagger_enabled = true
|
||||
|
||||
# Delay between each connection creation (milliseconds).
|
||||
me_warmup_step_delay_ms = 500
|
||||
|
||||
# Random jitter added to the delay (milliseconds).
|
||||
me_warmup_step_jitter_ms = 300
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# ME Reconnect Backoff
|
||||
# If an ME server drops the connection, Telemt retries with this strategy.
|
||||
# ------------------------------------------------------------------------------
|
||||
|
||||
# Max simultaneous reconnect attempts per DC.
|
||||
me_reconnect_max_concurrent_per_dc = 8
|
||||
|
||||
# Exponential backoff base (milliseconds).
|
||||
me_reconnect_backoff_base_ms = 500
|
||||
|
||||
# Backoff ceiling (milliseconds). Will never wait longer than this.
|
||||
me_reconnect_backoff_cap_ms = 30000
|
||||
|
||||
# Number of instant retries before switching to exponential backoff.
|
||||
me_reconnect_fast_retry_count = 12
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# NAT Mismatch Behavior
|
||||
# Applies if the STUN-detected IP differs from the local interface IP (i.e., you are behind NAT):
|
||||
# false = abort ME mode (safe default)
|
||||
# true = force ME mode anyway (use if you know your NAT setup is correct)
|
||||
# ------------------------------------------------------------------------------
|
||||
|
||||
stun_iface_mismatch_ignore = false
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# Logging
|
||||
# ------------------------------------------------------------------------------
|
||||
|
||||
# File to log unknown DC requests (DC IDs outside standard 1-5).
|
||||
unknown_dc_log_path = "unknown-dc.txt"
|
||||
|
||||
# Verbosity: "debug" | "verbose" | "normal" | "silent"
|
||||
log_level = "normal"
|
||||
|
||||
# Disable ANSI color codes in log output (useful for file logging).
|
||||
disable_colors = false
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# FakeTLS Record Sizing
|
||||
# Buffer small MTProto packets into larger TLS records to mimic real HTTPS.
|
||||
# Real HTTPS servers send records close to MTU size (~1400 bytes).
|
||||
# A stream of tiny TLS records is a strong DPI signal.
|
||||
# Set to 0 to disable. Set to 1400 for realistic HTTPS emulation.
|
||||
# ------------------------------------------------------------------------------
|
||||
|
||||
fast_mode_min_tls_record = 1400
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# Periodic Updates
|
||||
# ------------------------------------------------------------------------------
|
||||
|
||||
# How often (seconds) to re-fetch ME server lists and proxy secrets
|
||||
# from core.telegram.org. Keeps your proxy in sync with Telegram infrastructure.
|
||||
update_every = 300
|
||||
|
||||
# How often (seconds) to force a Hardswap even if the ME map is unchanged.
|
||||
# Shorter intervals mean shorter-lived TCP flows, harder for DPI to profile.
|
||||
me_reinit_every_secs = 600
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# Hardswap Warmup Tuning
|
||||
# Fine-grained control over how the new pool is warmed up before traffic switch.
|
||||
# ------------------------------------------------------------------------------
|
||||
|
||||
me_hardswap_warmup_delay_min_ms = 1000
|
||||
me_hardswap_warmup_delay_max_ms = 2000
|
||||
me_hardswap_warmup_extra_passes = 3
|
||||
me_hardswap_warmup_pass_backoff_base_ms = 500
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# Config Update Debouncing
|
||||
# Telegram sometimes pushes transient/broken configs. Debouncing requires
|
||||
# N consecutive identical fetches before applying a change.
|
||||
# ------------------------------------------------------------------------------
|
||||
|
||||
# ME server list must be identical for this many fetches before applying.
|
||||
me_config_stable_snapshots = 2
|
||||
|
||||
# Minimum seconds between config applications.
|
||||
me_config_apply_cooldown_secs = 300
|
||||
|
||||
# Proxy secret must be identical for this many fetches before applying.
|
||||
proxy_secret_stable_snapshots = 2
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# Proxy Secret Rotation
|
||||
# ------------------------------------------------------------------------------
|
||||
|
||||
# Apply newly downloaded secrets at runtime without restart.
|
||||
proxy_secret_rotate_runtime = true
|
||||
|
||||
# Maximum acceptable secret length (bytes). Rejects abnormally large secrets.
|
||||
proxy_secret_len_max = 256
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# Hardswap Drain Settings
|
||||
# Controls graceful shutdown of old ME connections during pool rotation.
|
||||
# ------------------------------------------------------------------------------
|
||||
|
||||
# Seconds to keep old connections alive for in-flight data before force-closing.
|
||||
me_pool_drain_ttl_secs = 90
|
||||
|
||||
# Minimum ratio of healthy connections in new pool before draining old pool.
|
||||
# 0.8 = at least 80% of new pool must be ready.
|
||||
me_pool_min_fresh_ratio = 0.8
|
||||
|
||||
# Maximum seconds to wait for drain to complete before force-killing.
|
||||
me_reinit_drain_timeout_secs = 120
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# NTP Clock Check
|
||||
# MTProto uses timestamps. Clock drift > 30 seconds breaks handshakes.
|
||||
# Telemt checks on startup and warns if out of sync.
|
||||
# ------------------------------------------------------------------------------
|
||||
|
||||
ntp_check = true
|
||||
ntp_servers = ["pool.ntp.org"]
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# Auto-Degradation
|
||||
# If ME servers become completely unreachable (ISP blocking),
|
||||
# automatically fall back to Direct Mode so users stay connected.
|
||||
# ------------------------------------------------------------------------------
|
||||
|
||||
auto_degradation_enabled = true
|
||||
|
||||
# Number of DC groups that must be unreachable before triggering fallback.
|
||||
degradation_min_unavailable_dc_groups = 2
|
||||
|
||||
|
||||
# ==============================================================================
|
||||
# ALLOWED CLIENT PROTOCOLS
|
||||
# Only enable what you need. In censored regions, TLS-only is safest.
|
||||
# ==============================================================================
|
||||
|
||||
[general.modes]
|
||||
|
||||
# Classic MTProto. Unobfuscated length prefixes. Trivially detected by DPI.
|
||||
# No reason to enable unless you have ancient clients.
|
||||
classic = false
|
||||
|
||||
# Obfuscated MTProto with randomized padding. Better than classic, but
|
||||
# still detectable by statistical analysis of packet sizes.
|
||||
secure = false
|
||||
|
||||
# FakeTLS (ee-secrets). Wraps MTProto in TLS 1.3 framing.
|
||||
# To DPI, it looks like a normal HTTPS connection.
|
||||
# This should be the ONLY enabled mode in censored environments.
|
||||
tls = true
|
||||
|
||||
|
||||
# ==============================================================================
|
||||
# STARTUP LINK GENERATION
|
||||
# Controls what tg:// invite links are printed to console on startup.
|
||||
# ==============================================================================
|
||||
|
||||
[general.links]
|
||||
|
||||
# Which users to generate links for.
|
||||
# "*" = all users, or an array like ["alice", "bob"].
|
||||
show = "*"
|
||||
|
||||
# IP or domain to embed in the tg:// link.
|
||||
# If omitted, Telemt uses STUN to auto-detect.
|
||||
# Set this to your server's public IP or domain for reliable links.
|
||||
# public_host = "proxy.example.com"
|
||||
|
||||
# Port to embed in the tg:// link.
|
||||
# If omitted, uses [server].port.
|
||||
# public_port = 443
|
||||
|
||||
|
||||
# ==============================================================================
|
||||
# NETWORK & IP RESOLUTION
|
||||
# ==============================================================================
|
||||
|
||||
[network]
|
||||
|
||||
# Enable IPv4 for outbound connections to Telegram.
|
||||
ipv4 = true
|
||||
|
||||
# Enable IPv6 for outbound connections to Telegram.
|
||||
ipv6 = false
|
||||
|
||||
# Prefer IPv4 (4) or IPv6 (6) when both are available.
|
||||
prefer = 4
|
||||
|
||||
# Experimental: use both IPv4 and IPv6 ME servers simultaneously.
|
||||
# May improve reliability but doubles connection count.
|
||||
multipath = false
|
||||
|
||||
# STUN servers for external IP discovery.
|
||||
# Used for Middle-Proxy KDF (if nat_probe=true) and link generation.
|
||||
stun_servers = [
|
||||
"stun.l.google.com:5349",
|
||||
"stun1.l.google.com:3478",
|
||||
"stun.gmx.net:3478",
|
||||
"stun.l.google.com:19302"
|
||||
]
|
||||
|
||||
# If UDP STUN is blocked, attempt TCP-based STUN as fallback.
|
||||
stun_tcp_fallback = true
|
||||
|
||||
# If all STUN fails, use HTTP APIs to discover public IP.
|
||||
http_ip_detect_urls = [
|
||||
"https://ifconfig.me/ip",
|
||||
"https://api.ipify.org"
|
||||
]
|
||||
|
||||
# Cache discovered public IP to this file to survive restarts.
|
||||
cache_public_ip_path = "cache/public_ip.txt"
|
||||
|
||||
|
||||
# ==============================================================================
|
||||
# SERVER BINDING & METRICS
|
||||
# ==============================================================================
|
||||
|
||||
[server]
|
||||
|
||||
# TCP port to listen on.
|
||||
# 443 is recommended (looks like normal HTTPS traffic).
|
||||
port = 443
|
||||
|
||||
# IPv4 bind address. "0.0.0.0" = all interfaces.
|
||||
listen_addr_ipv4 = "0.0.0.0"
|
||||
|
||||
# IPv6 bind address. "::" = all interfaces.
|
||||
listen_addr_ipv6 = "::"
|
||||
|
||||
# Unix socket listener (for reverse proxy setups with Nginx/HAProxy).
|
||||
# listen_unix_sock = "/var/run/telemt.sock"
|
||||
# listen_unix_sock_perm = "0660"
|
||||
|
||||
# Enable PROXY protocol header parsing.
|
||||
# Set true ONLY if Telemt is behind HAProxy/Nginx that injects PROXY headers.
|
||||
# If enabled without a proxy in front, clients will fail to connect.
|
||||
proxy_protocol = false
|
||||
|
||||
# Prometheus metrics HTTP endpoint port.
|
||||
# Uncomment to enable. Access at http://your-server:9090/metrics
|
||||
# metrics_port = 9090
|
||||
|
||||
# IP ranges allowed to access the metrics endpoint.
|
||||
metrics_whitelist = [
|
||||
"127.0.0.1/32",
|
||||
"::1/128"
|
||||
]
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# Listener Overrides
|
||||
# Define explicit listeners with specific bind IPs and announce IPs.
|
||||
# The announce IP is what gets embedded in tg:// links and sent to ME servers.
|
||||
# You MUST set announce to your server's public IP for ME mode to work.
|
||||
# ------------------------------------------------------------------------------
|
||||
|
||||
# [[server.listeners]]
|
||||
# ip = "0.0.0.0"
|
||||
# announce = "203.0.113.10"
|
||||
# reuse_allow = false
|
||||
|
||||
|
||||
# ==============================================================================
|
||||
# TIMEOUTS (seconds unless noted)
|
||||
# ==============================================================================
|
||||
|
||||
[timeouts]
|
||||
|
||||
# Maximum time for client to complete FakeTLS + MTProto handshake.
|
||||
client_handshake = 15
|
||||
|
||||
# Maximum time to establish TCP connection to upstream Telegram DC.
|
||||
tg_connect = 10
|
||||
|
||||
# TCP keepalive interval for client connections.
|
||||
client_keepalive = 60
|
||||
|
||||
# Maximum client inactivity before dropping the connection.
|
||||
client_ack = 300
|
||||
|
||||
# Instant retry count for a single ME endpoint before giving up on it.
|
||||
me_one_retry = 3
|
||||
|
||||
# Timeout (milliseconds) for a single ME endpoint connection attempt.
|
||||
me_one_timeout_ms = 1500
|
||||
|
||||
|
||||
# ==============================================================================
|
||||
# ANTI-CENSORSHIP / FAKETLS / MASKING
|
||||
# This is where Telemt becomes invisible to Deep Packet Inspection.
|
||||
# ==============================================================================
|
||||
|
||||
[censorship]
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# TLS Domain Fronting
|
||||
# The SNI (Server Name Indication) your proxy presents to connecting clients.
|
||||
# Must be a popular, unblocked HTTPS website in your target country.
|
||||
# DPI sees traffic to this domain. Choose carefully.
|
||||
# Good choices: major CDNs, banks, government sites, search engines.
|
||||
# Bad choices: obscure sites, already-blocked domains.
|
||||
# ------------------------------------------------------------------------------
|
||||
|
||||
tls_domain = "www.google.com"
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# Active Probe Masking
|
||||
# When someone connects but fails the MTProto handshake (wrong secret),
|
||||
# they might be an ISP active prober testing if this is a proxy.
|
||||
#
|
||||
# mask = false: drop the connection (prober knows something is here)
|
||||
# mask = true: transparently proxy them to mask_host (prober sees a real website)
|
||||
#
|
||||
# With mask enabled, your server is indistinguishable from a real web server
|
||||
# to anyone who doesn't have the correct secret.
|
||||
# ------------------------------------------------------------------------------
|
||||
|
||||
mask = true
|
||||
|
||||
# The real web server to forward failed handshakes to.
|
||||
# If omitted, defaults to tls_domain.
|
||||
# mask_host = "www.google.com"
|
||||
|
||||
# Port on the mask host to connect to.
|
||||
mask_port = 443
|
||||
|
||||
# Inject PROXY protocol header when forwarding to mask host.
|
||||
# 0 = disabled, 1 = v1, 2 = v2. Leave disabled unless mask_host expects it.
|
||||
# mask_proxy_protocol = 0
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# TLS Certificate Emulation
|
||||
# ------------------------------------------------------------------------------
|
||||
|
||||
# Size (bytes) of the locally generated fake TLS certificate.
|
||||
# Only used when tls_emulation is disabled.
|
||||
fake_cert_len = 2048
|
||||
|
||||
# KILLER FEATURE: Real-Time TLS Emulation.
|
||||
# Telemt connects to tls_domain, fetches its actual TLS 1.3 certificate chain,
|
||||
# and exactly replicates the byte sizes of ServerHello and Certificate records.
|
||||
# Defeats DPI that uses TLS record length heuristics to detect proxies.
|
||||
# Strongly recommended in censored environments.
|
||||
tls_emulation = true
|
||||
|
||||
# Directory to cache fetched TLS certificates.
|
||||
tls_front_dir = "tlsfront"
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# ServerHello Timing
|
||||
# Real web servers take 30-150ms to respond to ClientHello due to network
|
||||
# latency and crypto processing. A proxy responding in <1ms is suspicious.
|
||||
# These settings add realistic delay to mimic genuine server behavior.
|
||||
# ------------------------------------------------------------------------------
|
||||
|
||||
# Minimum delay before sending ServerHello (milliseconds).
|
||||
server_hello_delay_min_ms = 50
|
||||
|
||||
# Maximum delay before sending ServerHello (milliseconds).
|
||||
server_hello_delay_max_ms = 150
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# TLS Session Tickets
|
||||
# Real TLS 1.3 servers send 1-2 NewSessionTicket messages after handshake.
|
||||
# A server that sends zero tickets is anomalous and may trigger DPI flags.
|
||||
# Set this to match your tls_domain's behavior (usually 2).
|
||||
# ------------------------------------------------------------------------------
|
||||
|
||||
# tls_new_session_tickets = 0
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# Full Certificate Frequency
|
||||
# When tls_emulation is enabled, this controls how often (per client IP)
|
||||
# to send the complete emulated certificate chain.
|
||||
#
|
||||
# > 0: Subsequent connections within TTL seconds get a smaller cached version.
|
||||
# Saves bandwidth but creates a detectable size difference between
|
||||
# first and repeat connections.
|
||||
#
|
||||
# = 0: Every connection gets the full certificate. More bandwidth but
|
||||
# perfectly consistent behavior, no anomalies for DPI to detect.
|
||||
# ------------------------------------------------------------------------------
|
||||
|
||||
tls_full_cert_ttl_secs = 0
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# ALPN Enforcement
|
||||
# Ensure ServerHello responds with the exact ALPN protocol the client requested.
|
||||
# Mismatched ALPN (e.g., client asks h2, server says http/1.1) is a DPI red flag.
|
||||
# ------------------------------------------------------------------------------
|
||||
|
||||
alpn_enforce = true
|
||||
|
||||
|
||||
# ==============================================================================
|
||||
# ACCESS CONTROL & USERS
|
||||
# ==============================================================================
|
||||
|
||||
[access]
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# Replay Attack Protection
|
||||
# DPI can record a legitimate user's handshake and replay it later to probe
|
||||
# whether the server is a proxy. Telemt remembers recent handshake nonces
|
||||
# and rejects duplicates.
|
||||
# ------------------------------------------------------------------------------
|
||||
|
||||
# Number of nonce slots in the replay detection buffer.
|
||||
replay_check_len = 65536
|
||||
|
||||
# How long (seconds) to remember nonces before expiring them.
|
||||
replay_window_secs = 1800
|
||||
|
||||
# Allow clients with incorrect system clocks to connect.
|
||||
# false = reject clients with significant time skew (more secure)
|
||||
# true = accept anyone regardless of clock (more permissive)
|
||||
ignore_time_skew = false
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# User Secrets
|
||||
# Each user needs a unique 32-character hex string as their secret.
|
||||
# Generate with: openssl rand -hex 16
|
||||
#
|
||||
# This secret is embedded in the tg:// link. Anyone with it can connect.
|
||||
# Format: username = "hex_secret"
|
||||
# ------------------------------------------------------------------------------
|
||||
|
||||
[access.users]
|
||||
# alice = "0123456789abcdef0123456789abcdef"
|
||||
# bob = "fedcba9876543210fedcba9876543210"
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# Per-User Connection Limits
|
||||
# Limits concurrent TCP connections per user to prevent secret sharing.
|
||||
# Uncomment and set for each user as needed.
|
||||
# ------------------------------------------------------------------------------
|
||||
|
||||
[access.user_max_tcp_conns]
|
||||
# alice = 100
|
||||
# bob = 50
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# Per-User Expiration Dates
|
||||
# Automatically revoke access after the specified date (ISO 8601 format).
|
||||
# ------------------------------------------------------------------------------
|
||||
|
||||
[access.user_expirations]
|
||||
# alice = "2025-12-31T23:59:59Z"
|
||||
# bob = "2026-06-15T00:00:00Z"
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# Per-User Data Quotas
|
||||
# Maximum total bytes transferred per user. Connection refused after limit.
|
||||
# ------------------------------------------------------------------------------
|
||||
|
||||
[access.user_data_quota]
|
||||
# alice = 107374182400
|
||||
# bob = 53687091200
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# Per-User Unique IP Limits
|
||||
# Maximum number of different IP addresses that can use this secret
|
||||
# at the same time. Highly effective against secret leaking/sharing.
|
||||
# Set to 1 for single-device, 2-3 for phone+desktop, etc.
|
||||
# ------------------------------------------------------------------------------
|
||||
|
||||
[access.user_max_unique_ips]
|
||||
# alice = 3
|
||||
# bob = 2
|
||||
|
||||
|
||||
# ==============================================================================
|
||||
# UPSTREAM ROUTING
|
||||
# Controls how Telemt connects to Telegram servers (or ME servers).
|
||||
# If omitted entirely, uses the OS default route.
|
||||
# ==============================================================================
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# Direct upstream: use the server's own network interface.
|
||||
# You can optionally bind to a specific interface or local IP.
|
||||
# ------------------------------------------------------------------------------
|
||||
|
||||
# [[upstreams]]
|
||||
# type = "direct"
|
||||
# interface = "eth0"
|
||||
# bind_addresses = ["192.0.2.10"]
|
||||
# weight = 1
|
||||
# enabled = true
|
||||
# scopes = "*"
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# SOCKS5 upstream: route Telegram traffic through a SOCKS5 proxy.
|
||||
# Useful if your server's IP is blocked from reaching Telegram DCs.
|
||||
# ------------------------------------------------------------------------------
|
||||
|
||||
# [[upstreams]]
|
||||
# type = "socks5"
|
||||
# address = "198.51.100.30:1080"
|
||||
# username = "proxy-user"
|
||||
# password = "proxy-pass"
|
||||
# weight = 1
|
||||
# enabled = true
|
||||
|
||||
|
||||
# ==============================================================================
|
||||
# DATACENTER OVERRIDES
|
||||
# Force specific DC IDs to route to specific IP:Port combinations.
|
||||
# DC 203 (CDN) is auto-injected by Telemt if not specified here.
|
||||
# ==============================================================================
|
||||
|
||||
# [dc_overrides]
|
||||
# "201" = "149.154.175.50:443"
|
||||
# "202" = ["149.154.167.51:443", "149.154.175.100:443"]
|
||||
10
config.toml
10
config.toml
@@ -4,7 +4,7 @@
|
||||
|
||||
# === General Settings ===
|
||||
[general]
|
||||
use_middle_proxy = false
|
||||
use_middle_proxy = true
|
||||
# Global ad_tag fallback when user has no per-user tag in [access.user_ad_tags]
|
||||
# ad_tag = "00000000000000000000000000000000"
|
||||
# Per-user ad_tag in [access.user_ad_tags] (32 hex from @MTProxybot)
|
||||
@@ -32,8 +32,16 @@ show = "*"
|
||||
port = 443
|
||||
# proxy_protocol = false # Enable if behind HAProxy/nginx with PROXY protocol
|
||||
# metrics_port = 9090
|
||||
# metrics_listen = "0.0.0.0:9090" # Listen address for metrics (overrides metrics_port)
|
||||
# metrics_whitelist = ["127.0.0.1", "::1", "0.0.0.0/0"]
|
||||
|
||||
[server.api]
|
||||
enabled = true
|
||||
listen = "0.0.0.0:9091"
|
||||
whitelist = ["127.0.0.0/8"]
|
||||
minimal_runtime_enabled = false
|
||||
minimal_runtime_cache_ttl_ms = 1000
|
||||
|
||||
# Listen on multiple interfaces/IPs - IPv4
|
||||
[[server.listeners]]
|
||||
ip = "0.0.0.0"
|
||||
|
||||
16
contrib/openbsd/telemt.rcd
Normal file
16
contrib/openbsd/telemt.rcd
Normal file
@@ -0,0 +1,16 @@
|
||||
#!/bin/ksh
|
||||
# /etc/rc.d/telemt
|
||||
#
|
||||
# rc.d(8) script for Telemt MTProxy daemon.
|
||||
# Tokio runtime does not daemonize itself, so rc_bg=YES is used.
|
||||
|
||||
daemon="/usr/local/bin/telemt"
|
||||
daemon_user="_telemt"
|
||||
daemon_flags="/etc/telemt/config.toml"
|
||||
|
||||
. /etc/rc.d/rc.subr
|
||||
|
||||
rc_bg=YES
|
||||
rc_reload=NO
|
||||
|
||||
rc_cmd $1
|
||||
3
contrib/systemd/system-user-telemt.conf
Normal file
3
contrib/systemd/system-user-telemt.conf
Normal file
@@ -0,0 +1,3 @@
|
||||
u telemt - "telemt user" /var/lib/telemt -
|
||||
g telemt - -
|
||||
m telemt telemt
|
||||
21
contrib/systemd/telemt.service
Normal file
21
contrib/systemd/telemt.service
Normal file
@@ -0,0 +1,21 @@
|
||||
[Unit]
|
||||
Description=Telemt
|
||||
Wants=network-online.target
|
||||
After=multi-user.target network.target network-online.target
|
||||
|
||||
[Service]
|
||||
Type=simple
|
||||
User=telemt
|
||||
Group=telemt
|
||||
WorkingDirectory=/var/lib/telemt
|
||||
ExecStart=/usr/bin/telemt /etc/telemt/telemt.toml
|
||||
Restart=on-failure
|
||||
RestartSec=10
|
||||
LimitNOFILE=65536
|
||||
AmbientCapabilities=CAP_NET_BIND_SERVICE
|
||||
CapabilityBoundingSet=CAP_NET_BIND_SERVICE
|
||||
NoNewPrivileges=true
|
||||
|
||||
|
||||
[Install]
|
||||
WantedBy=multi-user.target
|
||||
1
contrib/systemd/tmpfiles-telemt.conf
Normal file
1
contrib/systemd/tmpfiles-telemt.conf
Normal file
@@ -0,0 +1 @@
|
||||
d /var/lib/telemt 700 telemt telemt
|
||||
@@ -7,6 +7,7 @@ services:
|
||||
ports:
|
||||
- "443:443"
|
||||
- "127.0.0.1:9090:9090"
|
||||
- "127.0.0.1:9091:9091"
|
||||
# Allow caching 'proxy-secret' in read-only container
|
||||
working_dir: /run/telemt
|
||||
volumes:
|
||||
|
||||
740
docs/API.md
740
docs/API.md
@@ -13,13 +13,25 @@ API runtime is configured in `[server.api]`.
|
||||
| `listen` | `string` (`IP:PORT`) | `127.0.0.1:9091` | API bind address. |
|
||||
| `whitelist` | `CIDR[]` | `127.0.0.1/32, ::1/128` | Source IP allowlist. Empty list means allow all. |
|
||||
| `auth_header` | `string` | `""` | Exact value for `Authorization` header. Empty disables header auth. |
|
||||
| `request_body_limit_bytes` | `usize` | `65536` | Maximum request body size. |
|
||||
| `request_body_limit_bytes` | `usize` | `65536` | Maximum request body size. Must be `> 0`. |
|
||||
| `minimal_runtime_enabled` | `bool` | `false` | Enables runtime snapshot endpoints requiring ME pool read-lock aggregation. |
|
||||
| `minimal_runtime_cache_ttl_ms` | `u64` | `1000` | Cache TTL for minimal snapshots. `0` disables cache. |
|
||||
| `minimal_runtime_cache_ttl_ms` | `u64` | `1000` | Cache TTL for minimal snapshots. `0` disables cache; valid range is `[0, 60000]`. |
|
||||
| `runtime_edge_enabled` | `bool` | `false` | Enables runtime edge endpoints with cached aggregation payloads. |
|
||||
| `runtime_edge_cache_ttl_ms` | `u64` | `1000` | Cache TTL for runtime edge summary payloads. `0` disables cache. |
|
||||
| `runtime_edge_top_n` | `usize` | `10` | Top-N rows for runtime edge leaderboard payloads. |
|
||||
| `runtime_edge_events_capacity` | `usize` | `256` | Ring-buffer size for `/v1/runtime/events/recent`. |
|
||||
| `read_only` | `bool` | `false` | Disables mutating endpoints. |
|
||||
|
||||
`server.admin_api` is accepted as an alias for backward compatibility.
|
||||
|
||||
Runtime validation for API config:
|
||||
- `server.api.listen` must be a valid `IP:PORT`.
|
||||
- `server.api.request_body_limit_bytes` must be `> 0`.
|
||||
- `server.api.minimal_runtime_cache_ttl_ms` must be within `[0, 60000]`.
|
||||
- `server.api.runtime_edge_cache_ttl_ms` must be within `[0, 60000]`.
|
||||
- `server.api.runtime_edge_top_n` must be within `[1, 1000]`.
|
||||
- `server.api.runtime_edge_events_capacity` must be within `[16, 4096]`.
|
||||
|
||||
## Protocol Contract
|
||||
|
||||
| Item | Value |
|
||||
@@ -51,23 +63,52 @@ API runtime is configured in `[server.api]`.
|
||||
}
|
||||
```
|
||||
|
||||
## Request Processing Order
|
||||
|
||||
Requests are processed in this order:
|
||||
1. `api_enabled` gate (`503 api_disabled` if disabled).
|
||||
2. Source IP whitelist gate (`403 forbidden`).
|
||||
3. `Authorization` header gate when configured (`401 unauthorized`).
|
||||
4. Route and method matching (`404 not_found` or `405 method_not_allowed`).
|
||||
5. `read_only` gate for mutating routes (`403 read_only`).
|
||||
6. Request body read/limit/JSON decode (`413 payload_too_large`, `400 bad_request`).
|
||||
7. Business validation and config write path.
|
||||
|
||||
Notes:
|
||||
- Whitelist is evaluated against the direct TCP peer IP (`SocketAddr::ip`), without `X-Forwarded-For` support.
|
||||
- `Authorization` check is exact string equality against configured `auth_header`.
|
||||
|
||||
## Endpoint Matrix
|
||||
|
||||
| Method | Path | Body | Success | `data` contract |
|
||||
| --- | --- | --- | --- | --- |
|
||||
| `GET` | `/v1/health` | none | `200` | `HealthData` |
|
||||
| `GET` | `/v1/system/info` | none | `200` | `SystemInfoData` |
|
||||
| `GET` | `/v1/runtime/gates` | none | `200` | `RuntimeGatesData` |
|
||||
| `GET` | `/v1/runtime/initialization` | none | `200` | `RuntimeInitializationData` |
|
||||
| `GET` | `/v1/limits/effective` | none | `200` | `EffectiveLimitsData` |
|
||||
| `GET` | `/v1/security/posture` | none | `200` | `SecurityPostureData` |
|
||||
| `GET` | `/v1/security/whitelist` | none | `200` | `SecurityWhitelistData` |
|
||||
| `GET` | `/v1/stats/summary` | none | `200` | `SummaryData` |
|
||||
| `GET` | `/v1/stats/zero/all` | none | `200` | `ZeroAllData` |
|
||||
| `GET` | `/v1/stats/upstreams` | none | `200` | `UpstreamsData` |
|
||||
| `GET` | `/v1/stats/minimal/all` | none | `200` | `MinimalAllData` |
|
||||
| `GET` | `/v1/stats/me-writers` | none | `200` | `MeWritersData` |
|
||||
| `GET` | `/v1/stats/dcs` | none | `200` | `DcStatusData` |
|
||||
| `GET` | `/v1/runtime/me_pool_state` | none | `200` | `RuntimeMePoolStateData` |
|
||||
| `GET` | `/v1/runtime/me_quality` | none | `200` | `RuntimeMeQualityData` |
|
||||
| `GET` | `/v1/runtime/upstream_quality` | none | `200` | `RuntimeUpstreamQualityData` |
|
||||
| `GET` | `/v1/runtime/nat_stun` | none | `200` | `RuntimeNatStunData` |
|
||||
| `GET` | `/v1/runtime/me-selftest` | none | `200` | `RuntimeMeSelftestData` |
|
||||
| `GET` | `/v1/runtime/connections/summary` | none | `200` | `RuntimeEdgeConnectionsSummaryData` |
|
||||
| `GET` | `/v1/runtime/events/recent` | none | `200` | `RuntimeEdgeEventsData` |
|
||||
| `GET` | `/v1/stats/users` | none | `200` | `UserInfo[]` |
|
||||
| `GET` | `/v1/users` | none | `200` | `UserInfo[]` |
|
||||
| `POST` | `/v1/users` | `CreateUserRequest` | `201` | `CreateUserResponse` |
|
||||
| `GET` | `/v1/users/{username}` | none | `200` | `UserInfo` |
|
||||
| `PATCH` | `/v1/users/{username}` | `PatchUserRequest` | `200` | `UserInfo` |
|
||||
| `DELETE` | `/v1/users/{username}` | none | `200` | `string` (deleted username) |
|
||||
| `POST` | `/v1/users/{username}/rotate-secret` | `RotateSecretRequest` or empty body | `200` | `CreateUserResponse` |
|
||||
| `POST` | `/v1/users/{username}/rotate-secret` | `RotateSecretRequest` or empty body | `404` | `ErrorResponse` (`not_found`, current runtime behavior) |
|
||||
|
||||
## Common Error Codes
|
||||
|
||||
@@ -77,8 +118,8 @@ API runtime is configured in `[server.api]`.
|
||||
| `401` | `unauthorized` | Missing/invalid `Authorization` when `auth_header` is configured. |
|
||||
| `403` | `forbidden` | Source IP is not allowed by whitelist. |
|
||||
| `403` | `read_only` | Mutating endpoint called while `read_only=true`. |
|
||||
| `404` | `not_found` | Unknown route or unknown user. |
|
||||
| `405` | `method_not_allowed` | Unsupported method for an existing user route. |
|
||||
| `404` | `not_found` | Unknown route, unknown user, or unsupported sub-route (including current `rotate-secret` route). |
|
||||
| `405` | `method_not_allowed` | Unsupported method for `/v1/users/{username}` route shape. |
|
||||
| `409` | `revision_conflict` | `If-Match` revision mismatch. |
|
||||
| `409` | `user_exists` | User already exists on create. |
|
||||
| `409` | `last_user_forbidden` | Attempt to delete last configured user. |
|
||||
@@ -86,6 +127,34 @@ API runtime is configured in `[server.api]`.
|
||||
| `500` | `internal_error` | Internal error (I/O, serialization, config load/save). |
|
||||
| `503` | `api_disabled` | API disabled in config. |
|
||||
|
||||
## Routing and Method Edge Cases
|
||||
|
||||
| Case | Behavior |
|
||||
| --- | --- |
|
||||
| Path matching | Exact match on `req.uri().path()`. Query string does not affect route matching. |
|
||||
| Trailing slash | Not normalized. Example: `/v1/users/` is `404`. |
|
||||
| Username route with extra slash | `/v1/users/{username}/...` is not treated as a user route and returns `404`. |
|
||||
| `PUT /v1/users/{username}` | `405 method_not_allowed`. |
|
||||
| `POST /v1/users/{username}` | `404 not_found`. |
|
||||
| `POST /v1/users/{username}/rotate-secret` | `404 not_found` in current release due route matcher limitation. |
|
||||
|
||||
## Body and JSON Semantics
|
||||
|
||||
- Request body is read only for mutating routes that define a body contract.
|
||||
- Body size limit is enforced during streaming read (`413 payload_too_large`).
|
||||
- Invalid transport body frame returns `400 bad_request` (`Invalid request body`).
|
||||
- Invalid JSON returns `400 bad_request` (`Invalid JSON body`).
|
||||
- `Content-Type` is not required for JSON parsing.
|
||||
- Unknown JSON fields are ignored by deserialization.
|
||||
- `PATCH` updates only provided fields and does not support explicit clearing of optional fields.
|
||||
- `If-Match` supports both quoted and unquoted values; surrounding whitespace is trimmed.
|
||||
|
||||
## Query Parameters
|
||||
|
||||
| Endpoint | Query | Behavior |
|
||||
| --- | --- | --- |
|
||||
| `GET /v1/runtime/events/recent` | `limit=<usize>` | Optional. Invalid/missing value falls back to default `50`. Effective value is clamped to `[1, 1000]` and additionally bounded by ring-buffer capacity. |
|
||||
|
||||
## Request Contracts
|
||||
|
||||
### `CreateUserRequest`
|
||||
@@ -114,6 +183,8 @@ API runtime is configured in `[server.api]`.
|
||||
| --- | --- | --- | --- |
|
||||
| `secret` | `string` | no | Exactly 32 hex chars. If missing, generated automatically. |
|
||||
|
||||
Note: the request contract is defined, but the corresponding route currently returns `404` (see routing edge cases).
|
||||
|
||||
## Response Data Contracts
|
||||
|
||||
### `HealthData`
|
||||
@@ -131,6 +202,501 @@ API runtime is configured in `[server.api]`.
|
||||
| `handshake_timeouts_total` | `u64` | Handshake timeout count. |
|
||||
| `configured_users` | `usize` | Number of configured users in config. |
|
||||
|
||||
### `SystemInfoData`
|
||||
| Field | Type | Description |
|
||||
| --- | --- | --- |
|
||||
| `version` | `string` | Binary version (`CARGO_PKG_VERSION`). |
|
||||
| `target_arch` | `string` | Target architecture (`std::env::consts::ARCH`). |
|
||||
| `target_os` | `string` | Target OS (`std::env::consts::OS`). |
|
||||
| `build_profile` | `string` | Build profile (`PROFILE` env when available). |
|
||||
| `git_commit` | `string?` | Optional commit hash from build env metadata. |
|
||||
| `build_time_utc` | `string?` | Optional build timestamp from build env metadata. |
|
||||
| `rustc_version` | `string?` | Optional compiler version from build env metadata. |
|
||||
| `process_started_at_epoch_secs` | `u64` | Process start time as Unix epoch seconds. |
|
||||
| `uptime_seconds` | `f64` | Process uptime in seconds. |
|
||||
| `config_path` | `string` | Active config file path used by runtime. |
|
||||
| `config_hash` | `string` | SHA-256 hash of current config content (same value as envelope `revision`). |
|
||||
| `config_reload_count` | `u64` | Number of successfully observed config updates since process start. |
|
||||
| `last_config_reload_epoch_secs` | `u64?` | Unix epoch seconds of the latest observed config reload; null/absent before first reload. |
|
||||
|
||||
### `RuntimeGatesData`
|
||||
| Field | Type | Description |
|
||||
| --- | --- | --- |
|
||||
| `accepting_new_connections` | `bool` | Current admission-gate state for new listener accepts. |
|
||||
| `conditional_cast_enabled` | `bool` | Whether conditional ME admission logic is enabled (`general.use_middle_proxy`). |
|
||||
| `me_runtime_ready` | `bool` | Current ME runtime readiness status used for conditional gate decisions. |
|
||||
| `me2dc_fallback_enabled` | `bool` | Whether ME -> direct fallback is enabled. |
|
||||
| `use_middle_proxy` | `bool` | Current transport mode preference. |
|
||||
| `startup_status` | `string` | Startup status (`pending`, `initializing`, `ready`, `failed`, `skipped`). |
|
||||
| `startup_stage` | `string` | Current startup stage identifier. |
|
||||
| `startup_progress_pct` | `f64` | Startup progress percentage (`0..100`). |
|
||||
|
||||
### `RuntimeInitializationData`
|
||||
| Field | Type | Description |
|
||||
| --- | --- | --- |
|
||||
| `status` | `string` | Startup status (`pending`, `initializing`, `ready`, `failed`, `skipped`). |
|
||||
| `degraded` | `bool` | Whether runtime is currently in degraded mode. |
|
||||
| `current_stage` | `string` | Current startup stage identifier. |
|
||||
| `progress_pct` | `f64` | Overall startup progress percentage (`0..100`). |
|
||||
| `started_at_epoch_secs` | `u64` | Process start timestamp (Unix seconds). |
|
||||
| `ready_at_epoch_secs` | `u64?` | Timestamp when startup reached ready state; absent until ready. |
|
||||
| `total_elapsed_ms` | `u64` | Elapsed startup duration in milliseconds. |
|
||||
| `transport_mode` | `string` | Startup transport mode (`middle_proxy` or `direct`). |
|
||||
| `me` | `RuntimeInitializationMeData` | ME startup substate snapshot. |
|
||||
| `components` | `RuntimeInitializationComponentData[]` | Per-component startup timeline and status. |
|
||||
|
||||
#### `RuntimeInitializationMeData`
|
||||
| Field | Type | Description |
|
||||
| --- | --- | --- |
|
||||
| `status` | `string` | ME startup status (`pending`, `initializing`, `ready`, `failed`, `skipped`). |
|
||||
| `current_stage` | `string` | Current ME startup stage identifier. |
|
||||
| `progress_pct` | `f64` | ME startup progress percentage (`0..100`). |
|
||||
| `init_attempt` | `u32` | Current ME init attempt counter. |
|
||||
| `retry_limit` | `string` | Retry limit (`"unlimited"` or numeric string). |
|
||||
| `last_error` | `string?` | Last ME initialization error text when present. |
|
||||
|
||||
#### `RuntimeInitializationComponentData`
|
||||
| Field | Type | Description |
|
||||
| --- | --- | --- |
|
||||
| `id` | `string` | Startup component identifier. |
|
||||
| `title` | `string` | Human-readable component title. |
|
||||
| `status` | `string` | Component status (`pending`, `running`, `ready`, `failed`, `skipped`). |
|
||||
| `started_at_epoch_ms` | `u64?` | Component start timestamp in Unix milliseconds. |
|
||||
| `finished_at_epoch_ms` | `u64?` | Component finish timestamp in Unix milliseconds. |
|
||||
| `duration_ms` | `u64?` | Component duration in milliseconds. |
|
||||
| `attempts` | `u32` | Attempt counter for this component. |
|
||||
| `details` | `string?` | Optional short status details text. |
|
||||
|
||||
### `EffectiveLimitsData`
|
||||
| Field | Type | Description |
|
||||
| --- | --- | --- |
|
||||
| `update_every_secs` | `u64` | Effective unified updater interval. |
|
||||
| `me_reinit_every_secs` | `u64` | Effective ME periodic reinit interval. |
|
||||
| `me_pool_force_close_secs` | `u64` | Effective stale-writer force-close timeout. |
|
||||
| `timeouts` | `EffectiveTimeoutLimits` | Effective timeout policy snapshot. |
|
||||
| `upstream` | `EffectiveUpstreamLimits` | Effective upstream connect/retry limits. |
|
||||
| `middle_proxy` | `EffectiveMiddleProxyLimits` | Effective ME pool/floor/reconnect limits. |
|
||||
| `user_ip_policy` | `EffectiveUserIpPolicyLimits` | Effective unique-IP policy mode/window. |
|
||||
|
||||
#### `EffectiveTimeoutLimits`
|
||||
| Field | Type | Description |
|
||||
| --- | --- | --- |
|
||||
| `client_handshake_secs` | `u64` | Client handshake timeout. |
|
||||
| `tg_connect_secs` | `u64` | Upstream Telegram connect timeout. |
|
||||
| `client_keepalive_secs` | `u64` | Client keepalive interval. |
|
||||
| `client_ack_secs` | `u64` | ACK timeout. |
|
||||
| `me_one_retry` | `u8` | Fast retry count for single-endpoint ME DC. |
|
||||
| `me_one_timeout_ms` | `u64` | Fast retry timeout per attempt for single-endpoint ME DC. |
|
||||
|
||||
#### `EffectiveUpstreamLimits`
|
||||
| Field | Type | Description |
|
||||
| --- | --- | --- |
|
||||
| `connect_retry_attempts` | `u32` | Upstream connect retry attempts. |
|
||||
| `connect_retry_backoff_ms` | `u64` | Upstream retry backoff delay. |
|
||||
| `connect_budget_ms` | `u64` | Total connect wall-clock budget across retries. |
|
||||
| `unhealthy_fail_threshold` | `u32` | Consecutive fail threshold for unhealthy marking. |
|
||||
| `connect_failfast_hard_errors` | `bool` | Whether hard errors skip additional retries. |
|
||||
|
||||
#### `EffectiveMiddleProxyLimits`
|
||||
| Field | Type | Description |
|
||||
| --- | --- | --- |
|
||||
| `floor_mode` | `string` | Effective floor mode (`static` or `adaptive`). |
|
||||
| `adaptive_floor_idle_secs` | `u64` | Adaptive floor idle threshold. |
|
||||
| `adaptive_floor_min_writers_single_endpoint` | `u8` | Adaptive floor minimum for single-endpoint DCs. |
|
||||
| `adaptive_floor_min_writers_multi_endpoint` | `u8` | Adaptive floor minimum for multi-endpoint DCs. |
|
||||
| `adaptive_floor_recover_grace_secs` | `u64` | Adaptive floor recovery grace period. |
|
||||
| `adaptive_floor_writers_per_core_total` | `u16` | Target total writers-per-core budget in adaptive mode. |
|
||||
| `adaptive_floor_cpu_cores_override` | `u16` | Manual CPU core override (`0` means auto-detect). |
|
||||
| `adaptive_floor_max_extra_writers_single_per_core` | `u16` | Extra per-core adaptive headroom for single-endpoint DCs. |
|
||||
| `adaptive_floor_max_extra_writers_multi_per_core` | `u16` | Extra per-core adaptive headroom for multi-endpoint DCs. |
|
||||
| `adaptive_floor_max_active_writers_per_core` | `u16` | Active writer cap per CPU core. |
|
||||
| `adaptive_floor_max_warm_writers_per_core` | `u16` | Warm writer cap per CPU core. |
|
||||
| `adaptive_floor_max_active_writers_global` | `u32` | Global active writer cap. |
|
||||
| `adaptive_floor_max_warm_writers_global` | `u32` | Global warm writer cap. |
|
||||
| `reconnect_max_concurrent_per_dc` | `u32` | Max concurrent reconnects per DC. |
|
||||
| `reconnect_backoff_base_ms` | `u64` | Reconnect base backoff. |
|
||||
| `reconnect_backoff_cap_ms` | `u64` | Reconnect backoff cap. |
|
||||
| `reconnect_fast_retry_count` | `u32` | Number of fast retries before standard backoff strategy. |
|
||||
| `writer_pick_mode` | `string` | Writer picker mode (`sorted_rr`, `p2c`). |
|
||||
| `writer_pick_sample_size` | `u8` | Candidate sample size for `p2c` picker mode. |
|
||||
| `me2dc_fallback` | `bool` | Effective ME -> direct fallback flag. |
|
||||
|
||||
#### `EffectiveUserIpPolicyLimits`
|
||||
| Field | Type | Description |
|
||||
| --- | --- | --- |
|
||||
| `mode` | `string` | Unique-IP policy mode (`active_window`, `time_window`, `combined`). |
|
||||
| `window_secs` | `u64` | Time window length used by unique-IP policy. |
|
||||
|
||||
### `SecurityPostureData`
|
||||
| Field | Type | Description |
|
||||
| --- | --- | --- |
|
||||
| `api_read_only` | `bool` | Current API read-only state. |
|
||||
| `api_whitelist_enabled` | `bool` | Whether whitelist filtering is active. |
|
||||
| `api_whitelist_entries` | `usize` | Number of configured whitelist CIDRs. |
|
||||
| `api_auth_header_enabled` | `bool` | Whether `Authorization` header validation is active. |
|
||||
| `proxy_protocol_enabled` | `bool` | Global PROXY protocol accept setting. |
|
||||
| `log_level` | `string` | Effective log level (`debug`, `verbose`, `normal`, `silent`). |
|
||||
| `telemetry_core_enabled` | `bool` | Core telemetry toggle. |
|
||||
| `telemetry_user_enabled` | `bool` | Per-user telemetry toggle. |
|
||||
| `telemetry_me_level` | `string` | ME telemetry level (`silent`, `normal`, `debug`). |
|
||||
|
||||
### `SecurityWhitelistData`
|
||||
| Field | Type | Description |
|
||||
| --- | --- | --- |
|
||||
| `generated_at_epoch_secs` | `u64` | Snapshot generation timestamp. |
|
||||
| `enabled` | `bool` | `true` when whitelist has at least one CIDR entry. |
|
||||
| `entries_total` | `usize` | Number of whitelist CIDR entries. |
|
||||
| `entries` | `string[]` | Whitelist CIDR entries as strings. |
|
||||
|
||||
### `RuntimeMePoolStateData`
|
||||
| Field | Type | Description |
|
||||
| --- | --- | --- |
|
||||
| `enabled` | `bool` | Runtime payload availability. |
|
||||
| `reason` | `string?` | `source_unavailable` when ME pool snapshot is unavailable. |
|
||||
| `generated_at_epoch_secs` | `u64` | Snapshot generation timestamp. |
|
||||
| `data` | `RuntimeMePoolStatePayload?` | Null when unavailable. |
|
||||
|
||||
#### `RuntimeMePoolStatePayload`
|
||||
| Field | Type | Description |
|
||||
| --- | --- | --- |
|
||||
| `generations` | `RuntimeMePoolStateGenerationData` | Active/warm/pending/draining generation snapshot. |
|
||||
| `hardswap` | `RuntimeMePoolStateHardswapData` | Hardswap state flags. |
|
||||
| `writers` | `RuntimeMePoolStateWriterData` | Writer total/contour/health counters. |
|
||||
| `refill` | `RuntimeMePoolStateRefillData` | In-flight refill counters by DC/family. |
|
||||
|
||||
#### `RuntimeMePoolStateGenerationData`
|
||||
| Field | Type | Description |
|
||||
| --- | --- | --- |
|
||||
| `active_generation` | `u64` | Active pool generation id. |
|
||||
| `warm_generation` | `u64` | Warm pool generation id. |
|
||||
| `pending_hardswap_generation` | `u64` | Pending hardswap generation id (`0` when none). |
|
||||
| `pending_hardswap_age_secs` | `u64?` | Age of pending hardswap generation in seconds. |
|
||||
| `draining_generations` | `u64[]` | Distinct generation ids currently draining. |
|
||||
|
||||
#### `RuntimeMePoolStateHardswapData`
|
||||
| Field | Type | Description |
|
||||
| --- | --- | --- |
|
||||
| `enabled` | `bool` | Hardswap feature toggle. |
|
||||
| `pending` | `bool` | `true` when pending generation is non-zero. |
|
||||
|
||||
#### `RuntimeMePoolStateWriterData`
|
||||
| Field | Type | Description |
|
||||
| --- | --- | --- |
|
||||
| `total` | `usize` | Total writer rows in snapshot. |
|
||||
| `alive_non_draining` | `usize` | Alive writers excluding draining ones. |
|
||||
| `draining` | `usize` | Writers marked draining. |
|
||||
| `degraded` | `usize` | Non-draining degraded writers. |
|
||||
| `contour` | `RuntimeMePoolStateWriterContourData` | Counts by contour state. |
|
||||
| `health` | `RuntimeMePoolStateWriterHealthData` | Counts by health bucket. |
|
||||
|
||||
#### `RuntimeMePoolStateWriterContourData`
|
||||
| Field | Type | Description |
|
||||
| --- | --- | --- |
|
||||
| `warm` | `usize` | Writers in warm contour. |
|
||||
| `active` | `usize` | Writers in active contour. |
|
||||
| `draining` | `usize` | Writers in draining contour. |
|
||||
|
||||
#### `RuntimeMePoolStateWriterHealthData`
|
||||
| Field | Type | Description |
|
||||
| --- | --- | --- |
|
||||
| `healthy` | `usize` | Non-draining non-degraded writers. |
|
||||
| `degraded` | `usize` | Non-draining degraded writers. |
|
||||
| `draining` | `usize` | Draining writers. |
|
||||
|
||||
#### `RuntimeMePoolStateRefillData`
|
||||
| Field | Type | Description |
|
||||
| --- | --- | --- |
|
||||
| `inflight_endpoints_total` | `usize` | Total in-flight endpoint refill operations. |
|
||||
| `inflight_dc_total` | `usize` | Number of distinct DC+family keys with refill in flight. |
|
||||
| `by_dc` | `RuntimeMePoolStateRefillDcData[]` | Per-DC refill rows. |
|
||||
|
||||
#### `RuntimeMePoolStateRefillDcData`
|
||||
| Field | Type | Description |
|
||||
| --- | --- | --- |
|
||||
| `dc` | `i16` | Telegram DC id. |
|
||||
| `family` | `string` | Address family label (`V4`, `V6`). |
|
||||
| `inflight` | `usize` | In-flight refill operations for this row. |
|
||||
|
||||
### `RuntimeMeQualityData`
|
||||
| Field | Type | Description |
|
||||
| --- | --- | --- |
|
||||
| `enabled` | `bool` | Runtime payload availability. |
|
||||
| `reason` | `string?` | `source_unavailable` when ME pool snapshot is unavailable. |
|
||||
| `generated_at_epoch_secs` | `u64` | Snapshot generation timestamp. |
|
||||
| `data` | `RuntimeMeQualityPayload?` | Null when unavailable. |
|
||||
|
||||
#### `RuntimeMeQualityPayload`
|
||||
| Field | Type | Description |
|
||||
| --- | --- | --- |
|
||||
| `counters` | `RuntimeMeQualityCountersData` | Key ME lifecycle/error counters. |
|
||||
| `route_drops` | `RuntimeMeQualityRouteDropData` | Route drop counters by reason. |
|
||||
| `dc_rtt` | `RuntimeMeQualityDcRttData[]` | Per-DC RTT and writer coverage rows. |
|
||||
|
||||
#### `RuntimeMeQualityCountersData`
|
||||
| Field | Type | Description |
|
||||
| --- | --- | --- |
|
||||
| `idle_close_by_peer_total` | `u64` | Peer-initiated idle closes. |
|
||||
| `reader_eof_total` | `u64` | Reader EOF events. |
|
||||
| `kdf_drift_total` | `u64` | KDF drift detections. |
|
||||
| `kdf_port_only_drift_total` | `u64` | KDF port-only drift detections. |
|
||||
| `reconnect_attempt_total` | `u64` | Reconnect attempts. |
|
||||
| `reconnect_success_total` | `u64` | Successful reconnects. |
|
||||
|
||||
#### `RuntimeMeQualityRouteDropData`
|
||||
| Field | Type | Description |
|
||||
| --- | --- | --- |
|
||||
| `no_conn_total` | `u64` | Route drops with no connection mapping. |
|
||||
| `channel_closed_total` | `u64` | Route drops because destination channel is closed. |
|
||||
| `queue_full_total` | `u64` | Route drops due to queue backpressure (aggregate). |
|
||||
| `queue_full_base_total` | `u64` | Route drops in base-queue path. |
|
||||
| `queue_full_high_total` | `u64` | Route drops in high-priority queue path. |
|
||||
|
||||
#### `RuntimeMeQualityDcRttData`
|
||||
| Field | Type | Description |
|
||||
| --- | --- | --- |
|
||||
| `dc` | `i16` | Telegram DC id. |
|
||||
| `rtt_ema_ms` | `f64?` | RTT EMA for this DC. |
|
||||
| `alive_writers` | `usize` | Alive writers currently mapped to this DC. |
|
||||
| `required_writers` | `usize` | Target writer floor for this DC. |
|
||||
| `coverage_pct` | `f64` | `alive_writers / required_writers * 100`. |
|
||||
|
||||
### `RuntimeUpstreamQualityData`
|
||||
| Field | Type | Description |
|
||||
| --- | --- | --- |
|
||||
| `enabled` | `bool` | Runtime payload availability. |
|
||||
| `reason` | `string?` | `source_unavailable` when upstream runtime snapshot is unavailable. |
|
||||
| `generated_at_epoch_secs` | `u64` | Snapshot generation timestamp. |
|
||||
| `policy` | `RuntimeUpstreamQualityPolicyData` | Effective upstream policy values. |
|
||||
| `counters` | `RuntimeUpstreamQualityCountersData` | Upstream connect counters. |
|
||||
| `summary` | `RuntimeUpstreamQualitySummaryData?` | Aggregate runtime health summary. |
|
||||
| `upstreams` | `RuntimeUpstreamQualityUpstreamData[]?` | Per-upstream runtime rows. |
|
||||
|
||||
#### `RuntimeUpstreamQualityPolicyData`
|
||||
| Field | Type | Description |
|
||||
| --- | --- | --- |
|
||||
| `connect_retry_attempts` | `u32` | Upstream connect retry attempts. |
|
||||
| `connect_retry_backoff_ms` | `u64` | Upstream retry backoff delay. |
|
||||
| `connect_budget_ms` | `u64` | Total connect wall-clock budget. |
|
||||
| `unhealthy_fail_threshold` | `u32` | Consecutive fail threshold for unhealthy marking. |
|
||||
| `connect_failfast_hard_errors` | `bool` | Whether hard errors skip retries. |
|
||||
|
||||
#### `RuntimeUpstreamQualityCountersData`
|
||||
| Field | Type | Description |
|
||||
| --- | --- | --- |
|
||||
| `connect_attempt_total` | `u64` | Total connect attempts. |
|
||||
| `connect_success_total` | `u64` | Successful connects. |
|
||||
| `connect_fail_total` | `u64` | Failed connects. |
|
||||
| `connect_failfast_hard_error_total` | `u64` | Fail-fast hard errors. |
|
||||
|
||||
#### `RuntimeUpstreamQualitySummaryData`
|
||||
| Field | Type | Description |
|
||||
| --- | --- | --- |
|
||||
| `configured_total` | `usize` | Total configured upstream entries. |
|
||||
| `healthy_total` | `usize` | Upstreams currently healthy. |
|
||||
| `unhealthy_total` | `usize` | Upstreams currently unhealthy. |
|
||||
| `direct_total` | `usize` | Direct-route upstream entries. |
|
||||
| `socks4_total` | `usize` | SOCKS4 upstream entries. |
|
||||
| `socks5_total` | `usize` | SOCKS5 upstream entries. |
|
||||
| `shadowsocks_total` | `usize` | Shadowsocks upstream entries. |
|
||||
|
||||
#### `RuntimeUpstreamQualityUpstreamData`
|
||||
| Field | Type | Description |
|
||||
| --- | --- | --- |
|
||||
| `upstream_id` | `usize` | Runtime upstream index. |
|
||||
| `route_kind` | `string` | `direct`, `socks4`, `socks5`, `shadowsocks`. |
|
||||
| `address` | `string` | Upstream address (`direct` literal for direct route kind, `host:port` only for proxied upstreams). |
|
||||
| `weight` | `u16` | Selection weight. |
|
||||
| `scopes` | `string` | Configured scope selector. |
|
||||
| `healthy` | `bool` | Current health flag. |
|
||||
| `fails` | `u32` | Consecutive fail counter. |
|
||||
| `last_check_age_secs` | `u64` | Seconds since last health update. |
|
||||
| `effective_latency_ms` | `f64?` | Effective latency score used by selector. |
|
||||
| `dc` | `RuntimeUpstreamQualityDcData[]` | Per-DC runtime rows. |
|
||||
|
||||
#### `RuntimeUpstreamQualityDcData`
|
||||
| Field | Type | Description |
|
||||
| --- | --- | --- |
|
||||
| `dc` | `i16` | Telegram DC id. |
|
||||
| `latency_ema_ms` | `f64?` | Per-DC latency EMA. |
|
||||
| `ip_preference` | `string` | `unknown`, `prefer_v4`, `prefer_v6`, `both_work`, `unavailable`. |
|
||||
|
||||
### `RuntimeNatStunData`
|
||||
| Field | Type | Description |
|
||||
| --- | --- | --- |
|
||||
| `enabled` | `bool` | Runtime payload availability. |
|
||||
| `reason` | `string?` | `source_unavailable` when shared STUN state is unavailable. |
|
||||
| `generated_at_epoch_secs` | `u64` | Snapshot generation timestamp. |
|
||||
| `data` | `RuntimeNatStunPayload?` | Null when unavailable. |
|
||||
|
||||
#### `RuntimeNatStunPayload`
|
||||
| Field | Type | Description |
|
||||
| --- | --- | --- |
|
||||
| `flags` | `RuntimeNatStunFlagsData` | NAT probe runtime flags. |
|
||||
| `servers` | `RuntimeNatStunServersData` | Configured/live STUN server lists. |
|
||||
| `reflection` | `RuntimeNatStunReflectionBlockData` | Reflection cache data for v4/v6. |
|
||||
| `stun_backoff_remaining_ms` | `u64?` | Remaining retry backoff (milliseconds). |
|
||||
|
||||
#### `RuntimeNatStunFlagsData`
|
||||
| Field | Type | Description |
|
||||
| --- | --- | --- |
|
||||
| `nat_probe_enabled` | `bool` | Current NAT probe enable state. |
|
||||
| `nat_probe_disabled_runtime` | `bool` | Runtime disable flag due to failures/conditions. |
|
||||
| `nat_probe_attempts` | `u8` | Configured NAT probe attempt count. |
|
||||
|
||||
#### `RuntimeNatStunServersData`
|
||||
| Field | Type | Description |
|
||||
| --- | --- | --- |
|
||||
| `configured` | `string[]` | Configured STUN server entries. |
|
||||
| `live` | `string[]` | Runtime live STUN server entries. |
|
||||
| `live_total` | `usize` | Number of live STUN entries. |
|
||||
|
||||
#### `RuntimeNatStunReflectionBlockData`
|
||||
| Field | Type | Description |
|
||||
| --- | --- | --- |
|
||||
| `v4` | `RuntimeNatStunReflectionData?` | IPv4 reflection data. |
|
||||
| `v6` | `RuntimeNatStunReflectionData?` | IPv6 reflection data. |
|
||||
|
||||
#### `RuntimeNatStunReflectionData`
|
||||
| Field | Type | Description |
|
||||
| --- | --- | --- |
|
||||
| `addr` | `string` | Reflected public endpoint (`ip:port`). |
|
||||
| `age_secs` | `u64` | Reflection value age in seconds. |
|
||||
|
||||
### `RuntimeMeSelftestData`
|
||||
| Field | Type | Description |
|
||||
| --- | --- | --- |
|
||||
| `enabled` | `bool` | Runtime payload availability. |
|
||||
| `reason` | `string?` | `source_unavailable` when ME pool is unavailable. |
|
||||
| `generated_at_epoch_secs` | `u64` | Snapshot generation timestamp. |
|
||||
| `data` | `RuntimeMeSelftestPayload?` | Null when unavailable. |
|
||||
|
||||
#### `RuntimeMeSelftestPayload`
|
||||
| Field | Type | Description |
|
||||
| --- | --- | --- |
|
||||
| `kdf` | `RuntimeMeSelftestKdfData` | KDF EWMA health state. |
|
||||
| `timeskew` | `RuntimeMeSelftestTimeskewData` | Date-header skew health state. |
|
||||
| `ip` | `RuntimeMeSelftestIpData` | Interface IP family classification. |
|
||||
| `pid` | `RuntimeMeSelftestPidData` | Process PID marker (`one|non-one`). |
|
||||
| `bnd` | `RuntimeMeSelftestBndData` | SOCKS BND.ADDR/BND.PORT health state. |
|
||||
|
||||
#### `RuntimeMeSelftestKdfData`
|
||||
| Field | Type | Description |
|
||||
| --- | --- | --- |
|
||||
| `state` | `string` | `ok` or `error` based on EWMA threshold. |
|
||||
| `ewma_errors_per_min` | `f64` | EWMA KDF error rate per minute. |
|
||||
| `threshold_errors_per_min` | `f64` | Threshold used for `error` decision. |
|
||||
| `errors_total` | `u64` | Total source errors (`kdf_drift + socks_kdf_strict_reject`). |
|
||||
|
||||
#### `RuntimeMeSelftestTimeskewData`
|
||||
| Field | Type | Description |
|
||||
| --- | --- | --- |
|
||||
| `state` | `string` | `ok` or `error` (`max_skew_secs_15m > 60` => `error`). |
|
||||
| `max_skew_secs_15m` | `u64?` | Maximum observed skew in the last 15 minutes. |
|
||||
| `samples_15m` | `usize` | Number of skew samples in the last 15 minutes. |
|
||||
| `last_skew_secs` | `u64?` | Latest observed skew value. |
|
||||
| `last_source` | `string?` | Latest skew source marker. |
|
||||
| `last_seen_age_secs` | `u64?` | Age of the latest skew sample. |
|
||||
|
||||
#### `RuntimeMeSelftestIpData`
|
||||
| Field | Type | Description |
|
||||
| --- | --- | --- |
|
||||
| `v4` | `RuntimeMeSelftestIpFamilyData?` | IPv4 interface probe result; absent when unknown. |
|
||||
| `v6` | `RuntimeMeSelftestIpFamilyData?` | IPv6 interface probe result; absent when unknown. |
|
||||
|
||||
#### `RuntimeMeSelftestIpFamilyData`
|
||||
| Field | Type | Description |
|
||||
| --- | --- | --- |
|
||||
| `addr` | `string` | Detected interface IP. |
|
||||
| `state` | `string` | `good`, `bogon`, or `loopback`. |
|
||||
|
||||
#### `RuntimeMeSelftestPidData`
|
||||
| Field | Type | Description |
|
||||
| --- | --- | --- |
|
||||
| `pid` | `u32` | Current process PID. |
|
||||
| `state` | `string` | `one` when PID=1, otherwise `non-one`. |
|
||||
|
||||
#### `RuntimeMeSelftestBndData`
|
||||
| Field | Type | Description |
|
||||
| --- | --- | --- |
|
||||
| `addr_state` | `string` | `ok`, `bogon`, or `error`. |
|
||||
| `port_state` | `string` | `ok`, `zero`, or `error`. |
|
||||
| `last_addr` | `string?` | Latest observed SOCKS BND address. |
|
||||
| `last_seen_age_secs` | `u64?` | Age of latest BND sample. |
|
||||
|
||||
### `RuntimeEdgeConnectionsSummaryData`
|
||||
| Field | Type | Description |
|
||||
| --- | --- | --- |
|
||||
| `enabled` | `bool` | Endpoint availability under `runtime_edge_enabled`. |
|
||||
| `reason` | `string?` | `feature_disabled` or `source_unavailable`. |
|
||||
| `generated_at_epoch_secs` | `u64` | Snapshot generation timestamp. |
|
||||
| `data` | `RuntimeEdgeConnectionsSummaryPayload?` | Null when unavailable. |
|
||||
|
||||
#### `RuntimeEdgeConnectionsSummaryPayload`
|
||||
| Field | Type | Description |
|
||||
| --- | --- | --- |
|
||||
| `cache` | `RuntimeEdgeConnectionCacheData` | Runtime edge cache metadata. |
|
||||
| `totals` | `RuntimeEdgeConnectionTotalsData` | Connection totals block. |
|
||||
| `top` | `RuntimeEdgeConnectionTopData` | Top-N leaderboard blocks. |
|
||||
| `telemetry` | `RuntimeEdgeConnectionTelemetryData` | Telemetry-policy flags for counters. |
|
||||
|
||||
#### `RuntimeEdgeConnectionCacheData`
|
||||
| Field | Type | Description |
|
||||
| --- | --- | --- |
|
||||
| `ttl_ms` | `u64` | Configured cache TTL in milliseconds. |
|
||||
| `served_from_cache` | `bool` | `true` when payload is served from cache. |
|
||||
| `stale_cache_used` | `bool` | `true` when stale cache is used because recompute is busy. |
|
||||
|
||||
#### `RuntimeEdgeConnectionTotalsData`
|
||||
| Field | Type | Description |
|
||||
| --- | --- | --- |
|
||||
| `current_connections` | `u64` | Current global live connections. |
|
||||
| `current_connections_me` | `u64` | Current live connections routed through ME. |
|
||||
| `current_connections_direct` | `u64` | Current live connections routed through direct path. |
|
||||
| `active_users` | `usize` | Users with `current_connections > 0`. |
|
||||
|
||||
#### `RuntimeEdgeConnectionTopData`
|
||||
| Field | Type | Description |
|
||||
| --- | --- | --- |
|
||||
| `limit` | `usize` | Effective Top-N row count. |
|
||||
| `by_connections` | `RuntimeEdgeConnectionUserData[]` | Users sorted by current connections. |
|
||||
| `by_throughput` | `RuntimeEdgeConnectionUserData[]` | Users sorted by cumulative octets. |
|
||||
|
||||
#### `RuntimeEdgeConnectionUserData`
|
||||
| Field | Type | Description |
|
||||
| --- | --- | --- |
|
||||
| `username` | `string` | Username. |
|
||||
| `current_connections` | `u64` | Current live connections for user. |
|
||||
| `total_octets` | `u64` | Cumulative (`client->proxy + proxy->client`) octets. |
|
||||
|
||||
#### `RuntimeEdgeConnectionTelemetryData`
|
||||
| Field | Type | Description |
|
||||
| --- | --- | --- |
|
||||
| `user_enabled` | `bool` | Per-user telemetry enable flag. |
|
||||
| `throughput_is_cumulative` | `bool` | Always `true` in current implementation. |
|
||||
|
||||
### `RuntimeEdgeEventsData`
|
||||
| Field | Type | Description |
|
||||
| --- | --- | --- |
|
||||
| `enabled` | `bool` | Endpoint availability under `runtime_edge_enabled`. |
|
||||
| `reason` | `string?` | `feature_disabled` when endpoint is disabled. |
|
||||
| `generated_at_epoch_secs` | `u64` | Snapshot generation timestamp. |
|
||||
| `data` | `RuntimeEdgeEventsPayload?` | Null when unavailable. |
|
||||
|
||||
#### `RuntimeEdgeEventsPayload`
|
||||
| Field | Type | Description |
|
||||
| --- | --- | --- |
|
||||
| `capacity` | `usize` | Effective ring-buffer capacity. |
|
||||
| `dropped_total` | `u64` | Count of oldest events dropped due to capacity pressure. |
|
||||
| `events` | `ApiEventRecord[]` | Recent events in chronological order. |
|
||||
|
||||
#### `ApiEventRecord`
|
||||
| Field | Type | Description |
|
||||
| --- | --- | --- |
|
||||
| `seq` | `u64` | Monotonic sequence number. |
|
||||
| `ts_epoch_secs` | `u64` | Event timestamp (Unix seconds). |
|
||||
| `event_type` | `string` | Event kind identifier. |
|
||||
| `context` | `string` | Context text (truncated to implementation-defined max length). |
|
||||
|
||||
### `ZeroAllData`
|
||||
| Field | Type | Description |
|
||||
| --- | --- | --- |
|
||||
@@ -173,6 +739,48 @@ API runtime is configured in `[server.api]`.
|
||||
| `connect_duration_fail_bucket_501_1000ms` | `u64` | Failed connects 501-1000 ms. |
|
||||
| `connect_duration_fail_bucket_gt_1000ms` | `u64` | Failed connects >1000 ms. |
|
||||
|
||||
### `UpstreamsData`
|
||||
| Field | Type | Description |
|
||||
| --- | --- | --- |
|
||||
| `enabled` | `bool` | Runtime upstream snapshot availability according to API config. |
|
||||
| `reason` | `string?` | `feature_disabled` or `source_unavailable` when runtime snapshot is unavailable. |
|
||||
| `generated_at_epoch_secs` | `u64` | Snapshot generation time. |
|
||||
| `zero` | `ZeroUpstreamData` | Always available zero-cost upstream counters block. |
|
||||
| `summary` | `UpstreamSummaryData?` | Runtime upstream aggregate view, null when unavailable. |
|
||||
| `upstreams` | `UpstreamStatus[]?` | Per-upstream runtime status rows, null when unavailable. |
|
||||
|
||||
#### `UpstreamSummaryData`
|
||||
| Field | Type | Description |
|
||||
| --- | --- | --- |
|
||||
| `configured_total` | `usize` | Total configured upstream entries. |
|
||||
| `healthy_total` | `usize` | Upstreams currently marked healthy. |
|
||||
| `unhealthy_total` | `usize` | Upstreams currently marked unhealthy. |
|
||||
| `direct_total` | `usize` | Number of direct upstream entries. |
|
||||
| `socks4_total` | `usize` | Number of SOCKS4 upstream entries. |
|
||||
| `socks5_total` | `usize` | Number of SOCKS5 upstream entries. |
|
||||
| `shadowsocks_total` | `usize` | Number of Shadowsocks upstream entries. |
|
||||
|
||||
#### `UpstreamStatus`
|
||||
| Field | Type | Description |
|
||||
| --- | --- | --- |
|
||||
| `upstream_id` | `usize` | Runtime upstream index. |
|
||||
| `route_kind` | `string` | Upstream route kind: `direct`, `socks4`, `socks5`, `shadowsocks`. |
|
||||
| `address` | `string` | Upstream address (`direct` for direct route kind, `host:port` for Shadowsocks). Authentication fields are intentionally omitted. |
|
||||
| `weight` | `u16` | Selection weight. |
|
||||
| `scopes` | `string` | Configured scope selector string. |
|
||||
| `healthy` | `bool` | Current health flag. |
|
||||
| `fails` | `u32` | Consecutive fail counter. |
|
||||
| `last_check_age_secs` | `u64` | Seconds since the last health-check update. |
|
||||
| `effective_latency_ms` | `f64?` | Effective upstream latency used by selector. |
|
||||
| `dc` | `UpstreamDcStatus[]` | Per-DC latency/IP preference snapshot. |
|
||||
|
||||
#### `UpstreamDcStatus`
|
||||
| Field | Type | Description |
|
||||
| --- | --- | --- |
|
||||
| `dc` | `i16` | Telegram DC id. |
|
||||
| `latency_ema_ms` | `f64?` | Per-DC latency EMA value. |
|
||||
| `ip_preference` | `string` | Per-DC IP family preference: `unknown`, `prefer_v4`, `prefer_v6`, `both_work`, `unavailable`. |
|
||||
|
||||
#### `ZeroMiddleProxyData`
|
||||
| Field | Type | Description |
|
||||
| --- | --- | --- |
|
||||
@@ -274,7 +882,27 @@ API runtime is configured in `[server.api]`.
|
||||
| `floor_mode` | `string` | Writer floor mode. |
|
||||
| `adaptive_floor_idle_secs` | `u64` | Idle threshold for adaptive floor. |
|
||||
| `adaptive_floor_min_writers_single_endpoint` | `u8` | Minimum writers for single-endpoint DC in adaptive mode. |
|
||||
| `adaptive_floor_min_writers_multi_endpoint` | `u8` | Minimum writers for multi-endpoint DC in adaptive mode. |
|
||||
| `adaptive_floor_recover_grace_secs` | `u64` | Grace period for floor recovery. |
|
||||
| `adaptive_floor_writers_per_core_total` | `u16` | Target total writers-per-core budget in adaptive mode. |
|
||||
| `adaptive_floor_cpu_cores_override` | `u16` | CPU core override (`0` means auto-detect). |
|
||||
| `adaptive_floor_max_extra_writers_single_per_core` | `u16` | Extra single-endpoint writers budget per core. |
|
||||
| `adaptive_floor_max_extra_writers_multi_per_core` | `u16` | Extra multi-endpoint writers budget per core. |
|
||||
| `adaptive_floor_max_active_writers_per_core` | `u16` | Active writer cap per core. |
|
||||
| `adaptive_floor_max_warm_writers_per_core` | `u16` | Warm writer cap per core. |
|
||||
| `adaptive_floor_max_active_writers_global` | `u32` | Global active writer cap. |
|
||||
| `adaptive_floor_max_warm_writers_global` | `u32` | Global warm writer cap. |
|
||||
| `adaptive_floor_cpu_cores_detected` | `u32` | Runtime-detected CPU cores. |
|
||||
| `adaptive_floor_cpu_cores_effective` | `u32` | Effective core count used for adaptive caps. |
|
||||
| `adaptive_floor_global_cap_raw` | `u64` | Raw global cap before clamping. |
|
||||
| `adaptive_floor_global_cap_effective` | `u64` | Effective global cap after clamping. |
|
||||
| `adaptive_floor_target_writers_total` | `u64` | Current adaptive total writer target. |
|
||||
| `adaptive_floor_active_cap_configured` | `u64` | Configured global active cap. |
|
||||
| `adaptive_floor_active_cap_effective` | `u64` | Effective global active cap. |
|
||||
| `adaptive_floor_warm_cap_configured` | `u64` | Configured global warm cap. |
|
||||
| `adaptive_floor_warm_cap_effective` | `u64` | Effective global warm cap. |
|
||||
| `adaptive_floor_active_writers_current` | `u64` | Current active writers count. |
|
||||
| `adaptive_floor_warm_writers_current` | `u64` | Current warm writers count. |
|
||||
| `me_keepalive_enabled` | `bool` | ME keepalive toggle. |
|
||||
| `me_keepalive_interval_secs` | `u64` | Keepalive period. |
|
||||
| `me_keepalive_jitter_secs` | `u64` | Keepalive jitter. |
|
||||
@@ -296,6 +924,8 @@ API runtime is configured in `[server.api]`.
|
||||
| `me_single_endpoint_outage_backoff_max_ms` | `u64` | Outage mode max reconnect backoff. |
|
||||
| `me_single_endpoint_shadow_rotate_every_secs` | `u64` | Shadow rotation interval. |
|
||||
| `me_deterministic_writer_sort` | `bool` | Deterministic writer ordering toggle. |
|
||||
| `me_writer_pick_mode` | `string` | Writer picker mode (`sorted_rr`, `p2c`). |
|
||||
| `me_writer_pick_sample_size` | `u8` | Candidate sample size for `p2c` picker mode. |
|
||||
| `me_socks_kdf_policy` | `string` | Current SOCKS KDF policy mode. |
|
||||
| `quarantined_endpoints_total` | `usize` | Total quarantined endpoints. |
|
||||
| `quarantined_endpoints` | `MinimalQuarantineData[]` | Quarantine details. |
|
||||
@@ -361,14 +991,25 @@ API runtime is configured in `[server.api]`.
|
||||
| --- | --- | --- |
|
||||
| `dc` | `i16` | Telegram DC id. |
|
||||
| `endpoints` | `string[]` | Endpoints in this DC (`ip:port`). |
|
||||
| `endpoint_writers` | `DcEndpointWriters[]` | Active writer counts grouped by endpoint. |
|
||||
| `available_endpoints` | `usize` | Endpoints currently available in this DC. |
|
||||
| `available_pct` | `f64` | `available_endpoints / endpoints_total * 100`. |
|
||||
| `required_writers` | `usize` | Required writer count for this DC. |
|
||||
| `floor_min` | `usize` | Floor lower bound for this DC. |
|
||||
| `floor_target` | `usize` | Floor target writer count for this DC. |
|
||||
| `floor_max` | `usize` | Floor upper bound for this DC. |
|
||||
| `floor_capped` | `bool` | `true` when computed floor target was capped by active limits. |
|
||||
| `alive_writers` | `usize` | Alive writers in this DC. |
|
||||
| `coverage_pct` | `f64` | `alive_writers / required_writers * 100`. |
|
||||
| `rtt_ms` | `f64?` | Aggregated RTT for DC. |
|
||||
| `load` | `usize` | Active client sessions bound to this DC. |
|
||||
|
||||
#### `DcEndpointWriters`
|
||||
| Field | Type | Description |
|
||||
| --- | --- | --- |
|
||||
| `endpoint` | `string` | Endpoint (`ip:port`). |
|
||||
| `active_writers` | `usize` | Active writers currently mapped to endpoint. |
|
||||
|
||||
### `UserInfo`
|
||||
| Field | Type | Description |
|
||||
| --- | --- | --- |
|
||||
@@ -380,6 +1021,9 @@ API runtime is configured in `[server.api]`.
|
||||
| `max_unique_ips` | `usize?` | Optional unique IP limit. |
|
||||
| `current_connections` | `u64` | Current live connections. |
|
||||
| `active_unique_ips` | `usize` | Current active unique source IPs. |
|
||||
| `active_unique_ips_list` | `ip[]` | Current active unique source IP list. |
|
||||
| `recent_unique_ips` | `usize` | Unique source IP count inside the configured recent window. |
|
||||
| `recent_unique_ips_list` | `ip[]` | Recent-window unique source IP list. |
|
||||
| `total_octets` | `u64` | Total traffic octets for this user. |
|
||||
| `links` | `UserLinks` | Active connection links derived from current config. |
|
||||
|
||||
@@ -391,9 +1035,16 @@ API runtime is configured in `[server.api]`.
|
||||
| `tls` | `string[]` | Active `tg://proxy` links for EE-TLS mode (for each host+TLS domain). |
|
||||
|
||||
Link generation uses active config and enabled modes:
|
||||
- `[general.links].public_host/public_port` have priority.
|
||||
- Fallback host sources: listener `announce`, `announce_ip`, explicit listener `ip`.
|
||||
- Legacy fallback: `listen_addr_ipv4` and `listen_addr_ipv6` when routable.
|
||||
- Link port is `general.links.public_port` when configured; otherwise `server.port`.
|
||||
- If `general.links.public_host` is non-empty, it is used as the single link host override.
|
||||
- If `public_host` is not set, hosts are resolved from `server.listeners` in order:
|
||||
`announce` -> `announce_ip` -> listener bind `ip`.
|
||||
- For wildcard listener IPs (`0.0.0.0` / `::`), startup-detected external IP of the same family is used when available.
|
||||
- Listener-derived hosts are de-duplicated while preserving first-seen order.
|
||||
- If multiple hosts are resolved, API returns links for all resolved hosts in every enabled mode.
|
||||
- If no host can be resolved from listeners, fallback is startup-detected `IPv4 -> IPv6`.
|
||||
- Final compatibility fallback uses `listen_addr_ipv4`/`listen_addr_ipv6` when routable, otherwise `"UNKNOWN"`.
|
||||
- User rows are sorted by `username` in ascending lexical order.
|
||||
|
||||
### `CreateUserResponse`
|
||||
| Field | Type | Description |
|
||||
@@ -405,23 +1056,82 @@ Link generation uses active config and enabled modes:
|
||||
|
||||
| Endpoint | Notes |
|
||||
| --- | --- |
|
||||
| `POST /v1/users` | Creates user and validates resulting config before atomic save. |
|
||||
| `PATCH /v1/users/{username}` | Partial update of provided fields only. Missing fields remain unchanged. |
|
||||
| `POST /v1/users/{username}/rotate-secret` | Replaces secret. Empty body is allowed and auto-generates secret. |
|
||||
| `DELETE /v1/users/{username}` | Deletes user and related optional settings. Last user deletion is blocked. |
|
||||
| `POST /v1/users` | Creates user, validates config, then atomically updates only affected `access.*` TOML tables (`access.users` always, plus optional per-user tables present in request). |
|
||||
| `PATCH /v1/users/{username}` | Partial update of provided fields only. Missing fields remain unchanged. Current implementation persists full config document on success. |
|
||||
| `POST /v1/users/{username}/rotate-secret` | Currently returns `404` in runtime route matcher; request schema is reserved for intended behavior. |
|
||||
| `DELETE /v1/users/{username}` | Deletes only specified user, removes this user from related optional `access.user_*` maps, blocks last-user deletion, and atomically updates only related `access.*` TOML tables. |
|
||||
|
||||
All mutating endpoints:
|
||||
- Respect `read_only` mode.
|
||||
- Accept optional `If-Match` for optimistic concurrency.
|
||||
- Return new `revision` after successful write.
|
||||
- Use process-local mutation lock + atomic write (`tmp + rename`) for config persistence.
|
||||
|
||||
Delete path cleanup guarantees:
|
||||
- Config cleanup removes only the requested username keys.
|
||||
- Runtime unique-IP cleanup removes only this user's limiter and tracked IP state.
|
||||
|
||||
## Runtime State Matrix
|
||||
|
||||
| Endpoint | `minimal_runtime_enabled=false` | `minimal_runtime_enabled=true` + source unavailable | `minimal_runtime_enabled=true` + source available |
|
||||
| --- | --- | --- | --- |
|
||||
| `/v1/stats/minimal/all` | `enabled=false`, `reason=feature_disabled`, `data=null` | `enabled=true`, `reason=source_unavailable`, fallback `data` with disabled ME blocks | `enabled=true`, `reason` omitted, full payload |
|
||||
| `/v1/stats/me-writers` | `middle_proxy_enabled=false`, `reason=feature_disabled` | `middle_proxy_enabled=false`, `reason=source_unavailable` | `middle_proxy_enabled=true`, runtime snapshot |
|
||||
| `/v1/stats/dcs` | `middle_proxy_enabled=false`, `reason=feature_disabled` | `middle_proxy_enabled=false`, `reason=source_unavailable` | `middle_proxy_enabled=true`, runtime snapshot |
|
||||
| `/v1/stats/upstreams` | `enabled=false`, `reason=feature_disabled`, `summary/upstreams` omitted, `zero` still present | `enabled=true`, `reason=source_unavailable`, `summary/upstreams` omitted, `zero` present | `enabled=true`, `reason` omitted, `summary/upstreams` present, `zero` present |
|
||||
|
||||
`source_unavailable` conditions:
|
||||
- ME endpoints: ME pool is absent (for example direct-only mode or failed ME initialization).
|
||||
- Upstreams endpoint: non-blocking upstream snapshot lock is unavailable at request time.
|
||||
|
||||
Additional runtime endpoint behavior:
|
||||
|
||||
| Endpoint | Disabled by feature flag | `source_unavailable` condition | Normal mode |
|
||||
| --- | --- | --- | --- |
|
||||
| `/v1/runtime/me_pool_state` | No | ME pool snapshot unavailable | `enabled=true`, full payload |
|
||||
| `/v1/runtime/me_quality` | No | ME pool snapshot unavailable | `enabled=true`, full payload |
|
||||
| `/v1/runtime/upstream_quality` | No | Upstream runtime snapshot unavailable | `enabled=true`, full payload |
|
||||
| `/v1/runtime/nat_stun` | No | STUN shared state unavailable | `enabled=true`, full payload |
|
||||
| `/v1/runtime/me-selftest` | No | ME pool unavailable => `enabled=false`, `reason=source_unavailable` | `enabled=true`, full payload |
|
||||
| `/v1/runtime/connections/summary` | `runtime_edge_enabled=false` => `enabled=false`, `reason=feature_disabled` | Recompute lock contention with no cache entry => `enabled=true`, `reason=source_unavailable` | `enabled=true`, full payload |
|
||||
| `/v1/runtime/events/recent` | `runtime_edge_enabled=false` => `enabled=false`, `reason=feature_disabled` | Not used in current implementation | `enabled=true`, full payload |
|
||||
|
||||
## ME Fallback Behavior Exposed Via API
|
||||
|
||||
When `general.use_middle_proxy=true` and `general.me2dc_fallback=true`:
|
||||
- Startup does not block on full ME pool readiness; initialization can continue in background.
|
||||
- Runtime initialization payload can expose ME stage `background_init` until pool becomes ready.
|
||||
- Admission/routing decision uses two readiness grace windows for "ME not ready" periods:
|
||||
`80s` before first-ever readiness is observed (startup grace),
|
||||
`6s` after readiness has been observed at least once (runtime failover timeout).
|
||||
- While the fallback grace window is exceeded, new sessions are routed via Direct-DC; once ME becomes ready again, routing returns to Middle mode for new sessions.
|
||||
|
||||
## Serialization Rules
|
||||
|
||||
- Success responses always include `revision`.
|
||||
- Error responses never include `revision`; they include `request_id`.
|
||||
- Optional fields with `skip_serializing_if` are omitted when absent.
|
||||
- Nullable payload fields may still be `null` where contract uses `?` (for example `UserInfo` option fields).
|
||||
- For `/v1/stats/upstreams`, authentication details of SOCKS upstreams are intentionally omitted.
|
||||
- `ip[]` fields are serialized as JSON string arrays (for example `"1.2.3.4"`, `"2001:db8::1"`).
|
||||
|
||||
## Operational Notes
|
||||
|
||||
| Topic | Details |
|
||||
| --- | --- |
|
||||
| API startup | API binds only when `[server.api].enabled=true`. |
|
||||
| Restart requirements | Changes in `server.api` settings require process restart. |
|
||||
| API startup | API listener is spawned only when `[server.api].enabled=true`. |
|
||||
| `listen` port `0` | API spawn is skipped when parsed listen port is `0` (treated as disabled bind target). |
|
||||
| Bind failure | Failed API bind logs warning and API task exits (no auto-retry loop). |
|
||||
| ME runtime status endpoints | `/v1/stats/me-writers`, `/v1/stats/dcs`, `/v1/stats/minimal/all` require `[server.api].minimal_runtime_enabled=true`; otherwise they return disabled payload with `reason=feature_disabled`. |
|
||||
| Upstream runtime endpoint | `/v1/stats/upstreams` always returns `zero`, but runtime fields (`summary`, `upstreams`) require `[server.api].minimal_runtime_enabled=true`. |
|
||||
| Restart requirements | `server.api` changes are restart-required for predictable behavior. |
|
||||
| Hot-reload nuance | A pure `server.api`-only config change may not propagate through watcher broadcast; a mixed change (with hot fields) may propagate API flags while still warning that restart is required. |
|
||||
| Runtime apply path | Successful writes are picked up by existing config watcher/hot-reload path. |
|
||||
| Exposure | Built-in TLS/mTLS is not provided. Use loopback bind + reverse proxy if needed. |
|
||||
| Pagination | User list currently has no pagination/filtering. |
|
||||
| Serialization side effect | Config comments/manual formatting are not preserved on write. |
|
||||
| Serialization side effect | Updated TOML table bodies are re-serialized on write. Endpoints that persist full config can still rewrite broader formatting/comments. |
|
||||
|
||||
## Known Limitations (Current Release)
|
||||
|
||||
- `POST /v1/users/{username}/rotate-secret` is currently unreachable in route matcher and returns `404`.
|
||||
- API runtime controls under `server.api` are documented as restart-required; hot-reload behavior for these fields is not strictly uniform in all change combinations.
|
||||
|
||||
427
docs/CONFIG_PARAMS.en.md
Normal file
427
docs/CONFIG_PARAMS.en.md
Normal file
@@ -0,0 +1,427 @@
|
||||
# Telemt Config Parameters Reference
|
||||
|
||||
This document lists all configuration keys accepted by `config.toml`.
|
||||
|
||||
> [!WARNING]
|
||||
>
|
||||
> The configuration parameters detailed in this document are intended for advanced users and fine-tuning purposes. Modifying these settings without a clear understanding of their function may lead to application instability or other unexpected behavior. Please proceed with caution and at your own risk.
|
||||
|
||||
## Top-level keys
|
||||
|
||||
| Parameter | Type | Default | Constraints / validation | Description |
|
||||
|---|---|---|---|---|
|
||||
| include | `String` (special directive) | `null` | — | Includes another TOML file with `include = "relative/or/absolute/path.toml"`; includes are processed recursively before parsing. |
|
||||
| show_link | `"*" \| String[]` | `[]` (`ShowLink::None`) | — | Legacy top-level link visibility selector (`"*"` for all users or explicit usernames list). |
|
||||
| dc_overrides | `Map<String, String[]>` | `{}` | — | Overrides DC endpoints for non-standard DCs; key is DC id string, value is `ip:port` list. |
|
||||
| default_dc | `u8 \| null` | `null` (effective fallback: `2` in ME routing) | — | Default DC index used for unmapped non-standard DCs. |
|
||||
|
||||
## [general]
|
||||
|
||||
| Parameter | Type | Default | Constraints / validation | Description |
|
||||
|---|---|---|---|---|
|
||||
| data_path | `String \| null` | `null` | — | Optional runtime data directory path. |
|
||||
| prefer_ipv6 | `bool` | `false` | Deprecated. Use `network.prefer`. | Deprecated legacy IPv6 preference flag migrated to `network.prefer`. |
|
||||
| fast_mode | `bool` | `true` | — | Enables fast-path optimizations for traffic processing. |
|
||||
| use_middle_proxy | `bool` | `true` | none | Enables ME transport mode; if `false`, runtime falls back to direct DC routing. |
|
||||
| proxy_secret_path | `String \| null` | `"proxy-secret"` | Path may be `null`. | Path to Telegram infrastructure proxy-secret file used by ME handshake logic. |
|
||||
| proxy_config_v4_cache_path | `String \| null` | `"cache/proxy-config-v4.txt"` | — | Optional cache path for raw `getProxyConfig` (IPv4) snapshot. |
|
||||
| proxy_config_v6_cache_path | `String \| null` | `"cache/proxy-config-v6.txt"` | — | Optional cache path for raw `getProxyConfigV6` (IPv6) snapshot. |
|
||||
| ad_tag | `String \| null` | `null` | — | Global fallback ad tag (32 hex characters). |
|
||||
| middle_proxy_nat_ip | `IpAddr \| null` | `null` | Must be a valid IP when set. | Manual public NAT IP override used as ME address material when set. |
|
||||
| middle_proxy_nat_probe | `bool` | `true` | Auto-forced to `true` when `use_middle_proxy = true`. | Enables ME NAT probing; runtime may force it on when ME mode is active. |
|
||||
| middle_proxy_nat_stun | `String \| null` | `null` | Deprecated. Use `network.stun_servers`. | Deprecated legacy single STUN server for NAT probing. |
|
||||
| middle_proxy_nat_stun_servers | `String[]` | `[]` | Deprecated. Use `network.stun_servers`. | Deprecated legacy STUN list for NAT probing fallback. |
|
||||
| stun_nat_probe_concurrency | `usize` | `8` | Must be `> 0`. | Maximum number of parallel STUN probes during NAT/public endpoint discovery. |
|
||||
| middle_proxy_pool_size | `usize` | `8` | none | Target size of active ME writer pool. |
|
||||
| middle_proxy_warm_standby | `usize` | `16` | none | Reserved compatibility field in current runtime revision. |
|
||||
| me_init_retry_attempts | `u32` | `0` | `0..=1_000_000`. | Startup retries for ME pool initialization (`0` means unlimited). |
|
||||
| me2dc_fallback | `bool` | `true` | — | Allows fallback from ME mode to direct DC when ME startup fails. |
|
||||
| me_keepalive_enabled | `bool` | `true` | none | Enables periodic ME keepalive/ping traffic. |
|
||||
| me_keepalive_interval_secs | `u64` | `8` | none | Base ME keepalive interval in seconds. |
|
||||
| me_keepalive_jitter_secs | `u64` | `2` | none | Keepalive jitter in seconds to reduce synchronized bursts. |
|
||||
| me_keepalive_payload_random | `bool` | `true` | none | Randomizes keepalive payload bytes instead of fixed zero payload. |
|
||||
| rpc_proxy_req_every | `u64` | `0` | `0` or `10..=300`. | Interval for service `RPC_PROXY_REQ` activity signals (`0` disables). |
|
||||
| me_writer_cmd_channel_capacity | `usize` | `4096` | Must be `> 0`. | Capacity of per-writer command channel. |
|
||||
| me_route_channel_capacity | `usize` | `768` | Must be `> 0`. | Capacity of per-connection ME response route channel. |
|
||||
| me_c2me_channel_capacity | `usize` | `1024` | Must be `> 0`. | Capacity of per-client command queue (client reader -> ME sender). |
|
||||
| me_c2me_send_timeout_ms | `u64` | `4000` | `0..=60000`. | Maximum wait for enqueueing client->ME commands when the per-client queue is full (`0` keeps legacy unbounded wait). |
|
||||
| me_reader_route_data_wait_ms | `u64` | `2` | `0..=20`. | Bounded wait for routing ME DATA to per-connection queue (`0` = no wait). |
|
||||
| me_d2c_flush_batch_max_frames | `usize` | `32` | `1..=512`. | Max ME->client frames coalesced before flush. |
|
||||
| me_d2c_flush_batch_max_bytes | `usize` | `131072` | `4096..=2_097_152`. | Max ME->client payload bytes coalesced before flush. |
|
||||
| me_d2c_flush_batch_max_delay_us | `u64` | `500` | `0..=5000`. | Max microsecond wait for coalescing more ME->client frames (`0` disables timed coalescing). |
|
||||
| me_d2c_ack_flush_immediate | `bool` | `true` | — | Flushes client writer immediately after quick-ack write. |
|
||||
| direct_relay_copy_buf_c2s_bytes | `usize` | `65536` | `4096..=1_048_576`. | Copy buffer size for client->DC direction in direct relay. |
|
||||
| direct_relay_copy_buf_s2c_bytes | `usize` | `262144` | `8192..=2_097_152`. | Copy buffer size for DC->client direction in direct relay. |
|
||||
| crypto_pending_buffer | `usize` | `262144` | — | Max pending ciphertext buffer per client writer (bytes). |
|
||||
| max_client_frame | `usize` | `16777216` | — | Maximum allowed client MTProto frame size (bytes). |
|
||||
| desync_all_full | `bool` | `false` | — | Emits full crypto-desync forensic logs for every event. |
|
||||
| beobachten | `bool` | `true` | — | Enables per-IP forensic observation buckets. |
|
||||
| beobachten_minutes | `u64` | `10` | Must be `> 0`. | Retention window (minutes) for per-IP observation buckets. |
|
||||
| beobachten_flush_secs | `u64` | `15` | Must be `> 0`. | Snapshot flush interval (seconds) for observation output file. |
|
||||
| beobachten_file | `String` | `"cache/beobachten.txt"` | — | Observation snapshot output file path. |
|
||||
| hardswap | `bool` | `true` | none | Enables generation-based ME hardswap strategy. |
|
||||
| me_warmup_stagger_enabled | `bool` | `true` | none | Staggers extra ME warmup dials to avoid connection spikes. |
|
||||
| me_warmup_step_delay_ms | `u64` | `500` | none | Base delay in milliseconds between warmup dial steps. |
|
||||
| me_warmup_step_jitter_ms | `u64` | `300` | none | Additional random delay in milliseconds for warmup steps. |
|
||||
| me_reconnect_max_concurrent_per_dc | `u32` | `8` | none | Limits concurrent reconnect workers per DC during health recovery. |
|
||||
| me_reconnect_backoff_base_ms | `u64` | `500` | none | Initial reconnect backoff in milliseconds. |
|
||||
| me_reconnect_backoff_cap_ms | `u64` | `30000` | none | Maximum reconnect backoff cap in milliseconds. |
|
||||
| me_reconnect_fast_retry_count | `u32` | `16` | none | Immediate retry budget before long backoff behavior applies. |
|
||||
| me_single_endpoint_shadow_writers | `u8` | `2` | `0..=32`. | Additional reserve writers for one-endpoint DC groups. |
|
||||
| me_single_endpoint_outage_mode_enabled | `bool` | `true` | — | Enables aggressive outage recovery for one-endpoint DC groups. |
|
||||
| me_single_endpoint_outage_disable_quarantine | `bool` | `true` | — | Ignores endpoint quarantine in one-endpoint outage mode. |
|
||||
| me_single_endpoint_outage_backoff_min_ms | `u64` | `250` | Must be `> 0`; also `<= me_single_endpoint_outage_backoff_max_ms`. | Minimum reconnect backoff in outage mode (ms). |
|
||||
| me_single_endpoint_outage_backoff_max_ms | `u64` | `3000` | Must be `> 0`; also `>= me_single_endpoint_outage_backoff_min_ms`. | Maximum reconnect backoff in outage mode (ms). |
|
||||
| me_single_endpoint_shadow_rotate_every_secs | `u64` | `900` | — | Periodic shadow writer rotation interval (`0` disables). |
|
||||
| me_floor_mode | `"static" \| "adaptive"` | `"adaptive"` | — | Writer floor policy mode. |
|
||||
| me_adaptive_floor_idle_secs | `u64` | `90` | — | Idle time before adaptive floor may reduce one-endpoint target. |
|
||||
| me_adaptive_floor_min_writers_single_endpoint | `u8` | `1` | `1..=32`. | Minimum adaptive writer target for one-endpoint DC groups. |
|
||||
| me_adaptive_floor_min_writers_multi_endpoint | `u8` | `1` | `1..=32`. | Minimum adaptive writer target for multi-endpoint DC groups. |
|
||||
| me_adaptive_floor_recover_grace_secs | `u64` | `180` | — | Grace period to hold static floor after activity. |
|
||||
| me_adaptive_floor_writers_per_core_total | `u16` | `48` | Must be `> 0`. | Global writer budget per logical CPU core in adaptive mode. |
|
||||
| me_adaptive_floor_cpu_cores_override | `u16` | `0` | — | Manual CPU core count override (`0` uses auto-detection). |
|
||||
| me_adaptive_floor_max_extra_writers_single_per_core | `u16` | `1` | — | Per-core max extra writers above base floor for one-endpoint DCs. |
|
||||
| me_adaptive_floor_max_extra_writers_multi_per_core | `u16` | `2` | — | Per-core max extra writers above base floor for multi-endpoint DCs. |
|
||||
| me_adaptive_floor_max_active_writers_per_core | `u16` | `64` | Must be `> 0`. | Hard cap for active ME writers per logical CPU core. |
|
||||
| me_adaptive_floor_max_warm_writers_per_core | `u16` | `64` | Must be `> 0`. | Hard cap for warm ME writers per logical CPU core. |
|
||||
| me_adaptive_floor_max_active_writers_global | `u32` | `256` | Must be `> 0`. | Hard global cap for active ME writers. |
|
||||
| me_adaptive_floor_max_warm_writers_global | `u32` | `256` | Must be `> 0`. | Hard global cap for warm ME writers. |
|
||||
| upstream_connect_retry_attempts | `u32` | `2` | Must be `> 0`. | Connect attempts for selected upstream before error/fallback. |
|
||||
| upstream_connect_retry_backoff_ms | `u64` | `100` | — | Delay between upstream connect attempts (ms). |
|
||||
| upstream_connect_budget_ms | `u64` | `3000` | Must be `> 0`. | Total wall-clock budget for one upstream connect request (ms). |
|
||||
| upstream_unhealthy_fail_threshold | `u32` | `5` | Must be `> 0`. | Consecutive failed requests before upstream is marked unhealthy. |
|
||||
| upstream_connect_failfast_hard_errors | `bool` | `false` | — | Skips additional retries for hard non-transient connect errors. |
|
||||
| stun_iface_mismatch_ignore | `bool` | `false` | none | Reserved compatibility flag in current runtime revision. |
|
||||
| unknown_dc_log_path | `String \| null` | `"unknown-dc.txt"` | — | File path for unknown-DC request logging (`null` disables file path). |
|
||||
| unknown_dc_file_log_enabled | `bool` | `false` | — | Enables unknown-DC file logging. |
|
||||
| log_level | `"debug" \| "verbose" \| "normal" \| "silent"` | `"normal"` | — | Runtime logging verbosity. |
|
||||
| disable_colors | `bool` | `false` | — | Disables ANSI colors in logs. |
|
||||
| me_socks_kdf_policy | `"strict" \| "compat"` | `"strict"` | — | SOCKS-bound KDF fallback policy for ME handshake. |
|
||||
| me_route_backpressure_base_timeout_ms | `u64` | `25` | Must be `> 0`. | Base backpressure timeout for route-channel send (ms). |
|
||||
| me_route_backpressure_high_timeout_ms | `u64` | `120` | Must be `>= me_route_backpressure_base_timeout_ms`. | High backpressure timeout when queue occupancy exceeds watermark (ms). |
|
||||
| me_route_backpressure_high_watermark_pct | `u8` | `80` | `1..=100`. | Queue occupancy threshold (%) for high timeout mode. |
|
||||
| me_health_interval_ms_unhealthy | `u64` | `1000` | Must be `> 0`. | Health monitor interval while writer coverage is degraded (ms). |
|
||||
| me_health_interval_ms_healthy | `u64` | `3000` | Must be `> 0`. | Health monitor interval while writer coverage is healthy (ms). |
|
||||
| me_admission_poll_ms | `u64` | `1000` | Must be `> 0`. | Poll interval for conditional-admission checks (ms). |
|
||||
| me_warn_rate_limit_ms | `u64` | `5000` | Must be `> 0`. | Cooldown for repetitive ME warning logs (ms). |
|
||||
| me_route_no_writer_mode | `"async_recovery_failfast" \| "inline_recovery_legacy" \| "hybrid_async_persistent"` | `"hybrid_async_persistent"` | — | Route behavior when no writer is immediately available. |
|
||||
| me_route_no_writer_wait_ms | `u64` | `250` | `10..=5000`. | Max wait in async-recovery failfast mode (ms). |
|
||||
| me_route_hybrid_max_wait_ms | `u64` | `3000` | `50..=60000`. | Maximum cumulative wait in hybrid no-writer mode before failfast fallback (ms). |
|
||||
| me_route_blocking_send_timeout_ms | `u64` | `250` | `0..=5000`. | Maximum wait for blocking route-channel send fallback (`0` keeps legacy unbounded wait). |
|
||||
| me_route_inline_recovery_attempts | `u32` | `3` | Must be `> 0`. | Inline recovery attempts in legacy mode. |
|
||||
| me_route_inline_recovery_wait_ms | `u64` | `3000` | `10..=30000`. | Max inline recovery wait in legacy mode (ms). |
|
||||
| fast_mode_min_tls_record | `usize` | `0` | — | Minimum TLS record size when fast-mode coalescing is enabled (`0` disables). |
|
||||
| update_every | `u64 \| null` | `300` | If set: must be `> 0`; if `null`: legacy fallback path is used. | Unified refresh interval for ME config and proxy-secret updater tasks. |
|
||||
| me_reinit_every_secs | `u64` | `900` | Must be `> 0`. | Periodic interval for zero-downtime ME reinit cycle. |
|
||||
| me_hardswap_warmup_delay_min_ms | `u64` | `1000` | Must be `<= me_hardswap_warmup_delay_max_ms`. | Lower bound for hardswap warmup dial spacing. |
|
||||
| me_hardswap_warmup_delay_max_ms | `u64` | `2000` | Must be `> 0`. | Upper bound for hardswap warmup dial spacing. |
|
||||
| me_hardswap_warmup_extra_passes | `u8` | `3` | Must be within `[0, 10]`. | Additional warmup passes after the base pass in one hardswap cycle. |
|
||||
| me_hardswap_warmup_pass_backoff_base_ms | `u64` | `500` | Must be `> 0`. | Base backoff between extra hardswap warmup passes. |
|
||||
| me_config_stable_snapshots | `u8` | `2` | Must be `> 0`. | Number of identical ME config snapshots required before apply. |
|
||||
| me_config_apply_cooldown_secs | `u64` | `300` | none | Cooldown between applied ME endpoint-map updates. |
|
||||
| me_snapshot_require_http_2xx | `bool` | `true` | — | Requires 2xx HTTP responses for applying config snapshots. |
|
||||
| me_snapshot_reject_empty_map | `bool` | `true` | — | Rejects empty config snapshots. |
|
||||
| me_snapshot_min_proxy_for_lines | `u32` | `1` | Must be `> 0`. | Minimum parsed `proxy_for` rows required to accept snapshot. |
|
||||
| proxy_secret_stable_snapshots | `u8` | `2` | Must be `> 0`. | Number of identical proxy-secret snapshots required before rotation. |
|
||||
| proxy_secret_rotate_runtime | `bool` | `true` | none | Enables runtime proxy-secret rotation from updater snapshots. |
|
||||
| me_secret_atomic_snapshot | `bool` | `true` | — | Keeps selector and secret bytes from the same snapshot atomically. |
|
||||
| proxy_secret_len_max | `usize` | `256` | Must be within `[32, 4096]`. | Upper length limit for accepted proxy-secret bytes. |
|
||||
| me_pool_drain_ttl_secs | `u64` | `90` | none | Time window where stale writers remain fallback-eligible after map change. |
|
||||
| me_instadrain | `bool` | `false` | — | Forces stale draining writers to be removed on the next cleanup tick, bypassing TTL/deadline waiting. |
|
||||
| me_pool_drain_threshold | `u64` | `128` | — | Maximum number of stale draining writers allowed before batch force-close (`0` disables threshold cleanup). |
|
||||
| me_pool_drain_soft_evict_enabled | `bool` | `true` | — | Enables gradual soft-eviction of stale writers during drain/reinit instead of immediate hard close. |
|
||||
| me_pool_drain_soft_evict_grace_secs | `u64` | `30` | `0..=3600`. | Grace period before stale writers become soft-evict candidates. |
|
||||
| me_pool_drain_soft_evict_per_writer | `u8` | `1` | `1..=16`. | Maximum stale routes soft-evicted per writer in one eviction pass. |
|
||||
| me_pool_drain_soft_evict_budget_per_core | `u16` | `8` | `1..=64`. | Per-core budget limiting aggregate soft-eviction work per pass. |
|
||||
| me_pool_drain_soft_evict_cooldown_ms | `u64` | `5000` | Must be `> 0`. | Cooldown between consecutive soft-eviction passes (ms). |
|
||||
| me_bind_stale_mode | `"never" \| "ttl" \| "always"` | `"ttl"` | — | Policy for new binds on stale draining writers. |
|
||||
| me_bind_stale_ttl_secs | `u64` | `90` | — | TTL for stale bind allowance when stale mode is `ttl`. |
|
||||
| me_pool_min_fresh_ratio | `f32` | `0.8` | Must be within `[0.0, 1.0]`. | Minimum fresh desired-DC coverage ratio before stale writers are drained. |
|
||||
| me_reinit_drain_timeout_secs | `u64` | `120` | `0` disables force-close; if `> 0` and `< me_pool_drain_ttl_secs`, runtime bumps it to TTL. | Force-close timeout for draining stale writers (`0` keeps indefinite draining). |
|
||||
| proxy_secret_auto_reload_secs | `u64` | `3600` | Deprecated. Use `general.update_every`. | Deprecated legacy secret reload interval (fallback when `update_every` is not set). |
|
||||
| proxy_config_auto_reload_secs | `u64` | `3600` | Deprecated. Use `general.update_every`. | Deprecated legacy config reload interval (fallback when `update_every` is not set). |
|
||||
| me_reinit_singleflight | `bool` | `true` | — | Serializes ME reinit cycles across trigger sources. |
|
||||
| me_reinit_trigger_channel | `usize` | `64` | Must be `> 0`. | Trigger queue capacity for reinit scheduler. |
|
||||
| me_reinit_coalesce_window_ms | `u64` | `200` | — | Trigger coalescing window before starting reinit (ms). |
|
||||
| me_deterministic_writer_sort | `bool` | `true` | — | Enables deterministic candidate sort for writer binding path. |
|
||||
| me_writer_pick_mode | `"sorted_rr" \| "p2c"` | `"p2c"` | — | Writer selection mode for route bind path. |
|
||||
| me_writer_pick_sample_size | `u8` | `3` | `2..=4`. | Number of candidates sampled by picker in `p2c` mode. |
|
||||
| ntp_check | `bool` | `true` | — | Enables NTP drift check at startup. |
|
||||
| ntp_servers | `String[]` | `["pool.ntp.org"]` | — | NTP servers used for drift check. |
|
||||
| auto_degradation_enabled | `bool` | `true` | none | Reserved compatibility flag in current runtime revision. |
|
||||
| degradation_min_unavailable_dc_groups | `u8` | `2` | none | Reserved compatibility threshold in current runtime revision. |
|
||||
|
||||
## [general.modes]
|
||||
|
||||
| Parameter | Type | Default | Constraints / validation | Description |
|
||||
|---|---|---|---|---|
|
||||
| classic | `bool` | `false` | — | Enables classic MTProxy mode. |
|
||||
| secure | `bool` | `false` | — | Enables secure mode. |
|
||||
| tls | `bool` | `true` | — | Enables TLS mode. |
|
||||
|
||||
## [general.links]
|
||||
|
||||
| Parameter | Type | Default | Constraints / validation | Description |
|
||||
|---|---|---|---|---|
|
||||
| show | `"*" \| String[]` | `"*"` | — | Selects users whose tg:// links are shown at startup. |
|
||||
| public_host | `String \| null` | `null` | — | Public hostname/IP override for generated tg:// links. |
|
||||
| public_port | `u16 \| null` | `null` | — | Public port override for generated tg:// links. |
|
||||
|
||||
## [general.telemetry]
|
||||
|
||||
| Parameter | Type | Default | Constraints / validation | Description |
|
||||
|---|---|---|---|---|
|
||||
| core_enabled | `bool` | `true` | — | Enables core hot-path telemetry counters. |
|
||||
| user_enabled | `bool` | `true` | — | Enables per-user telemetry counters. |
|
||||
| me_level | `"silent" \| "normal" \| "debug"` | `"normal"` | — | Middle-End telemetry verbosity level. |
|
||||
|
||||
## [network]
|
||||
|
||||
| Parameter | Type | Default | Constraints / validation | Description |
|
||||
|---|---|---|---|---|
|
||||
| ipv4 | `bool` | `true` | — | Enables IPv4 networking. |
|
||||
| ipv6 | `bool` | `false` | — | Enables IPv6 networking. |
|
||||
| prefer | `u8` | `4` | Must be `4` or `6`. | Preferred IP family for selection (`4` or `6`). |
|
||||
| multipath | `bool` | `false` | — | Enables multipath behavior where supported. |
|
||||
| stun_use | `bool` | `true` | none | Global STUN switch; when `false`, STUN probing path is disabled. |
|
||||
| stun_servers | `String[]` | Built-in STUN list (13 hosts) | Deduplicated; empty values are removed. | Primary STUN server list for NAT/public endpoint discovery. |
|
||||
| stun_tcp_fallback | `bool` | `true` | none | Enables TCP fallback for STUN when UDP path is blocked. |
|
||||
| http_ip_detect_urls | `String[]` | `["https://ifconfig.me/ip", "https://api.ipify.org"]` | none | HTTP fallback endpoints for public IP detection when STUN is unavailable. |
|
||||
| cache_public_ip_path | `String` | `"cache/public_ip.txt"` | — | File path for caching detected public IP. |
|
||||
| dns_overrides | `String[]` | `[]` | Must match `host:port:ip`; IPv6 must be bracketed. | Runtime DNS overrides in `host:port:ip` format. |
|
||||
|
||||
## [server]
|
||||
|
||||
| Parameter | Type | Default | Constraints / validation | Description |
|
||||
|---|---|---|---|---|
|
||||
| port | `u16` | `443` | — | Main proxy listen port. |
|
||||
| listen_addr_ipv4 | `String \| null` | `"0.0.0.0"` | — | IPv4 bind address for TCP listener. |
|
||||
| listen_addr_ipv6 | `String \| null` | `"::"` | — | IPv6 bind address for TCP listener. |
|
||||
| listen_unix_sock | `String \| null` | `null` | — | Unix socket path for listener. |
|
||||
| listen_unix_sock_perm | `String \| null` | `null` | — | Unix socket permissions in octal string (e.g., `"0666"`). |
|
||||
| listen_tcp | `bool \| null` | `null` (auto) | — | Explicit TCP listener enable/disable override. |
|
||||
| proxy_protocol | `bool` | `false` | — | Enables HAProxy PROXY protocol parsing on incoming client connections. |
|
||||
| proxy_protocol_header_timeout_ms | `u64` | `500` | Must be `> 0`. | Timeout for PROXY protocol header read/parse (ms). |
|
||||
| proxy_protocol_trusted_cidrs | `IpNetwork[]` | `[]` | — | When non-empty, only connections from these proxy source CIDRs are allowed to provide PROXY protocol headers. If empty, PROXY headers are rejected by default (security hardening). |
|
||||
| metrics_port | `u16 \| null` | `null` | — | Metrics endpoint port (enables metrics listener). |
|
||||
| metrics_listen | `String \| null` | `null` | — | Full metrics bind address (`IP:PORT`), overrides `metrics_port`. |
|
||||
| metrics_whitelist | `IpNetwork[]` | `["127.0.0.1/32", "::1/128"]` | — | CIDR whitelist for metrics endpoint access. |
|
||||
| max_connections | `u32` | `10000` | — | Max concurrent client connections (`0` = unlimited). |
|
||||
| accept_permit_timeout_ms | `u64` | `250` | `0..=60000`. | Maximum wait for acquiring a connection-slot permit before the accepted connection is dropped (`0` keeps legacy unbounded wait). |
|
||||
|
||||
Note: When `server.proxy_protocol` is enabled, incoming PROXY protocol headers are parsed from the first bytes of the connection and the client source address is replaced with `src_addr` from the header. For security, the peer source IP (the direct connection address) is verified against `server.proxy_protocol_trusted_cidrs`; if this list is empty, PROXY headers are rejected and the connection is considered untrusted.
|
||||
|
||||
## [server.api]
|
||||
|
||||
| Parameter | Type | Default | Constraints / validation | Description |
|
||||
|---|---|---|---|---|
|
||||
| enabled | `bool` | `true` | — | Enables control-plane REST API. |
|
||||
| listen | `String` | `"0.0.0.0:9091"` | Must be valid `IP:PORT`. | API bind address in `IP:PORT` format. |
|
||||
| whitelist | `IpNetwork[]` | `["127.0.0.0/8"]` | — | CIDR whitelist allowed to access API. |
|
||||
| auth_header | `String` | `""` | — | Exact expected `Authorization` header value (empty = disabled). |
|
||||
| request_body_limit_bytes | `usize` | `65536` | Must be `> 0`. | Maximum accepted HTTP request body size. |
|
||||
| minimal_runtime_enabled | `bool` | `true` | — | Enables minimal runtime snapshots endpoint logic. |
|
||||
| minimal_runtime_cache_ttl_ms | `u64` | `1000` | `0..=60000`. | Cache TTL for minimal runtime snapshots (ms; `0` disables cache). |
|
||||
| runtime_edge_enabled | `bool` | `false` | — | Enables runtime edge endpoints. |
|
||||
| runtime_edge_cache_ttl_ms | `u64` | `1000` | `0..=60000`. | Cache TTL for runtime edge aggregation payloads (ms). |
|
||||
| runtime_edge_top_n | `usize` | `10` | `1..=1000`. | Top-N size for edge connection leaderboard. |
|
||||
| runtime_edge_events_capacity | `usize` | `256` | `16..=4096`. | Ring-buffer capacity for runtime edge events. |
|
||||
| read_only | `bool` | `false` | — | Rejects mutating API endpoints when enabled. |
|
||||
|
||||
## [[server.listeners]]
|
||||
|
||||
| Parameter | Type | Default | Constraints / validation | Description |
|
||||
|---|---|---|---|---|
|
||||
| ip | `IpAddr` | — | — | Listener bind IP. |
|
||||
| announce | `String \| null` | — | — | Public IP/domain announced in proxy links (priority over `announce_ip`). |
|
||||
| announce_ip | `IpAddr \| null` | — | Deprecated. Use `announce`. | Deprecated legacy announce IP (migrated to `announce` if needed). |
|
||||
| proxy_protocol | `bool \| null` | `null` | — | Per-listener override for PROXY protocol enable flag. |
|
||||
| reuse_allow | `bool` | `false` | — | Enables `SO_REUSEPORT` for multi-instance bind sharing. |
|
||||
|
||||
## [timeouts]
|
||||
|
||||
| Parameter | Type | Default | Constraints / validation | Description |
|
||||
|---|---|---|---|---|
|
||||
| client_handshake | `u64` | `30` | — | Client handshake timeout. |
|
||||
| tg_connect | `u64` | `10` | — | Upstream Telegram connect timeout. |
|
||||
| client_keepalive | `u64` | `15` | — | Client keepalive timeout. |
|
||||
| client_ack | `u64` | `90` | — | Client ACK timeout. |
|
||||
| me_one_retry | `u8` | `12` | none | Fast reconnect attempts budget for single-endpoint DC scenarios. |
|
||||
| me_one_timeout_ms | `u64` | `1200` | none | Timeout in milliseconds for each quick single-endpoint reconnect attempt. |
|
||||
|
||||
## [censorship]
|
||||
|
||||
| Parameter | Type | Default | Constraints / validation | Description |
|
||||
|---|---|---|---|---|
|
||||
| tls_domain | `String` | `"petrovich.ru"` | — | Primary TLS domain used in fake TLS handshake profile. |
|
||||
| tls_domains | `String[]` | `[]` | — | Additional TLS domains for generating multiple links. |
|
||||
| mask | `bool` | `true` | — | Enables masking/fronting relay mode. |
|
||||
| mask_host | `String \| null` | `null` | — | Upstream mask host for TLS fronting relay. |
|
||||
| mask_port | `u16` | `443` | — | Upstream mask port for TLS fronting relay. |
|
||||
| mask_unix_sock | `String \| null` | `null` | — | Unix socket path for mask backend instead of TCP host/port. |
|
||||
| fake_cert_len | `usize` | `2048` | — | Length of synthetic certificate payload when emulation data is unavailable. |
|
||||
| tls_emulation | `bool` | `true` | — | Enables certificate/TLS behavior emulation from cached real fronts. |
|
||||
| tls_front_dir | `String` | `"tlsfront"` | — | Directory path for TLS front cache storage. |
|
||||
| server_hello_delay_min_ms | `u64` | `0` | — | Minimum server_hello delay for anti-fingerprint behavior (ms). |
|
||||
| server_hello_delay_max_ms | `u64` | `0` | — | Maximum server_hello delay for anti-fingerprint behavior (ms). |
|
||||
| tls_new_session_tickets | `u8` | `0` | — | Number of `NewSessionTicket` messages to emit after handshake. |
|
||||
| tls_full_cert_ttl_secs | `u64` | `90` | — | TTL for sending full cert payload per (domain, client IP) tuple. |
|
||||
| alpn_enforce | `bool` | `true` | — | Enforces ALPN echo behavior based on client preference. |
|
||||
| mask_proxy_protocol | `u8` | `0` | — | PROXY protocol mode for mask backend (`0` disabled, `1` v1, `2` v2). |
|
||||
| mask_shape_hardening | `bool` | `true` | — | Enables client->mask shape-channel hardening by applying controlled tail padding to bucket boundaries on mask relay shutdown. |
|
||||
| mask_shape_hardening_aggressive_mode | `bool` | `false` | Requires `mask_shape_hardening = true`. | Opt-in aggressive shaping profile: allows shaping on backend-silent non-EOF paths and switches above-cap blur to strictly positive random tail. |
|
||||
| mask_shape_bucket_floor_bytes | `usize` | `512` | Must be `> 0`; should be `<= mask_shape_bucket_cap_bytes`. | Minimum bucket size used by shape-channel hardening. |
|
||||
| mask_shape_bucket_cap_bytes | `usize` | `4096` | Must be `>= mask_shape_bucket_floor_bytes`. | Maximum bucket size used by shape-channel hardening; traffic above cap is not padded further. |
|
||||
| mask_shape_above_cap_blur | `bool` | `false` | Requires `mask_shape_hardening = true`; requires `mask_shape_above_cap_blur_max_bytes > 0`. | Adds bounded randomized tail bytes even when forwarded size already exceeds cap. |
|
||||
| mask_shape_above_cap_blur_max_bytes | `usize` | `512` | Must be `<= 1048576`; must be `> 0` when `mask_shape_above_cap_blur = true`. | Maximum randomized extra bytes appended above cap. |
|
||||
| mask_relay_max_bytes | `usize` | `5242880` | Must be `> 0`; must be `<= 67108864`. | Maximum relayed bytes per direction on unauthenticated masking fallback path. |
|
||||
| mask_classifier_prefetch_timeout_ms | `u64` | `5` | Must be within `[5, 50]`. | Timeout budget (ms) for extending fragmented initial classifier window on masking fallback. |
|
||||
| mask_timing_normalization_enabled | `bool` | `false` | Requires `mask_timing_normalization_floor_ms > 0`; requires `ceiling >= floor`. | Enables timing envelope normalization on masking outcomes. |
|
||||
| mask_timing_normalization_floor_ms | `u64` | `0` | Must be `> 0` when timing normalization is enabled; must be `<= ceiling`. | Lower bound (ms) for masking outcome normalization target. |
|
||||
| mask_timing_normalization_ceiling_ms | `u64` | `0` | Must be `>= floor`; must be `<= 60000`. | Upper bound (ms) for masking outcome normalization target. |
|
||||
|
||||
### Shape-channel hardening notes (`[censorship]`)
|
||||
|
||||
These parameters are designed to reduce one specific fingerprint source during masking: the exact number of bytes sent from the proxy to `mask_host` for invalid or probing traffic.
|
||||
|
||||
Without hardening, a censor can often correlate probe input length with backend-observed length very precisely (for example: `5 + body_sent` on early TLS reject paths). That creates a length-based classifier signal.
|
||||
|
||||
When `mask_shape_hardening = true`, Telemt pads the **client->mask** stream tail to a bucket boundary at relay shutdown:
|
||||
|
||||
- Total bytes sent to mask are first measured.
|
||||
- A bucket is selected using powers of two starting from `mask_shape_bucket_floor_bytes`.
|
||||
- Padding is added only if total bytes are below `mask_shape_bucket_cap_bytes`.
|
||||
- If bytes already exceed cap, no extra padding is added.
|
||||
|
||||
This means multiple nearby probe sizes collapse into the same backend-observed size class, making active classification harder.
|
||||
|
||||
What each parameter changes in practice:
|
||||
|
||||
- `mask_shape_hardening`
|
||||
Enables or disables this entire length-shaping stage on the fallback path.
|
||||
When `false`, backend-observed length stays close to the real forwarded probe length.
|
||||
When `true`, clean relay shutdown can append random padding bytes to move the total into a bucket.
|
||||
|
||||
- `mask_shape_bucket_floor_bytes`
|
||||
Sets the first bucket boundary used for small probes.
|
||||
Example: with floor `512`, a malformed probe that would otherwise forward `37` bytes can be expanded to `512` bytes on clean EOF.
|
||||
Larger floor values hide very small probes better, but increase egress cost.
|
||||
|
||||
- `mask_shape_bucket_cap_bytes`
|
||||
Sets the largest bucket Telemt will pad up to with bucket logic.
|
||||
Example: with cap `4096`, a forwarded total of `1800` bytes may be padded to `2048` or `4096` depending on the bucket ladder, but a total already above `4096` will not be bucket-padded further.
|
||||
Larger cap values increase the range over which size classes are collapsed, but also increase worst-case overhead.
|
||||
|
||||
- Clean EOF matters in conservative mode
|
||||
In the default profile, shape padding is intentionally conservative: it is applied on clean relay shutdown, not on every timeout/drip path.
|
||||
This avoids introducing new timeout-tail artifacts that some backends or tests interpret as a separate fingerprint.
|
||||
|
||||
Practical trade-offs:
|
||||
|
||||
- Better anti-fingerprinting on size/shape channel.
|
||||
- Slightly higher egress overhead for small probes due to padding.
|
||||
- Behavior is intentionally conservative and enabled by default.
|
||||
|
||||
Recommended starting profile:
|
||||
|
||||
- `mask_shape_hardening = true` (default)
|
||||
- `mask_shape_bucket_floor_bytes = 512`
|
||||
- `mask_shape_bucket_cap_bytes = 4096`
|
||||
|
||||
### Aggressive mode notes (`[censorship]`)
|
||||
|
||||
`mask_shape_hardening_aggressive_mode` is an opt-in profile for higher anti-classifier pressure.
|
||||
|
||||
- Default is `false` to preserve conservative timeout/no-tail behavior.
|
||||
- Requires `mask_shape_hardening = true`.
|
||||
- When enabled, backend-silent non-EOF masking paths may be shaped.
|
||||
- When enabled together with above-cap blur, the random extra tail uses `[1, max]` instead of `[0, max]`.
|
||||
|
||||
What changes when aggressive mode is enabled:
|
||||
|
||||
- Backend-silent timeout paths can be shaped
|
||||
In default mode, a client that keeps the socket half-open and times out will usually not receive shape padding on that path.
|
||||
In aggressive mode, Telemt may still shape that backend-silent session if no backend bytes were returned.
|
||||
This is specifically aimed at active probes that try to avoid EOF in order to preserve an exact backend-observed length.
|
||||
|
||||
- Above-cap blur always adds at least one byte
|
||||
In default mode, above-cap blur may choose `0`, so some oversized probes still land on their exact base forwarded length.
|
||||
In aggressive mode, that exact-base sample is removed by construction.
|
||||
|
||||
- Tradeoff
|
||||
Aggressive mode improves resistance to active length classifiers, but it is more opinionated and less conservative.
|
||||
If your deployment prioritizes strict compatibility with timeout/no-tail semantics, leave it disabled.
|
||||
If your threat model includes repeated active probing by a censor, this mode is the stronger profile.
|
||||
|
||||
Use this mode only when your threat model prioritizes classifier resistance over strict compatibility with conservative masking semantics.
|
||||
|
||||
### Above-cap blur notes (`[censorship]`)
|
||||
|
||||
`mask_shape_above_cap_blur` adds a second-stage blur for very large probes that are already above `mask_shape_bucket_cap_bytes`.
|
||||
|
||||
- A random tail in `[0, mask_shape_above_cap_blur_max_bytes]` is appended in default mode.
|
||||
- In aggressive mode, the random tail becomes strictly positive: `[1, mask_shape_above_cap_blur_max_bytes]`.
|
||||
- This reduces exact-size leakage above cap at bounded overhead.
|
||||
- Keep `mask_shape_above_cap_blur_max_bytes` conservative to avoid unnecessary egress growth.
|
||||
|
||||
Operational meaning:
|
||||
|
||||
- Without above-cap blur
|
||||
A probe that forwards `5005` bytes will still look like `5005` bytes to the backend if it is already above cap.
|
||||
|
||||
- With above-cap blur enabled
|
||||
That same probe may look like any value in a bounded window above its base length.
|
||||
Example with `mask_shape_above_cap_blur_max_bytes = 64`:
|
||||
backend-observed size becomes `5005..5069` in default mode, or `5006..5069` in aggressive mode.
|
||||
|
||||
- Choosing `mask_shape_above_cap_blur_max_bytes`
|
||||
Small values reduce cost but preserve more separability between far-apart oversized classes.
|
||||
Larger values blur oversized classes more aggressively, but add more egress overhead and more output variance.
|
||||
|
||||
### Timing normalization envelope notes (`[censorship]`)
|
||||
|
||||
`mask_timing_normalization_enabled` smooths timing differences between masking outcomes by applying a target duration envelope.
|
||||
|
||||
- A random target is selected in `[mask_timing_normalization_floor_ms, mask_timing_normalization_ceiling_ms]`.
|
||||
- Fast paths are delayed up to the selected target.
|
||||
- Slow paths are not forced to finish by the ceiling (the envelope is best-effort shaping, not truncation).
|
||||
|
||||
Recommended starting profile for timing shaping:
|
||||
|
||||
- `mask_timing_normalization_enabled = true`
|
||||
- `mask_timing_normalization_floor_ms = 180`
|
||||
- `mask_timing_normalization_ceiling_ms = 320`
|
||||
|
||||
If your backend or network is very bandwidth-constrained, reduce the ceiling first. If probes are still too distinguishable in your environment, increase the floor gradually.
|
||||
|
||||
## [access]
|
||||
|
||||
| Parameter | Type | Default | Constraints / validation | TOML shape example | Description |
|
||||
|---|---|---|---|---|---|
|
||||
| users | `Map<String, String>` | `{"default": "000…000"}` | Secret must be 32 hex characters. | `[access.users]`<br>`user = "32-hex secret"`<br>`user2 = "32-hex secret"` | User credentials map used for client authentication. |
|
||||
| user_ad_tags | `Map<String, String>` | `{}` | Every value must be exactly 32 hex characters. | `[access.user_ad_tags]`<br>`user = "32-hex ad_tag"` | Per-user ad tags used as override over `general.ad_tag`. |
|
||||
| user_max_tcp_conns | `Map<String, usize>` | `{}` | — | `[access.user_max_tcp_conns]`<br>`user = 500` | Per-user maximum concurrent TCP connections. |
|
||||
| user_expirations | `Map<String, DateTime<Utc>>` | `{}` | Timestamp must be valid RFC3339/ISO-8601 datetime. | `[access.user_expirations]`<br>`user = "2026-12-31T23:59:59Z"` | Per-user account expiration timestamps. |
|
||||
| user_data_quota | `Map<String, u64>` | `{}` | — | `[access.user_data_quota]`<br>`user = 1073741824` | Per-user traffic quota in bytes. |
|
||||
| user_max_unique_ips | `Map<String, usize>` | `{}` | — | `[access.user_max_unique_ips]`<br>`user = 16` | Per-user unique source IP limits. |
|
||||
| user_max_unique_ips_global_each | `usize` | `0` | — | `user_max_unique_ips_global_each = 0` | Global fallback used when `[access.user_max_unique_ips]` has no per-user override. |
|
||||
| user_max_unique_ips_mode | `"active_window" \| "time_window" \| "combined"` | `"active_window"` | — | `user_max_unique_ips_mode = "active_window"` | Unique source IP limit accounting mode. |
|
||||
| user_max_unique_ips_window_secs | `u64` | `30` | Must be `> 0`. | `user_max_unique_ips_window_secs = 30` | Window size (seconds) used by unique-IP accounting modes that use time windows. |
|
||||
| replay_check_len | `usize` | `65536` | — | `replay_check_len = 65536` | Replay-protection storage length. |
|
||||
| replay_window_secs | `u64` | `1800` | — | `replay_window_secs = 1800` | Replay-protection window in seconds. |
|
||||
| ignore_time_skew | `bool` | `false` | — | `ignore_time_skew = false` | Disables client/server timestamp skew checks in replay validation when enabled. |
|
||||
|
||||
## [[upstreams]]
|
||||
|
||||
| Parameter | Type | Default | Constraints / validation | Description |
|
||||
|---|---|---|---|---|
|
||||
| type | `"direct" \| "socks4" \| "socks5"` | — | Required field. | Upstream transport type selector. |
|
||||
| weight | `u16` | `1` | — | Base weight used by weighted-random upstream selection. |
|
||||
| enabled | `bool` | `true` | — | Disabled entries are excluded from upstream selection at runtime. |
|
||||
| scopes | `String` | `""` | — | Comma-separated scope tags used for request-level upstream filtering. |
|
||||
| interface | `String \| null` | `null` | Optional; type-specific runtime rules apply. | Optional outbound interface/local bind hint (supported with type-specific rules). |
|
||||
| bind_addresses | `String[] \| null` | `null` | Applies to `type = "direct"`. | Optional explicit local source bind addresses for `type = "direct"`. |
|
||||
| address | `String` | — | Required for `type = "socks4"` and `type = "socks5"`. | SOCKS server endpoint (`host:port` or `ip:port`) for SOCKS upstream types. |
|
||||
| user_id | `String \| null` | `null` | Only for `type = "socks4"`. | SOCKS4 CONNECT user ID (`type = "socks4"` only). |
|
||||
| username | `String \| null` | `null` | Only for `type = "socks5"`. | SOCKS5 username (`type = "socks5"` only). |
|
||||
| password | `String \| null` | `null` | Only for `type = "socks5"`. | SOCKS5 password (`type = "socks5"` only). |
|
||||
139
docs/FAQ.en.md
Normal file
139
docs/FAQ.en.md
Normal file
@@ -0,0 +1,139 @@
|
||||
## How to set up "proxy sponsor" channel and statistics via @MTProxybot bot
|
||||
|
||||
1. Go to @MTProxybot bot.
|
||||
2. Enter the command `/newproxy`
|
||||
3. Send the server IP and port. For example: 1.2.3.4:443
|
||||
4. Open the config `nano /etc/telemt/telemt.toml`.
|
||||
5. Copy and send the user secret from the [access.users] section to the bot.
|
||||
6. Copy the tag received from the bot. For example 1234567890abcdef1234567890abcdef.
|
||||
> [!WARNING]
|
||||
> The link provided by the bot will not work. Do not copy or use it!
|
||||
7. Uncomment the ad_tag parameter and enter the tag received from the bot.
|
||||
8. Uncomment/add the parameter `use_middle_proxy = true`.
|
||||
|
||||
Config example:
|
||||
```toml
|
||||
[general]
|
||||
ad_tag = "1234567890abcdef1234567890abcdef"
|
||||
use_middle_proxy = true
|
||||
```
|
||||
9. Save the config. Ctrl+S -> Ctrl+X.
|
||||
10. Restart telemt `systemctl restart telemt`.
|
||||
11. In the bot, send the command `/myproxies` and select the added server.
|
||||
12. Click the "Set promotion" button.
|
||||
13. Send a **public link** to the channel. Private channels cannot be added!
|
||||
14. Wait approximately 1 hour for the information to update on Telegram servers.
|
||||
> [!WARNING]
|
||||
> You will not see the "proxy sponsor" if you are already subscribed to the channel.
|
||||
|
||||
**You can also set up different channels for different users.**
|
||||
```toml
|
||||
[access.user_ad_tags]
|
||||
hello = "ad_tag"
|
||||
hello2 = "ad_tag2"
|
||||
```
|
||||
|
||||
## Why is middle proxy (ME) needed
|
||||
https://github.com/telemt/telemt/discussions/167
|
||||
|
||||
## How many people can use 1 link
|
||||
|
||||
By default, 1 link can be used by any number of people.
|
||||
You can limit the number of IPs using the proxy.
|
||||
```toml
|
||||
[access.user_max_unique_ips]
|
||||
hello = 1
|
||||
```
|
||||
This parameter limits how many unique IPs can use 1 link simultaneously. If one user disconnects, a second user can connect. Also, multiple users can sit behind the same IP.
|
||||
|
||||
## How to create multiple different links
|
||||
|
||||
1. Generate the required number of secrets `openssl rand -hex 16`
|
||||
2. Open the config `nano /etc/telemt/telemt.toml`
|
||||
3. Add new users.
|
||||
```toml
|
||||
[access.users]
|
||||
user1 = "00000000000000000000000000000001"
|
||||
user2 = "00000000000000000000000000000002"
|
||||
user3 = "00000000000000000000000000000003"
|
||||
```
|
||||
4. Save the config. Ctrl+S -> Ctrl+X. You don't need to restart telemt.
|
||||
5. Get the links via
|
||||
```bash
|
||||
curl -s http://127.0.0.1:9091/v1/users | jq
|
||||
```
|
||||
|
||||
## How to view metrics
|
||||
|
||||
1. Open the config `nano /etc/telemt/telemt.toml`
|
||||
2. Add the following parameters
|
||||
```toml
|
||||
[server]
|
||||
metrics_port = 9090
|
||||
metrics_whitelist = ["127.0.0.1/32", "::1/128", "0.0.0.0/0"]
|
||||
```
|
||||
3. Save the config. Ctrl+S -> Ctrl+X.
|
||||
4. Metrics are available at SERVER_IP:9090/metrics.
|
||||
> [!WARNING]
|
||||
> "0.0.0.0/0" in metrics_whitelist opens access from any IP. Replace with your own IP. For example "1.2.3.4"
|
||||
|
||||
## Additional parameters
|
||||
|
||||
### Domain in link instead of IP
|
||||
To specify a domain in the links, add to the `[general.links]` section of the config file.
|
||||
```toml
|
||||
[general.links]
|
||||
public_host = "proxy.example.com"
|
||||
```
|
||||
|
||||
### Server connection limit
|
||||
Limits the total number of open connections to the server:
|
||||
```toml
|
||||
[server]
|
||||
max_connections = 10000 # 0 - unlimited, 10000 - default
|
||||
```
|
||||
|
||||
### Upstream Manager
|
||||
To specify an upstream, add to the `[[upstreams]]` section of the config.toml file:
|
||||
#### Binding to IP
|
||||
```toml
|
||||
[[upstreams]]
|
||||
type = "direct"
|
||||
weight = 1
|
||||
enabled = true
|
||||
interface = "192.168.1.100" # Change to your outgoing IP
|
||||
```
|
||||
#### SOCKS4/5 as Upstream
|
||||
- Without authentication:
|
||||
```toml
|
||||
[[upstreams]]
|
||||
type = "socks5" # Specify SOCKS4 or SOCKS5
|
||||
address = "1.2.3.4:1234" # SOCKS-server Address
|
||||
weight = 1 # Set Weight for Scenarios
|
||||
enabled = true
|
||||
```
|
||||
|
||||
- With authentication:
|
||||
```toml
|
||||
[[upstreams]]
|
||||
type = "socks5" # Specify SOCKS4 or SOCKS5
|
||||
address = "1.2.3.4:1234" # SOCKS-server Address
|
||||
username = "user" # Username for Auth on SOCKS-server
|
||||
password = "pass" # Password for Auth on SOCKS-server
|
||||
weight = 1 # Set Weight for Scenarios
|
||||
enabled = true
|
||||
```
|
||||
|
||||
#### Shadowsocks as Upstream
|
||||
Requires `use_middle_proxy = false`.
|
||||
|
||||
```toml
|
||||
[general]
|
||||
use_middle_proxy = false
|
||||
|
||||
[[upstreams]]
|
||||
type = "shadowsocks"
|
||||
url = "ss://2022-blake3-aes-256-gcm:BASE64_KEY@1.2.3.4:8388"
|
||||
weight = 1
|
||||
enabled = true
|
||||
```
|
||||
@@ -1,9 +1,9 @@
|
||||
## Как настроить канал "спонсор прокси"
|
||||
## Как настроить канал "спонсор прокси" и статистику через бота @MTProxybot
|
||||
|
||||
1. Зайти в бота @MTProxybot.
|
||||
2. Ввести команду `/newproxy`
|
||||
3. Отправить IP и порт сервера. Например: 1.2.3.4:443
|
||||
4. Открыть конфиг `nano /etc/telemt.toml`.
|
||||
4. Открыть конфиг `nano /etc/telemt/telemt.toml`.
|
||||
5. Скопировать и отправить боту секрет пользователя из раздела [access.users].
|
||||
6. Скопировать полученный tag у бота. Например 1234567890abcdef1234567890abcdef.
|
||||
> [!WARNING]
|
||||
@@ -26,6 +26,17 @@ use_middle_proxy = true
|
||||
> [!WARNING]
|
||||
> У вас не будет отображаться "спонсор прокси" если вы уже подписаны на канал.
|
||||
|
||||
**Также вы можете настроить разные каналы для разных пользователей.**
|
||||
```toml
|
||||
[access.user_ad_tags]
|
||||
hello = "ad_tag"
|
||||
hello2 = "ad_tag2"
|
||||
```
|
||||
|
||||
## Зачем нужен middle proxy (ME)
|
||||
https://github.com/telemt/telemt/discussions/167
|
||||
|
||||
|
||||
## Сколько человек может пользоваться 1 ссылкой
|
||||
|
||||
По умолчанию 1 ссылкой может пользоваться сколько угодно человек.
|
||||
@@ -48,7 +59,10 @@ user2 = "00000000000000000000000000000002"
|
||||
user3 = "00000000000000000000000000000003"
|
||||
```
|
||||
4. Сохранить конфиг. Ctrl+S -> Ctrl+X. Перезапускать telemt не нужно.
|
||||
5. Получить ссылки через `journalctl -u telemt -n -g "links" --no-pager -o cat | tac`
|
||||
5. Получить ссылки через
|
||||
```bash
|
||||
curl -s http://127.0.0.1:9091/v1/users | jq
|
||||
```
|
||||
|
||||
## Как посмотреть метрики
|
||||
|
||||
@@ -63,3 +77,64 @@ metrics_whitelist = ["127.0.0.1/32", "::1/128", "0.0.0.0/0"]
|
||||
4. Метрики доступны по адресу SERVER_IP:9090/metrics.
|
||||
> [!WARNING]
|
||||
> "0.0.0.0/0" в metrics_whitelist открывает доступ с любого IP. Замените на свой ip. Например "1.2.3.4"
|
||||
|
||||
## Дополнительные параметры
|
||||
|
||||
### Домен в ссылке вместо IP
|
||||
Чтобы указать домен в ссылках, добавьте в секцию `[general.links]` файла config.
|
||||
```toml
|
||||
[general.links]
|
||||
public_host = "proxy.example.com"
|
||||
```
|
||||
|
||||
### Общий лимит подключений к серверу
|
||||
Ограничивает общее число открытых подключений к серверу:
|
||||
```toml
|
||||
[server]
|
||||
max_connections = 10000 # 0 - unlimited, 10000 - default
|
||||
```
|
||||
|
||||
### Upstream Manager
|
||||
Чтобы указать апстрим, добавьте в секцию `[[upstreams]]` файла config.toml:
|
||||
#### Привязка к IP
|
||||
```toml
|
||||
[[upstreams]]
|
||||
type = "direct"
|
||||
weight = 1
|
||||
enabled = true
|
||||
interface = "192.168.1.100" # Change to your outgoing IP
|
||||
```
|
||||
#### SOCKS4/5 как Upstream
|
||||
- Без авторизации:
|
||||
```toml
|
||||
[[upstreams]]
|
||||
type = "socks5" # Specify SOCKS4 or SOCKS5
|
||||
address = "1.2.3.4:1234" # SOCKS-server Address
|
||||
weight = 1 # Set Weight for Scenarios
|
||||
enabled = true
|
||||
```
|
||||
|
||||
- С авторизацией:
|
||||
```toml
|
||||
[[upstreams]]
|
||||
type = "socks5" # Specify SOCKS4 or SOCKS5
|
||||
address = "1.2.3.4:1234" # SOCKS-server Address
|
||||
username = "user" # Username for Auth on SOCKS-server
|
||||
password = "pass" # Password for Auth on SOCKS-server
|
||||
weight = 1 # Set Weight for Scenarios
|
||||
enabled = true
|
||||
```
|
||||
|
||||
#### Shadowsocks как Upstream
|
||||
Требует `use_middle_proxy = false`.
|
||||
|
||||
```toml
|
||||
[general]
|
||||
use_middle_proxy = false
|
||||
|
||||
[[upstreams]]
|
||||
type = "shadowsocks"
|
||||
url = "ss://2022-blake3-aes-256-gcm:BASE64_KEY@1.2.3.4:8388"
|
||||
weight = 1
|
||||
enabled = true
|
||||
```
|
||||
|
||||
92
docs/LICENSE/LICENSE.de.md
Normal file
92
docs/LICENSE/LICENSE.de.md
Normal file
@@ -0,0 +1,92 @@
|
||||
# Öffentliche TELEMT-Lizenz 3
|
||||
|
||||
***Alle Rechte vorbehalten (c) 2026 Telemt***
|
||||
|
||||
Hiermit wird jeder Person, die eine Kopie dieser Software und der dazugehörigen Dokumentation (nachfolgend "Software") erhält, unentgeltlich die Erlaubnis erteilt, die Software ohne Einschränkungen zu nutzen, einschließlich des Rechts, die Software zu verwenden, zu vervielfältigen, zu ändern, abgeleitete Werke zu erstellen, zu verbinden, zu veröffentlichen, zu verbreiten, zu unterlizenzieren und/oder Kopien der Software zu verkaufen sowie diese Rechte auch denjenigen einzuräumen, denen die Software zur Verfügung gestellt wird, vorausgesetzt, dass sämtliche Urheberrechtshinweise sowie die Bedingungen und Bestimmungen dieser Lizenz eingehalten werden.
|
||||
|
||||
### Begriffsbestimmungen
|
||||
|
||||
Für die Zwecke dieser Lizenz gelten die folgenden Definitionen:
|
||||
|
||||
**"Software" (Software)** — die Telemt-Software einschließlich Quellcode, Dokumentation und sämtlicher zugehöriger Dateien, die unter den Bedingungen dieser Lizenz verbreitet werden.
|
||||
|
||||
**"Contributor" (Contributor)** — jede natürliche oder juristische Person, die Code, Patches, Dokumentation oder andere Materialien eingereicht hat, die von den Maintainers des Projekts angenommen und in die Software aufgenommen wurden.
|
||||
|
||||
**"Beitrag" (Contribution)** — jedes urheberrechtlich geschützte Werk, das bewusst zur Aufnahme in die Software eingereicht wurde.
|
||||
|
||||
**"Modifizierte Version" (Modified Version)** — jede Version der Software, die gegenüber der ursprünglichen Software geändert, angepasst, erweitert oder anderweitig modifiziert wurde.
|
||||
|
||||
**"Maintainers" (Maintainers)** — natürliche oder juristische Personen, die für das offizielle Telemt-Projekt und dessen offizielle Veröffentlichungen verantwortlich sind.
|
||||
|
||||
### 1 Urheberrechtshinweis (Attribution)
|
||||
|
||||
Bei der Weitergabe der Software, sowohl in Form des Quellcodes als auch in binärer Form, MÜSSEN folgende Elemente erhalten bleiben:
|
||||
|
||||
- der oben genannte Urheberrechtshinweis;
|
||||
- der vollständige Text dieser Lizenz;
|
||||
- sämtliche bestehenden Hinweise auf Urheberschaft.
|
||||
|
||||
### 2 Hinweis auf Modifikationen
|
||||
|
||||
Wenn Änderungen an der Software vorgenommen werden, MUSS die Person, die diese Änderungen vorgenommen hat, eindeutig darauf hinweisen, dass die Software modifiziert wurde, und eine kurze Beschreibung der vorgenommenen Änderungen beifügen.
|
||||
|
||||
Modifizierte Versionen der Software DÜRFEN NICHT als die originale Version von Telemt dargestellt werden.
|
||||
|
||||
### 3 Marken und Bezeichnungen
|
||||
|
||||
Diese Lizenz GEWÄHRT KEINE Rechte zur Nutzung der Bezeichnung **"Telemt"**, des Telemt-Logos oder sonstiger Marken, Kennzeichen oder Branding-Elemente von Telemt.
|
||||
|
||||
Weiterverbreitete oder modifizierte Versionen der Software DÜRFEN die Bezeichnung Telemt nicht in einer Weise verwenden, die bei Nutzern den Eindruck eines offiziellen Ursprungs oder einer Billigung durch das Telemt-Projekt erwecken könnte, sofern hierfür keine ausdrückliche Genehmigung der Maintainers vorliegt.
|
||||
|
||||
Die Verwendung der Bezeichnung **Telemt** zur Beschreibung einer modifizierten Version der Software ist nur zulässig, wenn diese Version eindeutig als modifiziert oder inoffiziell gekennzeichnet ist.
|
||||
|
||||
Jegliche Verbreitung, die Nutzer vernünftigerweise darüber täuschen könnte, dass es sich um eine offizielle Veröffentlichung von Telemt handelt, ist untersagt.
|
||||
|
||||
### 4 Transparenz bei der Verbreitung von Binärversionen
|
||||
|
||||
Im Falle der Verbreitung kompilierter Binärversionen der Software wird der Verbreiter HIERMIT ERMUTIGT (encouraged), soweit dies vernünftigerweise möglich ist, Zugang zum entsprechenden Quellcode sowie zu den Build-Anweisungen bereitzustellen.
|
||||
|
||||
Diese Praxis trägt zur Transparenz bei und ermöglicht es Empfängern, die Integrität und Reproduzierbarkeit der verbreiteten Builds zu überprüfen.
|
||||
|
||||
## 5 Gewährung einer Patentlizenz und Beendigung von Rechten
|
||||
|
||||
Jeder Contributor gewährt den Empfängern der Software eine unbefristete, weltweite, nicht-exklusive, unentgeltliche, lizenzgebührenfreie und unwiderrufliche Patentlizenz für:
|
||||
|
||||
- die Herstellung,
|
||||
- die Beauftragung der Herstellung,
|
||||
- die Nutzung,
|
||||
- das Anbieten zum Verkauf,
|
||||
- den Verkauf,
|
||||
- den Import,
|
||||
- sowie jede sonstige Verbreitung der Software.
|
||||
|
||||
Diese Patentlizenz erstreckt sich ausschließlich auf solche Patentansprüche, die notwendigerweise durch den jeweiligen Beitrag des Contributors allein oder in Kombination mit der Software verletzt würden.
|
||||
|
||||
Leitet eine Person ein Patentverfahren ein oder beteiligt sich daran, einschließlich Gegenklagen oder Kreuzklagen, mit der Behauptung, dass die Software oder ein darin enthaltener Beitrag ein Patent verletzt, **erlöschen sämtliche durch diese Lizenz gewährten Rechte für diese Person unmittelbar mit Einreichung der Klage**.
|
||||
|
||||
Darüber hinaus erlöschen alle durch diese Lizenz gewährten Rechte **automatisch**, wenn eine Person ein gerichtliches Verfahren einleitet, in dem behauptet wird, dass die Software selbst ein Patent oder andere Rechte des geistigen Eigentums verletzt.
|
||||
|
||||
### 6 Beteiligung und Beiträge zur Entwicklung
|
||||
|
||||
Sofern ein Contributor nicht ausdrücklich etwas anderes erklärt, gilt jeder Beitrag, der bewusst zur Aufnahme in die Software eingereicht wird, als unter den Bedingungen dieser Lizenz lizenziert.
|
||||
|
||||
Durch die Einreichung eines Beitrags gewährt der Contributor den Maintainers des Telemt-Projekts sowie allen Empfängern der Software die in dieser Lizenz beschriebenen Rechte in Bezug auf diesen Beitrag.
|
||||
|
||||
### 7 Urheberhinweis bei Netzwerk- und Servicenutzung
|
||||
|
||||
Wird die Software zur Bereitstellung eines öffentlich zugänglichen Netzwerkdienstes verwendet, MUSS der Betreiber dieses Dienstes einen Hinweis auf die Urheberschaft von Telemt an mindestens einer der folgenden Stellen anbringen:
|
||||
|
||||
* in der Servicedokumentation;
|
||||
* in der Dienstbeschreibung;
|
||||
* auf einer Seite "Über" oder einer vergleichbaren Informationsseite;
|
||||
* in anderen für Nutzer zugänglichen Materialien, die in angemessenem Zusammenhang mit dem Dienst stehen.
|
||||
|
||||
Ein solcher Hinweis DARF NICHT den Eindruck erwecken, dass der Dienst vom Telemt-Projekt oder dessen Maintainers unterstützt oder offiziell gebilligt wird.
|
||||
|
||||
### 8 Haftungsausschluss und salvatorische Klausel
|
||||
|
||||
DIE SOFTWARE WIRD "WIE BESEHEN" BEREITGESTELLT, OHNE JEGLICHE AUSDRÜCKLICHE ODER STILLSCHWEIGENDE GEWÄHRLEISTUNG, EINSCHLIESSLICH, ABER NICHT BESCHRÄNKT AUF GEWÄHRLEISTUNGEN DER MARKTGÄNGIGKEIT, DER EIGNUNG FÜR EINEN BESTIMMTEN ZWECK UND DER NICHTVERLETZUNG VON RECHTEN.
|
||||
|
||||
IN KEINEM FALL HAFTEN DIE AUTOREN ODER RECHTEINHABER FÜR IRGENDWELCHE ANSPRÜCHE, SCHÄDEN ODER SONSTIGE HAFTUNG, DIE AUS VERTRAG, UNERLAUBTER HANDLUNG ODER AUF ANDERE WEISE AUS DER SOFTWARE ODER DER NUTZUNG DER SOFTWARE ENTSTEHEN.
|
||||
|
||||
SOLLTE EINE BESTIMMUNG DIESER LIZENZ ALS UNWIRKSAM ODER NICHT DURCHSETZBAR ANGESEHEN WERDEN, IST DIESE BESTIMMUNG SO AUSZULEGEN, DASS SIE DEM URSPRÜNGLICHEN WILLEN DER PARTEIEN MÖGLICHST NAHEKOMMT; DIE ÜBRIGEN BESTIMMUNGEN BLEIBEN DAVON UNBERÜHRT UND IN VOLLER WIRKUNG.
|
||||
143
docs/LICENSE/LICENSE.en.md
Normal file
143
docs/LICENSE/LICENSE.en.md
Normal file
@@ -0,0 +1,143 @@
|
||||
###### TELEMT Public License 3 ######
|
||||
##### Copyright (c) 2026 Telemt #####
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this Software and associated documentation files (the "Software"),
|
||||
to use, reproduce, modify, prepare derivative works of, merge, publish,
|
||||
distribute, sublicense, and/or sell copies of the Software, and to permit
|
||||
persons to whom the Software is furnished to do so, provided that all
|
||||
copyright notices, license terms, and conditions set forth in this License
|
||||
are preserved and complied with.
|
||||
|
||||
### Official Translations
|
||||
|
||||
The canonical version of this License is the English version.
|
||||
|
||||
Official translations are provided for informational purposes only
|
||||
and for convenience, and do not have legal force. In case of any
|
||||
discrepancy, the English version of this License shall prevail.
|
||||
|
||||
Available versions:
|
||||
- English in Markdown: docs/LICENSE/LICENSE.md
|
||||
- German: docs/LICENSE/LICENSE.de.md
|
||||
- Russian: docs/LICENSE/LICENSE.ru.md
|
||||
|
||||
### Definitions
|
||||
|
||||
For the purposes of this License:
|
||||
|
||||
"Software" means the Telemt software, including source code, documentation,
|
||||
and any associated files distributed under this License.
|
||||
|
||||
"Contributor" means any person or entity that submits code, patches,
|
||||
documentation, or other contributions to the Software that are accepted
|
||||
into the Software by the maintainers.
|
||||
|
||||
"Contribution" means any work of authorship intentionally submitted
|
||||
to the Software for inclusion in the Software.
|
||||
|
||||
"Modified Version" means any version of the Software that has been
|
||||
changed, adapted, extended, or otherwise modified from the original
|
||||
Software.
|
||||
|
||||
"Maintainers" means the individuals or entities responsible for
|
||||
the official Telemt project and its releases.
|
||||
|
||||
#### 1 Attribution
|
||||
|
||||
Redistributions of the Software, in source or binary form, MUST RETAIN the
|
||||
above copyright notice, this license text, and any existing attribution
|
||||
notices.
|
||||
|
||||
#### 2 Modification Notice
|
||||
|
||||
If you modify the Software, you MUST clearly state that the Software has been
|
||||
modified and include a brief description of the changes made.
|
||||
|
||||
Modified versions MUST NOT be presented as the original Telemt.
|
||||
|
||||
#### 3 Trademark and Branding
|
||||
|
||||
This license DOES NOT grant permission to use the name "Telemt",
|
||||
the Telemt logo, or any Telemt trademarks or branding.
|
||||
|
||||
Redistributed or modified versions of the Software MAY NOT use the Telemt
|
||||
name in a way that suggests endorsement or official origin without explicit
|
||||
permission from the Telemt maintainers.
|
||||
|
||||
Use of the name "Telemt" to describe a modified version of the Software
|
||||
is permitted only if the modified version is clearly identified as a
|
||||
modified or unofficial version.
|
||||
|
||||
Any distribution that could reasonably confuse users into believing that
|
||||
the software is an official Telemt release is prohibited.
|
||||
|
||||
#### 4 Binary Distribution Transparency
|
||||
|
||||
If you distribute compiled binaries of the Software,
|
||||
you are ENCOURAGED to provide access to the corresponding
|
||||
source code and build instructions where reasonably possible.
|
||||
|
||||
This helps preserve transparency and allows recipients to verify the
|
||||
integrity and reproducibility of distributed builds.
|
||||
|
||||
#### 5 Patent Grant and Defensive Termination Clause
|
||||
|
||||
Each contributor grants you a perpetual, worldwide, non-exclusive,
|
||||
no-charge, royalty-free, irrevocable patent license to make, have made,
|
||||
use, offer to sell, sell, import, and otherwise transfer the Software.
|
||||
|
||||
This patent license applies only to those patent claims necessarily
|
||||
infringed by the contributor’s contribution alone or by combination of
|
||||
their contribution with the Software.
|
||||
|
||||
If you initiate or participate in any patent litigation, including
|
||||
cross-claims or counterclaims, alleging that the Software or any
|
||||
contribution incorporated within the Software constitutes patent
|
||||
infringement, then **all rights granted to you under this license shall
|
||||
terminate immediately** as of the date such litigation is filed.
|
||||
|
||||
Additionally, if you initiate legal action alleging that the
|
||||
Software itself infringes your patent or other intellectual
|
||||
property rights, then all rights granted to you under this
|
||||
license SHALL TERMINATE automatically.
|
||||
|
||||
#### 6 Contributions
|
||||
|
||||
Unless you explicitly state otherwise, any Contribution intentionally
|
||||
submitted for inclusion in the Software shall be licensed under the terms
|
||||
of this License.
|
||||
|
||||
By submitting a Contribution, you grant the Telemt maintainers and all
|
||||
recipients of the Software the rights described in this License with
|
||||
respect to that Contribution.
|
||||
|
||||
#### 7 Network Use Attribution
|
||||
|
||||
If the Software is used to provide a publicly accessible network service,
|
||||
the operator of such service MUST provide attribution to Telemt in at least
|
||||
one of the following locations:
|
||||
|
||||
- service documentation
|
||||
- service description
|
||||
- an "About" or similar informational page
|
||||
- other user-visible materials reasonably associated with the service
|
||||
|
||||
Such attribution MUST NOT imply endorsement by the Telemt project or its
|
||||
maintainers.
|
||||
|
||||
#### 8 Disclaimer of Warranty and Severability Clause
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
|
||||
|
||||
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
|
||||
DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
|
||||
OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
|
||||
USE OR OTHER DEALINGS IN THE SOFTWARE
|
||||
|
||||
IF ANY PROVISION OF THIS LICENSE IS HELD TO BE INVALID OR UNENFORCEABLE,
|
||||
SUCH PROVISION SHALL BE INTERPRETED TO REFLECT THE ORIGINAL INTENT
|
||||
OF THE PARTIES AS CLOSELY AS POSSIBLE, AND THE REMAINING PROVISIONS
|
||||
SHALL REMAIN IN FULL FORCE AND EFFECT
|
||||
90
docs/LICENSE/LICENSE.ru.md
Normal file
90
docs/LICENSE/LICENSE.ru.md
Normal file
@@ -0,0 +1,90 @@
|
||||
# Публичная лицензия TELEMT 3
|
||||
|
||||
***Все права защищены (c) 2026 Telemt***
|
||||
|
||||
Настоящим любому лицу, получившему копию данного программного обеспечения и сопутствующей документации (далее — "Программное обеспечение"), безвозмездно предоставляется разрешение использовать Программное обеспечение без ограничений, включая право использовать, воспроизводить, изменять, создавать производные произведения, объединять, публиковать, распространять, сублицензировать и (или) продавать копии Программного обеспечения, а также предоставлять такие права лицам, которым предоставляется Программное обеспечение, при условии соблюдения всех уведомлений об авторских правах, условий и положений настоящей Лицензии.
|
||||
|
||||
### Определения
|
||||
|
||||
Для целей настоящей Лицензии применяются следующие определения:
|
||||
|
||||
**"Программное обеспечение" (Software)** — программное обеспечение Telemt, включая исходный код, документацию и любые связанные файлы, распространяемые на условиях настоящей Лицензии.
|
||||
|
||||
**"Контрибьютор" (Contributor)** — любое физическое или юридическое лицо, направившее код, исправления (патчи), документацию или иные материалы, которые были приняты мейнтейнерами проекта и включены в состав Программного обеспечения.
|
||||
|
||||
**"Вклад" (Contribution)** — любое произведение авторского права, намеренно представленное для включения в состав Программного обеспечения.
|
||||
|
||||
**"Модифицированная версия" (Modified Version)** — любая версия Программного обеспечения, которая была изменена, адаптирована, расширена или иным образом модифицирована по сравнению с исходным Программным обеспечением.
|
||||
|
||||
**"Мейнтейнеры" (Maintainers)** — физические или юридические лица, ответственные за официальный проект Telemt и его официальные релизы.
|
||||
|
||||
### 1 Указание авторства
|
||||
|
||||
При распространении Программного обеспечения, как в форме исходного кода, так и в бинарной форме, ДОЛЖНЫ СОХРАНЯТЬСЯ:
|
||||
|
||||
- указанное выше уведомление об авторских правах;
|
||||
- текст настоящей Лицензии;
|
||||
- любые существующие уведомления об авторстве.
|
||||
|
||||
### 2 Уведомление о модификации
|
||||
|
||||
В случае внесения изменений в Программное обеспечение лицо, осуществившее такие изменения, ОБЯЗАНО явно указать, что Программное обеспечение было модифицировано, а также включить краткое описание внесённых изменений.
|
||||
|
||||
Модифицированные версии Программного обеспечения НЕ ДОЛЖНЫ представляться как оригинальная версия Telemt.
|
||||
|
||||
### 3 Товарные знаки и обозначения
|
||||
|
||||
Настоящая Лицензия НЕ ПРЕДОСТАВЛЯЕТ права использовать наименование **"Telemt"**, логотип Telemt, а также любые товарные знаки, фирменные обозначения или элементы бренда Telemt.
|
||||
|
||||
Распространяемые или модифицированные версии Программного обеспечения НЕ ДОЛЖНЫ использовать наименование Telemt таким образом, который может создавать у пользователей впечатление официального происхождения либо одобрения со стороны проекта Telemt без явного разрешения мейнтейнеров проекта.
|
||||
|
||||
Использование наименования **Telemt** для описания модифицированной версии Программного обеспечения допускается только при условии, что такая версия ясно обозначена как модифицированная или неофициальная.
|
||||
|
||||
Запрещается любое распространение, которое может разумно вводить пользователей в заблуждение относительно того, что программное обеспечение является официальным релизом Telemt.
|
||||
|
||||
### 4 Прозрачность распространения бинарных версий
|
||||
|
||||
В случае распространения скомпилированных бинарных версий Программного обеспечения распространитель НАСТОЯЩИМ ПОБУЖДАЕТСЯ предоставлять доступ к соответствующему исходному коду и инструкциям по сборке, если это разумно возможно.
|
||||
|
||||
Такая практика способствует прозрачности распространения и позволяет получателям проверять целостность и воспроизводимость распространяемых сборок.
|
||||
|
||||
### 5 Предоставление патентной лицензии и прекращение прав
|
||||
|
||||
Каждый контрибьютор предоставляет получателям Программного обеспечения бессрочную, всемирную, неисключительную, безвозмездную, не требующую выплаты роялти и безотзывную патентную лицензию на:
|
||||
|
||||
- изготовление,
|
||||
- поручение изготовления,
|
||||
- использование,
|
||||
- предложение к продаже,
|
||||
- продажу,
|
||||
- импорт,
|
||||
- и иное распространение Программного обеспечения.
|
||||
|
||||
Такая патентная лицензия распространяется исключительно на те патентные требования, которые неизбежно нарушаются соответствующим вкладом контрибьютора как таковым либо его сочетанием с Программным обеспечением.
|
||||
|
||||
Если лицо инициирует либо участвует в каком-либо судебном разбирательстве по патентному спору, включая встречные или перекрёстные иски, утверждая, что Программное обеспечение либо любой вклад, включённый в него, нарушает патент, **все права, предоставленные такому лицу настоящей Лицензией, немедленно прекращаются** с даты подачи соответствующего иска.
|
||||
|
||||
Кроме того, если лицо инициирует судебное разбирательство, утверждая, что само Программное обеспечение нарушает его патентные либо иные права интеллектуальной собственности, все права, предоставленные настоящей Лицензией, **автоматически прекращаются**.
|
||||
|
||||
### 6 Участие и вклад в разработку
|
||||
|
||||
Если контрибьютор явно не указал иное, любой Вклад, намеренно представленный для включения в Программное обеспечение, считается лицензированным на условиях настоящей Лицензии.
|
||||
Путём предоставления Вклада контрибьютор предоставляет мейнтейнерам проекта Telemt и всем получателям Программного обеспечения права, предусмотренные настоящей Лицензией, в отношении такого Вклада.
|
||||
|
||||
### 7 Указание авторства при сетевом и сервисном использовании
|
||||
|
||||
В случае использования Программного обеспечения для предоставления публично доступного сетевого сервиса оператор такого сервиса ОБЯЗАН обеспечить указание авторства Telemt как минимум в одном из следующих мест:
|
||||
- документация сервиса;
|
||||
- описание сервиса;
|
||||
- страница "О программе" или аналогичная информационная страница;
|
||||
- иные материалы, доступные пользователям и разумно связанные с данным сервисом.
|
||||
|
||||
Такое указание авторства НЕ ДОЛЖНО создавать впечатление одобрения или официальной поддержки со стороны проекта Telemt либо его мейнтейнеров.
|
||||
|
||||
### 8 Отказ от гарантий и делимость положений
|
||||
|
||||
ПРОГРАММНОЕ ОБЕСПЕЧЕНИЕ ПРЕДОСТАВЛЯЕТСЯ "КАК ЕСТЬ", БЕЗ КАКИХ-ЛИБО ГАРАНТИЙ, ЯВНЫХ ИЛИ ПОДРАЗУМЕВАЕМЫХ, ВКЛЮЧАЯ, НО НЕ ОГРАНИЧИВАЯСЬ ГАРАНТИЯМИ КОММЕРЧЕСКОЙ ПРИГОДНОСТИ, ПРИГОДНОСТИ ДЛЯ КОНКРЕТНОЙ ЦЕЛИ И НЕНАРУШЕНИЯ ПРАВ.
|
||||
|
||||
НИ ПРИ КАКИХ ОБСТОЯТЕЛЬСТВАХ АВТОРЫ ИЛИ ПРАВООБЛАДАТЕЛИ НЕ НЕСУТ ОТВЕТСТВЕННОСТИ ПО КАКИМ-ЛИБО ТРЕБОВАНИЯМ, УБЫТКАМ ИЛИ ИНОЙ ОТВЕТСТВЕННОСТИ, ВОЗНИКАЮЩЕЙ В РЕЗУЛЬТАТЕ ДОГОВОРА, ДЕЛИКТА ИЛИ ИНЫМ ОБРАЗОМ, СВЯЗАННЫМ С ПРОГРАММНЫМ ОБЕСПЕЧЕНИЕМ ИЛИ ЕГО ИСПОЛЬЗОВАНИЕМ.
|
||||
|
||||
В СЛУЧАЕ ЕСЛИ КАКОЕ-ЛИБО ПОЛОЖЕНИЕ НАСТОЯЩЕЙ ЛИЦЕНЗИИ ПРИЗНАЁТСЯ НЕДЕЙСТВИТЕЛЬНЫМ ИЛИ НЕПРИМЕНИМЫМ, ТАКОЕ ПОЛОЖЕНИЕ ПОДЛЕЖИТ ТОЛКОВАНИЮ МАКСИМАЛЬНО БЛИЗКО К ИСХОДНОМУ НАМЕРЕНИЮ СТОРОН, ПРИ ЭТОМ ОСТАЛЬНЫЕ ПОЛОЖЕНИЯ СОХРАНЯЮТ ПОЛНУЮ ЮРИДИЧЕСКУЮ СИЛУ.
|
||||
132
docs/OPENBSD.en.md
Normal file
132
docs/OPENBSD.en.md
Normal file
@@ -0,0 +1,132 @@
|
||||
# Telemt on OpenBSD (Build, Run, and rc.d)
|
||||
|
||||
This guide covers a practical OpenBSD deployment flow for Telemt:
|
||||
- build from source,
|
||||
- install binary and config,
|
||||
- run as an rc.d daemon,
|
||||
- verify basic runtime behavior.
|
||||
|
||||
## 1. Prerequisites
|
||||
|
||||
Install required packages:
|
||||
|
||||
```sh
|
||||
doas pkg_add rust git
|
||||
```
|
||||
|
||||
Notes:
|
||||
- Telemt release installer (`install.sh`) is Linux-only.
|
||||
- On OpenBSD, use source build with `cargo`.
|
||||
|
||||
## 2. Build from source
|
||||
|
||||
```sh
|
||||
git clone https://github.com/telemt/telemt
|
||||
cd telemt
|
||||
cargo build --release
|
||||
./target/release/telemt --version
|
||||
```
|
||||
|
||||
For low-RAM systems, this repository already uses `lto = "thin"` in release profile.
|
||||
|
||||
## 3. Install binary and config
|
||||
|
||||
```sh
|
||||
doas install -d -m 0755 /usr/local/bin
|
||||
doas install -m 0755 ./target/release/telemt /usr/local/bin/telemt
|
||||
|
||||
doas install -d -m 0750 /etc/telemt
|
||||
doas install -m 0640 ./config.toml /etc/telemt/config.toml
|
||||
```
|
||||
|
||||
## 4. Create runtime user
|
||||
|
||||
```sh
|
||||
doas useradd -L daemon -s /sbin/nologin -d /var/empty _telemt
|
||||
```
|
||||
|
||||
If `_telemt` already exists, continue.
|
||||
|
||||
## 5. Install rc.d service
|
||||
|
||||
Install the provided script:
|
||||
|
||||
```sh
|
||||
doas install -m 0555 ./contrib/openbsd/telemt.rcd /etc/rc.d/telemt
|
||||
```
|
||||
|
||||
Enable and start:
|
||||
|
||||
```sh
|
||||
doas rcctl enable telemt
|
||||
# Optional: send daemon output to syslog
|
||||
#doas rcctl set telemt logger daemon.info
|
||||
|
||||
doas rcctl start telemt
|
||||
```
|
||||
|
||||
Service controls:
|
||||
|
||||
```sh
|
||||
doas rcctl check telemt
|
||||
doas rcctl restart telemt
|
||||
doas rcctl stop telemt
|
||||
```
|
||||
|
||||
## 6. Resource limits (recommended)
|
||||
|
||||
OpenBSD rc.d can apply limits via login class. Add class `telemt` and assign it to `_telemt`.
|
||||
|
||||
Example class entry:
|
||||
|
||||
```text
|
||||
telemt:\
|
||||
:openfiles-cur=8192:openfiles-max=16384:\
|
||||
:datasize-cur=768M:datasize-max=1024M:\
|
||||
:coredumpsize=0:\
|
||||
:tc=daemon:
|
||||
```
|
||||
|
||||
These values are conservative defaults for small and medium deployments.
|
||||
Increase `openfiles-*` only if logs show descriptor exhaustion under load.
|
||||
|
||||
Then rebuild database and assign class:
|
||||
|
||||
```sh
|
||||
doas cap_mkdb /etc/login.conf
|
||||
#doas usermod -L telemt _telemt
|
||||
```
|
||||
|
||||
Uncomment `usermod` if you want this class bound to the Telemt user.
|
||||
|
||||
## 7. Functional smoke test
|
||||
|
||||
1. Validate service state:
|
||||
|
||||
```sh
|
||||
doas rcctl check telemt
|
||||
```
|
||||
|
||||
2. Check listener is present (replace 443 if needed):
|
||||
|
||||
```sh
|
||||
netstat -n -f inet -p tcp | grep LISTEN | grep '\.443'
|
||||
```
|
||||
|
||||
3. Verify process user:
|
||||
|
||||
```sh
|
||||
ps -o user,pid,command -ax | grep telemt | grep -v grep
|
||||
```
|
||||
|
||||
4. If startup fails, debug in foreground:
|
||||
|
||||
```sh
|
||||
RUST_LOG=debug /usr/local/bin/telemt /etc/telemt/config.toml
|
||||
```
|
||||
|
||||
## 8. OpenBSD-specific caveats
|
||||
|
||||
- OpenBSD does not support per-socket keepalive retries/interval tuning in the same way as Linux.
|
||||
- Telemt source already uses target-aware cfg gates for keepalive setup.
|
||||
- Use rc.d/rcctl, not systemd.
|
||||
@@ -48,11 +48,16 @@ Save the obtained result somewhere. You will need it later!
|
||||
|
||||
---
|
||||
|
||||
**1. Place your config to /etc/telemt.toml**
|
||||
**1. Place your config to /etc/telemt/telemt.toml**
|
||||
|
||||
Create config directory:
|
||||
```bash
|
||||
mkdir /etc/telemt
|
||||
```
|
||||
|
||||
Open nano
|
||||
```bash
|
||||
nano /etc/telemt.toml
|
||||
nano /etc/telemt/telemt.toml
|
||||
```
|
||||
paste your config
|
||||
|
||||
@@ -60,12 +65,22 @@ paste your config
|
||||
# === General Settings ===
|
||||
[general]
|
||||
# ad_tag = "00000000000000000000000000000000"
|
||||
use_middle_proxy = false
|
||||
|
||||
[general.modes]
|
||||
classic = false
|
||||
secure = false
|
||||
tls = true
|
||||
|
||||
[server]
|
||||
port = 443
|
||||
|
||||
[server.api]
|
||||
enabled = true
|
||||
# listen = "127.0.0.1:9091"
|
||||
# whitelist = ["127.0.0.1/32"]
|
||||
# read_only = true
|
||||
|
||||
# === Anti-Censorship & Masking ===
|
||||
[censorship]
|
||||
tls_domain = "petrovich.ru"
|
||||
@@ -74,6 +89,7 @@ tls_domain = "petrovich.ru"
|
||||
# format: "username" = "32_hex_chars_secret"
|
||||
hello = "00000000000000000000000000000000"
|
||||
```
|
||||
|
||||
then Ctrl+S -> Ctrl+X to save
|
||||
|
||||
> [!WARNING]
|
||||
@@ -82,7 +98,14 @@ then Ctrl+S -> Ctrl+X to save
|
||||
|
||||
---
|
||||
|
||||
**2. Create service on /etc/systemd/system/telemt.service**
|
||||
**2. Create telemt user**
|
||||
|
||||
```bash
|
||||
useradd -d /opt/telemt -m -r -U telemt
|
||||
chown -R telemt:telemt /etc/telemt
|
||||
```
|
||||
|
||||
**3. Create service on /etc/systemd/system/telemt.service**
|
||||
|
||||
Open nano
|
||||
```bash
|
||||
@@ -93,28 +116,43 @@ paste this Systemd Module
|
||||
```bash
|
||||
[Unit]
|
||||
Description=Telemt
|
||||
After=network.target
|
||||
After=network-online.target
|
||||
Wants=network-online.target
|
||||
|
||||
[Service]
|
||||
Type=simple
|
||||
WorkingDirectory=/bin
|
||||
ExecStart=/bin/telemt /etc/telemt.toml
|
||||
User=telemt
|
||||
Group=telemt
|
||||
WorkingDirectory=/opt/telemt
|
||||
ExecStart=/bin/telemt /etc/telemt/telemt.toml
|
||||
Restart=on-failure
|
||||
LimitNOFILE=65536
|
||||
AmbientCapabilities=CAP_NET_BIND_SERVICE
|
||||
CapabilityBoundingSet=CAP_NET_BIND_SERVICE
|
||||
NoNewPrivileges=true
|
||||
|
||||
[Install]
|
||||
WantedBy=multi-user.target
|
||||
```
|
||||
then Ctrl+S -> Ctrl+X to save
|
||||
|
||||
reload systemd units
|
||||
```bash
|
||||
systemctl daemon-reload
|
||||
```
|
||||
|
||||
**3.** To start it, enter the command `systemctl start telemt`
|
||||
**4.** To start it, enter the command `systemctl start telemt`
|
||||
|
||||
**4.** To get status information, enter `systemctl status telemt`
|
||||
**5.** To get status information, enter `systemctl status telemt`
|
||||
|
||||
**5.** For automatic startup at system boot, enter `systemctl enable telemt`
|
||||
**6.** For automatic startup at system boot, enter `systemctl enable telemt`
|
||||
|
||||
**6.** To get the links, enter `journalctl -u telemt -n -g "links" --no-pager -o cat | tac`
|
||||
**7.** To get the link(s), enter
|
||||
```bash
|
||||
curl -s http://127.0.0.1:9091/v1/users | jq
|
||||
```
|
||||
|
||||
> Any number of people can use one link.
|
||||
|
||||
---
|
||||
|
||||
@@ -143,6 +181,8 @@ docker compose down
|
||||
docker build -t telemt:local .
|
||||
docker run --name telemt --restart unless-stopped \
|
||||
-p 443:443 \
|
||||
-p 9090:9090 \
|
||||
-p 9091:9091 \
|
||||
-e RUST_LOG=info \
|
||||
-v "$PWD/config.toml:/app/config.toml:ro" \
|
||||
--read-only \
|
||||
|
||||
@@ -48,11 +48,16 @@ python3 -c 'import os; print(os.urandom(16).hex())'
|
||||
|
||||
---
|
||||
|
||||
**1. Поместите свою конфигурацию в файл /etc/telemt.toml**
|
||||
**1. Поместите свою конфигурацию в файл /etc/telemt/telemt.toml**
|
||||
|
||||
Создаём директорию для конфига:
|
||||
```bash
|
||||
mkdir /etc/telemt
|
||||
```
|
||||
|
||||
Открываем nano
|
||||
```bash
|
||||
nano /etc/telemt.toml
|
||||
nano /etc/telemt/telemt.toml
|
||||
```
|
||||
Вставьте свою конфигурацию
|
||||
|
||||
@@ -60,12 +65,22 @@ nano /etc/telemt.toml
|
||||
# === General Settings ===
|
||||
[general]
|
||||
# ad_tag = "00000000000000000000000000000000"
|
||||
use_middle_proxy = false
|
||||
|
||||
[general.modes]
|
||||
classic = false
|
||||
secure = false
|
||||
tls = true
|
||||
|
||||
[server]
|
||||
port = 443
|
||||
|
||||
[server.api]
|
||||
enabled = true
|
||||
# listen = "127.0.0.1:9091"
|
||||
# whitelist = ["127.0.0.1/32"]
|
||||
# read_only = true
|
||||
|
||||
# === Anti-Censorship & Masking ===
|
||||
[censorship]
|
||||
tls_domain = "petrovich.ru"
|
||||
@@ -74,6 +89,7 @@ tls_domain = "petrovich.ru"
|
||||
# format: "username" = "32_hex_chars_secret"
|
||||
hello = "00000000000000000000000000000000"
|
||||
```
|
||||
|
||||
Затем нажмите Ctrl+S -> Ctrl+X, чтобы сохранить
|
||||
|
||||
> [!WARNING]
|
||||
@@ -82,7 +98,14 @@ hello = "00000000000000000000000000000000"
|
||||
|
||||
---
|
||||
|
||||
**2. Создайте службу в /etc/systemd/system/telemt.service**
|
||||
**2. Создайте пользователя для telemt**
|
||||
|
||||
```bash
|
||||
useradd -d /opt/telemt -m -r -U telemt
|
||||
chown -R telemt:telemt /etc/telemt
|
||||
```
|
||||
|
||||
**3. Создайте службу в /etc/systemd/system/telemt.service**
|
||||
|
||||
Открываем nano
|
||||
```bash
|
||||
@@ -93,30 +116,45 @@ nano /etc/systemd/system/telemt.service
|
||||
```bash
|
||||
[Unit]
|
||||
Description=Telemt
|
||||
After=network.target
|
||||
After=network-online.target
|
||||
Wants=network-online.target
|
||||
|
||||
[Service]
|
||||
Type=simple
|
||||
WorkingDirectory=/bin
|
||||
ExecStart=/bin/telemt /etc/telemt.toml
|
||||
User=telemt
|
||||
Group=telemt
|
||||
WorkingDirectory=/opt/telemt
|
||||
ExecStart=/bin/telemt /etc/telemt/telemt.toml
|
||||
Restart=on-failure
|
||||
LimitNOFILE=65536
|
||||
AmbientCapabilities=CAP_NET_BIND_SERVICE
|
||||
CapabilityBoundingSet=CAP_NET_BIND_SERVICE
|
||||
NoNewPrivileges=true
|
||||
|
||||
[Install]
|
||||
WantedBy=multi-user.target
|
||||
```
|
||||
Затем нажмите Ctrl+S -> Ctrl+X, чтобы сохранить
|
||||
|
||||
перезагрузите конфигурацию systemd
|
||||
```bash
|
||||
systemctl daemon-reload
|
||||
```
|
||||
|
||||
**3.** Для запуска введите команду `systemctl start telemt`
|
||||
**4.** Для запуска введите команду `systemctl start telemt`
|
||||
|
||||
**4.** Для получения информации о статусе введите `systemctl status telemt`
|
||||
**5.** Для получения информации о статусе введите `systemctl status telemt`
|
||||
|
||||
**5.** Для автоматического запуска при запуске системы в введите `systemctl enable telemt`
|
||||
**6.** Для автоматического запуска при запуске системы введите `systemctl enable telemt`
|
||||
|
||||
**7.** Для получения ссылки/ссылок введите
|
||||
```bash
|
||||
curl -s http://127.0.0.1:9091/v1/users | jq
|
||||
```
|
||||
> Одной ссылкой может пользоваться сколько угодно человек.
|
||||
|
||||
**6.** Для получения ссылки введите `journalctl -u telemt -n -g "links" --no-pager -o cat | tac`
|
||||
> [!WARNING]
|
||||
> Рабочую ссылку может выдать только команда из 6 пункта. Не пытайтесь делать ее самостоятельно или копировать откуда-либо!
|
||||
> Рабочую ссылку может выдать только команда из 7 пункта. Не пытайтесь делать ее самостоятельно или копировать откуда-либо, если вы не уверены в том, что делаете!
|
||||
|
||||
---
|
||||
|
||||
@@ -140,11 +178,13 @@ docker compose down
|
||||
> - По умолчанию публикуются порты 443:443, а контейнер запускается со сброшенными привилегиями (добавлена только `NET_BIND_SERVICE`)
|
||||
> - Если вам действительно нужна сеть хоста (обычно это требуется только для некоторых конфигураций IPv6), раскомментируйте `network_mode: host`
|
||||
|
||||
**Запуск в Docker Compose**
|
||||
**Запуск без Docker Compose**
|
||||
```bash
|
||||
docker build -t telemt:local .
|
||||
docker run --name telemt --restart unless-stopped \
|
||||
-p 443:443 \
|
||||
-p 9090:9090 \
|
||||
-p 9091:9091 \
|
||||
-e RUST_LOG=info \
|
||||
-v "$PWD/config.toml:/app/config.toml:ro" \
|
||||
--read-only \
|
||||
|
||||
@@ -82,7 +82,7 @@ Die unten angegebenen `Default`-Werte sind Code-Defaults (bei fehlendem Schlüss
|
||||
|
||||
| Feld | Gilt für | Typ | Pflicht | Default | Bedeutung |
|
||||
|---|---|---|---|---|---|
|
||||
| `[[upstreams]].type` | alle Upstreams | `"direct" \| "socks4" \| "socks5"` | ja | n/a | Upstream-Transporttyp. |
|
||||
| `[[upstreams]].type` | alle Upstreams | `"direct" \| "socks4" \| "socks5" \| "shadowsocks"` | ja | n/a | Upstream-Transporttyp. |
|
||||
| `[[upstreams]].weight` | alle Upstreams | `u16` | nein | `1` | Basisgewicht für weighted-random Auswahl. |
|
||||
| `[[upstreams]].enabled` | alle Upstreams | `bool` | nein | `true` | Deaktivierte Einträge werden beim Start ignoriert. |
|
||||
| `[[upstreams]].scopes` | alle Upstreams | `String` | nein | `""` | Komma-separierte Scope-Tags für Request-Routing. |
|
||||
@@ -95,6 +95,8 @@ Die unten angegebenen `Default`-Werte sind Code-Defaults (bei fehlendem Schlüss
|
||||
| `interface` | `socks5` | `Option<String>` | nein | `null` | Wird nur genutzt, wenn `address` als `ip:port` angegeben ist. |
|
||||
| `username` | `socks5` | `Option<String>` | nein | `null` | SOCKS5 Benutzername. |
|
||||
| `password` | `socks5` | `Option<String>` | nein | `null` | SOCKS5 Passwort. |
|
||||
| `url` | `shadowsocks` | `String` | ja | n/a | Shadowsocks-SIP002-URL (`ss://...`). In Runtime-APIs wird nur `host:port` offengelegt. |
|
||||
| `interface` | `shadowsocks` | `Option<String>` | nein | `null` | Optionales ausgehendes Bind-Interface oder lokale Literal-IP. |
|
||||
|
||||
### Runtime-Regeln (wichtig)
|
||||
|
||||
@@ -115,6 +117,7 @@ Die unten angegebenen `Default`-Werte sind Code-Defaults (bei fehlendem Schlüss
|
||||
8. Im ME-Modus wird der gewählte Upstream auch für den ME-TCP-Dial-Pfad verwendet.
|
||||
9. Im ME-Modus ist bei `direct` mit bind/interface die STUN-Reflection bind-aware für KDF-Adressmaterial.
|
||||
10. Im ME-Modus werden bei SOCKS-Upstream `BND.ADDR/BND.PORT` für KDF verwendet, wenn gültig/öffentlich und gleiche IP-Familie.
|
||||
11. `shadowsocks`-Upstreams erfordern `general.use_middle_proxy = false`. Mit aktiviertem ME-Modus schlägt das Laden der Config sofort fehl.
|
||||
|
||||
## Upstream-Konfigurationsbeispiele
|
||||
|
||||
@@ -150,7 +153,20 @@ weight = 2
|
||||
enabled = true
|
||||
```
|
||||
|
||||
### Beispiel 4: Gemischte Upstreams mit Scopes
|
||||
### Beispiel 4: Shadowsocks-Upstream
|
||||
|
||||
```toml
|
||||
[general]
|
||||
use_middle_proxy = false
|
||||
|
||||
[[upstreams]]
|
||||
type = "shadowsocks"
|
||||
url = "ss://2022-blake3-aes-256-gcm:BASE64_KEY@198.51.100.50:8388"
|
||||
weight = 2
|
||||
enabled = true
|
||||
```
|
||||
|
||||
### Beispiel 5: Gemischte Upstreams mit Scopes
|
||||
|
||||
```toml
|
||||
[[upstreams]]
|
||||
|
||||
@@ -82,7 +82,7 @@ Defaults below are code defaults (used when a key is omitted), not necessarily v
|
||||
|
||||
| Field | Applies to | Type | Required | Default | Meaning |
|
||||
|---|---|---|---|---|---|
|
||||
| `[[upstreams]].type` | all upstreams | `"direct" \| "socks4" \| "socks5"` | yes | n/a | Upstream transport type. |
|
||||
| `[[upstreams]].type` | all upstreams | `"direct" \| "socks4" \| "socks5" \| "shadowsocks"` | yes | n/a | Upstream transport type. |
|
||||
| `[[upstreams]].weight` | all upstreams | `u16` | no | `1` | Base weight for weighted-random selection. |
|
||||
| `[[upstreams]].enabled` | all upstreams | `bool` | no | `true` | Disabled entries are ignored at startup. |
|
||||
| `[[upstreams]].scopes` | all upstreams | `String` | no | `""` | Comma-separated scope tags for request-level routing. |
|
||||
@@ -95,6 +95,8 @@ Defaults below are code defaults (used when a key is omitted), not necessarily v
|
||||
| `interface` | `socks5` | `Option<String>` | no | `null` | Used only for SOCKS server `ip:port` dial path. |
|
||||
| `username` | `socks5` | `Option<String>` | no | `null` | SOCKS5 username auth. |
|
||||
| `password` | `socks5` | `Option<String>` | no | `null` | SOCKS5 password auth. |
|
||||
| `url` | `shadowsocks` | `String` | yes | n/a | Shadowsocks SIP002 URL (`ss://...`). Only `host:port` is exposed in runtime APIs. |
|
||||
| `interface` | `shadowsocks` | `Option<String>` | no | `null` | Optional outgoing bind interface or literal local IP. |
|
||||
|
||||
### Runtime rules (important)
|
||||
|
||||
@@ -115,6 +117,7 @@ Defaults below are code defaults (used when a key is omitted), not necessarily v
|
||||
8. In ME mode, the selected upstream is also used for ME TCP dial path.
|
||||
9. In ME mode for `direct` upstream with bind/interface, STUN reflection logic is bind-aware for KDF source material.
|
||||
10. In ME mode for SOCKS upstream, SOCKS `BND.ADDR/BND.PORT` is used for KDF when it is valid/public for the same family.
|
||||
11. `shadowsocks` upstreams require `general.use_middle_proxy = false`. Config load fails fast if ME mode is enabled.
|
||||
|
||||
## Upstream Configuration Examples
|
||||
|
||||
@@ -150,7 +153,20 @@ weight = 2
|
||||
enabled = true
|
||||
```
|
||||
|
||||
### Example 4: Mixed upstreams with scopes
|
||||
### Example 4: Shadowsocks upstream
|
||||
|
||||
```toml
|
||||
[general]
|
||||
use_middle_proxy = false
|
||||
|
||||
[[upstreams]]
|
||||
type = "shadowsocks"
|
||||
url = "ss://2022-blake3-aes-256-gcm:BASE64_KEY@198.51.100.50:8388"
|
||||
weight = 2
|
||||
enabled = true
|
||||
```
|
||||
|
||||
### Example 5: Mixed upstreams with scopes
|
||||
|
||||
```toml
|
||||
[[upstreams]]
|
||||
|
||||
@@ -82,7 +82,7 @@
|
||||
|
||||
| Поле | Применимость | Тип | Обязательно | Default | Назначение |
|
||||
|---|---|---|---|---|---|
|
||||
| `[[upstreams]].type` | все upstream | `"direct" \| "socks4" \| "socks5"` | да | n/a | Тип upstream транспорта. |
|
||||
| `[[upstreams]].type` | все upstream | `"direct" \| "socks4" \| "socks5" \| "shadowsocks"` | да | n/a | Тип upstream транспорта. |
|
||||
| `[[upstreams]].weight` | все upstream | `u16` | нет | `1` | Базовый вес в weighted-random выборе. |
|
||||
| `[[upstreams]].enabled` | все upstream | `bool` | нет | `true` | Выключенные записи игнорируются на старте. |
|
||||
| `[[upstreams]].scopes` | все upstream | `String` | нет | `""` | Список scope-токенов через запятую для маршрутизации. |
|
||||
@@ -95,6 +95,8 @@
|
||||
| `interface` | `socks5` | `Option<String>` | нет | `null` | Используется только если `address` задан как `ip:port`. |
|
||||
| `username` | `socks5` | `Option<String>` | нет | `null` | Логин SOCKS5 auth. |
|
||||
| `password` | `socks5` | `Option<String>` | нет | `null` | Пароль SOCKS5 auth. |
|
||||
| `url` | `shadowsocks` | `String` | да | n/a | Shadowsocks SIP002 URL (`ss://...`). В runtime API раскрывается только `host:port`. |
|
||||
| `interface` | `shadowsocks` | `Option<String>` | нет | `null` | Необязательный исходящий bind-интерфейс или literal локальный IP. |
|
||||
|
||||
### Runtime-правила
|
||||
|
||||
@@ -115,6 +117,7 @@
|
||||
8. В ME-режиме выбранный upstream также используется для ME TCP dial path.
|
||||
9. В ME-режиме для `direct` upstream с bind/interface STUN-рефлексия выполняется bind-aware для KDF материала.
|
||||
10. В ME-режиме для SOCKS upstream используются `BND.ADDR/BND.PORT` для KDF, если адрес валиден/публичен и соответствует IP family.
|
||||
11. `shadowsocks` upstream требует `general.use_middle_proxy = false`. При включенном ME-режиме конфиг отклоняется при загрузке.
|
||||
|
||||
## Примеры конфигурации Upstreams
|
||||
|
||||
@@ -150,7 +153,20 @@ weight = 2
|
||||
enabled = true
|
||||
```
|
||||
|
||||
### Пример 4: смешанные upstream с scopes
|
||||
### Пример 4: Shadowsocks upstream
|
||||
|
||||
```toml
|
||||
[general]
|
||||
use_middle_proxy = false
|
||||
|
||||
[[upstreams]]
|
||||
type = "shadowsocks"
|
||||
url = "ss://2022-blake3-aes-256-gcm:BASE64_KEY@198.51.100.50:8388"
|
||||
weight = 2
|
||||
enabled = true
|
||||
```
|
||||
|
||||
### Пример 5: смешанные upstream с scopes
|
||||
|
||||
```toml
|
||||
[[upstreams]]
|
||||
|
||||
287
docs/VPS_DOUBLE_HOP.en.md
Normal file
287
docs/VPS_DOUBLE_HOP.en.md
Normal file
@@ -0,0 +1,287 @@
|
||||
<img src="https://gist.githubusercontent.com/avbor/1f8a128e628f47249aae6e058a57610b/raw/19013276c035e91058e0a9799ab145f8e70e3ff5/scheme.svg">
|
||||
|
||||
## Concept
|
||||
- **Server A** (_conditionally Russian Federation_):\
|
||||
Entry point, receives Telegram proxy user traffic via **HAProxy** (port `443`)\
|
||||
and sends it to the tunnel to Server **B**.\
|
||||
Internal IP in the tunnel — `10.10.10.2`\
|
||||
Port for HAProxy clients — `443\tcp`
|
||||
- **Server B** (_conditionally Netherlands_):\
|
||||
Exit point, runs **telemt** and accepts client connections through Server **A**.\
|
||||
The server must have unrestricted access to Telegram servers.\
|
||||
Internal IP in the tunnel — `10.10.10.1`\
|
||||
AmneziaWG port — `8443\udp`\
|
||||
Port for telemt clients — `443\tcp`
|
||||
|
||||
---
|
||||
|
||||
## Step 1. Setting up the AmneziaWG tunnel (A <-> B)
|
||||
[AmneziaWG](https://github.com/amnezia-vpn/amneziawg-linux-kernel-module) must be installed on all servers.\
|
||||
All following commands are given for **Ubuntu 24.04**.\
|
||||
For RHEL-based distributions, installation instructions are available at the link above.
|
||||
|
||||
### Installing AmneziaWG (Servers A and B)
|
||||
The following steps must be performed on each server:
|
||||
|
||||
#### 1. Adding the AmneziaWG repository and installing required packages:
|
||||
```bash
|
||||
sudo apt install -y software-properties-common python3-launchpadlib gnupg2 linux-headers-$(uname -r) && \
|
||||
sudo add-apt-repository ppa:amnezia/ppa && \
|
||||
sudo apt-get install -y amneziawg
|
||||
```
|
||||
|
||||
#### 2. Generating a unique key pair:
|
||||
```bash
|
||||
cd /etc/amnezia/amneziawg && \
|
||||
awg genkey | tee private.key | awg pubkey > public.key
|
||||
```
|
||||
|
||||
As a result, you will get two files in the `/etc/amnezia/amneziawg` folder:\
|
||||
`private.key` - private, and\
|
||||
`public.key` - public server keys
|
||||
|
||||
#### 3. Configuring network interfaces:
|
||||
Obfuscation parameters `S1`, `S2`, `H1`, `H2`, `H3`, `H4` must be strictly identical on both servers.\
|
||||
Parameters `Jc`, `Jmin` and `Jmax` can differ.\
|
||||
Parameters `I1-I5` ([Custom Protocol Signature](https://docs.amnezia.org/documentation/amnezia-wg/)) must be specified on the client side (Server **A**).
|
||||
|
||||
Recommendations for choosing values:
|
||||
|
||||
```text
|
||||
Jc — 1 ≤ Jc ≤ 128; from 4 to 12 inclusive
|
||||
Jmin — Jmax > Jmin < 1280*; recommended 8
|
||||
Jmax — Jmin < Jmax ≤ 1280*; recommended 80
|
||||
S1 — S1 ≤ 1132* (1280* - 148 = 1132); S1 + 56 ≠ S2;
|
||||
recommended range from 15 to 150 inclusive
|
||||
S2 — S2 ≤ 1188* (1280* - 92 = 1188);
|
||||
recommended range from 15 to 150 inclusive
|
||||
H1/H2/H3/H4 — must be unique and differ from each other;
|
||||
recommended range from 5 to 2147483647 inclusive
|
||||
|
||||
* It is assumed that the Internet connection has an MTU of 1280.
|
||||
```
|
||||
|
||||
> [!IMPORTANT]
|
||||
> It is recommended to use your own, unique values.\
|
||||
> You can use the [generator](https://htmlpreview.github.io/?https://gist.githubusercontent.com/avbor/955782b5c37b06240b243aa375baeac5/raw/13f5517ca473b47c412b9a99407066de973732bd/awg-gen.html) to select parameters.
|
||||
|
||||
#### Server B Configuration (Netherlands):
|
||||
|
||||
Create the interface configuration file (`awg0`)
|
||||
```bash
|
||||
nano /etc/amnezia/amneziawg/awg0.conf
|
||||
```
|
||||
|
||||
File content
|
||||
```ini
|
||||
[Interface]
|
||||
Address = 10.10.10.1/24
|
||||
ListenPort = 8443
|
||||
PrivateKey = <PRIVATE_KEY_SERVER_B>
|
||||
SaveConfig = true
|
||||
Jc = 4
|
||||
Jmin = 8
|
||||
Jmax = 80
|
||||
S1 = 29
|
||||
S2 = 15
|
||||
S3 = 18
|
||||
S4 = 0
|
||||
H1 = 2087563914
|
||||
H2 = 188817757
|
||||
H3 = 101784570
|
||||
H4 = 432174303
|
||||
|
||||
[Peer]
|
||||
PublicKey = <PUBLIC_KEY_SERVER_A>
|
||||
AllowedIPs = 10.10.10.2/32
|
||||
```
|
||||
`ListenPort` - the port on which the server will wait for connections, you can choose any free one.\
|
||||
`<PRIVATE_KEY_SERVER_B>` - the content of the `private.key` file from Server **B**.\
|
||||
`<PUBLIC_KEY_SERVER_A>` - the content of the `public.key` file from Server **A**.
|
||||
|
||||
Open the port on the firewall (if enabled):
|
||||
```bash
|
||||
sudo ufw allow from <PUBLIC_IP_SERVER_A> to any port 8443 proto udp
|
||||
```
|
||||
|
||||
`<PUBLIC_IP_SERVER_A>` - the external IP address of Server **A**.
|
||||
|
||||
#### Server A Configuration (Russian Federation):
|
||||
Create the interface configuration file (awg0)
|
||||
|
||||
```bash
|
||||
nano /etc/amnezia/amneziawg/awg0.conf
|
||||
```
|
||||
|
||||
File content
|
||||
```ini
|
||||
[Interface]
|
||||
Address = 10.10.10.2/24
|
||||
PrivateKey = <PRIVATE_KEY_SERVER_A>
|
||||
Jc = 4
|
||||
Jmin = 8
|
||||
Jmax = 80
|
||||
S1 = 29
|
||||
S2 = 15
|
||||
S3 = 18
|
||||
S4 = 0
|
||||
H1 = 2087563914
|
||||
H2 = 188817757
|
||||
H3 = 101784570
|
||||
H4 = 432174303
|
||||
I1 = <b 0xc10000000108981eba846e21f74e00>
|
||||
I2 = <b 0xc20000000108981eba846e21f74e00>
|
||||
I3 = <b 0xc30000000108981eba846e21f74e00>
|
||||
I4 = <b 0x43981eba846e21f74e>
|
||||
I5 = <b 0x43981eba846e21f74e>
|
||||
|
||||
[Peer]
|
||||
PublicKey = <PUBLIC_KEY_SERVER_B>
|
||||
Endpoint = <PUBLIC_IP_SERVER_B>:8443
|
||||
AllowedIPs = 10.10.10.1/32
|
||||
PersistentKeepalive = 25
|
||||
```
|
||||
|
||||
`<PRIVATE_KEY_SERVER_A>` - the content of the `private.key` file from Server **A**.\
|
||||
`<PUBLIC_KEY_SERVER_B>` - the content of the `public.key` file from Server **B**.\
|
||||
`<PUBLIC_IP_SERVER_B>` - the public IP address of Server **B**.
|
||||
|
||||
Enable the tunnel on both servers:
|
||||
```bash
|
||||
sudo systemctl enable --now awg-quick@awg0
|
||||
```
|
||||
|
||||
Make sure Server B is accessible from Server A through the tunnel.
|
||||
```bash
|
||||
ping 10.10.10.1
|
||||
PING 10.10.10.1 (10.10.10.1) 56(84) bytes of data.
|
||||
64 bytes from 10.10.10.1: icmp_seq=1 ttl=64 time=35.1 ms
|
||||
64 bytes from 10.10.10.1: icmp_seq=2 ttl=64 time=35.0 ms
|
||||
64 bytes from 10.10.10.1: icmp_seq=3 ttl=64 time=35.1 ms
|
||||
^C
|
||||
```
|
||||
---
|
||||
|
||||
## Step 2. Installing telemt on Server B (conditionally Netherlands)
|
||||
Installation and configuration are described [here](https://github.com/telemt/telemt/blob/main/docs/QUICK_START_GUIDE.ru.md) or [here](https://gitlab.com/An0nX/telemt-docker#-quick-start-docker-compose).\
|
||||
It is assumed that telemt expects connections on port `443\tcp`.
|
||||
|
||||
In the telemt config, you must enable the `Proxy` protocol and restrict connections to it only through the tunnel.
|
||||
```toml
|
||||
[server]
|
||||
port = 443
|
||||
listen_addr_ipv4 = "10.10.10.1"
|
||||
proxy_protocol = true
|
||||
```
|
||||
|
||||
Also, for correct link generation, specify the FQDN or IP address and port of Server `A`
|
||||
```toml
|
||||
[general.links]
|
||||
show = "*"
|
||||
public_host = "<FQDN_OR_IP_SERVER_A>"
|
||||
public_port = 443
|
||||
```
|
||||
|
||||
Open the port on the firewall (if enabled):
|
||||
```bash
|
||||
sudo ufw allow from 10.10.10.2 to any port 443 proto tcp
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Step 3. Configuring HAProxy on Server A (Russian Federation)
|
||||
Since the version in the standard Ubuntu repository is relatively old, it makes sense to use the official Docker image.\
|
||||
[Instructions](https://docs.docker.com/engine/install/ubuntu/) for installing Docker on Ubuntu.
|
||||
|
||||
> [!WARNING]
|
||||
> By default, regular users do not have rights to use ports < 1024.
|
||||
> Attempts to run HAProxy on port 443 can lead to errors:
|
||||
> ```
|
||||
> [ALERT] (8) : Binding [/usr/local/etc/haproxy/haproxy.cfg:17] for frontend tcp_in_443:
|
||||
> protocol tcpv4: cannot bind socket (Permission denied) for [0.0.0.0:443].
|
||||
> ```
|
||||
> There are two simple ways to bypass this restriction, choose one:
|
||||
> 1. At the OS level, change the net.ipv4.ip_unprivileged_port_start setting to allow users to use all ports:
|
||||
> ```
|
||||
> echo "net.ipv4.ip_unprivileged_port_start = 0" | sudo tee -a /etc/sysctl.conf && sudo sysctl -p
|
||||
> ```
|
||||
> or
|
||||
>
|
||||
> 2. Run HAProxy as root:
|
||||
> Uncomment the `user: "root"` parameter in docker-compose.yaml.
|
||||
|
||||
#### Create a folder for HAProxy:
|
||||
```bash
|
||||
mkdir -p /opt/docker-compose/haproxy && cd $_
|
||||
```
|
||||
|
||||
#### Create the docker-compose.yaml file
|
||||
`nano docker-compose.yaml`
|
||||
|
||||
File content
|
||||
```yaml
|
||||
services:
|
||||
haproxy:
|
||||
image: haproxy:latest
|
||||
container_name: haproxy
|
||||
restart: unless-stopped
|
||||
# user: "root"
|
||||
network_mode: "host"
|
||||
volumes:
|
||||
- ./haproxy.cfg:/usr/local/etc/haproxy/haproxy.cfg:ro
|
||||
logging:
|
||||
driver: "json-file"
|
||||
options:
|
||||
max-size: "1m"
|
||||
max-file: "1"
|
||||
```
|
||||
|
||||
#### Create the haproxy.cfg config file
|
||||
Accept connections on port 443\tcp and send them through the tunnel to Server `B` 10.10.10.1:443
|
||||
|
||||
`nano haproxy.cfg`
|
||||
|
||||
File content
|
||||
|
||||
```haproxy
|
||||
global
|
||||
log stdout format raw local0
|
||||
maxconn 10000
|
||||
|
||||
defaults
|
||||
log global
|
||||
mode tcp
|
||||
option tcplog
|
||||
option clitcpka
|
||||
option srvtcpka
|
||||
timeout connect 5s
|
||||
timeout client 2h
|
||||
timeout server 2h
|
||||
timeout check 5s
|
||||
|
||||
frontend tcp_in_443
|
||||
bind *:443
|
||||
maxconn 8000
|
||||
option tcp-smart-accept
|
||||
default_backend telemt_nodes
|
||||
|
||||
backend telemt_nodes
|
||||
option tcp-smart-connect
|
||||
server server_b 10.10.10.1:443 check inter 5s rise 2 fall 3 send-proxy-v2
|
||||
|
||||
|
||||
```
|
||||
> [!WARNING]
|
||||
> **The file must end with an empty line, otherwise HAProxy will not start!**
|
||||
|
||||
#### Allow port 443\tcp in the firewall (if enabled)
|
||||
```bash
|
||||
sudo ufw allow 443/tcp
|
||||
```
|
||||
|
||||
#### Start the HAProxy container
|
||||
```bash
|
||||
docker compose up -d
|
||||
```
|
||||
|
||||
If everything is configured correctly, you can now try connecting Telegram clients using links from the telemt log\api.
|
||||
291
docs/VPS_DOUBLE_HOP.ru.md
Normal file
291
docs/VPS_DOUBLE_HOP.ru.md
Normal file
@@ -0,0 +1,291 @@
|
||||
<img src="https://gist.githubusercontent.com/avbor/1f8a128e628f47249aae6e058a57610b/raw/19013276c035e91058e0a9799ab145f8e70e3ff5/scheme.svg">
|
||||
|
||||
## Концепция
|
||||
- **Сервер A** (_РФ_):\
|
||||
Точка входа, принимает трафик пользователей Telegram-прокси через **HAProxy** (порт `443`)\
|
||||
и отправляет в туннель на Сервер **B**.\
|
||||
Внутренний IP в туннеле — `10.10.10.2`\
|
||||
Порт для клиентов HAProxy — `443\tcp`
|
||||
- **Сервер B** (_условно Нидерланды_):\
|
||||
Точка выхода, на нем работает **telemt** и принимает подключения клиентов через Сервер **A**.\
|
||||
На сервере должен быть неограниченный доступ до серверов Telegram.\
|
||||
Внутренний IP в туннеле — `10.10.10.1`\
|
||||
Порт AmneziaWG — `8443\udp`\
|
||||
Порт для клиентов telemt — `443\tcp`
|
||||
|
||||
---
|
||||
|
||||
## Шаг 1. Настройка туннеля AmneziaWG (A <-> B)
|
||||
|
||||
На всех серверах необходимо установить [amneziawg](https://github.com/amnezia-vpn/amneziawg-linux-kernel-module).\
|
||||
Далее все команды даны для **Ubuntu 24.04**.\
|
||||
Для RHEL-based дистрибутивов инструкция по установке есть по ссылке выше.
|
||||
|
||||
### Установка AmneziaWG (Сервера A и B)
|
||||
На каждом из серверов необходимо выполнить следующие шаги:
|
||||
|
||||
#### 1. Добавление репозитория AmneziaWG и установка необходимых пакетов:
|
||||
```bash
|
||||
sudo apt install -y software-properties-common python3-launchpadlib gnupg2 linux-headers-$(uname -r) && \
|
||||
sudo add-apt-repository ppa:amnezia/ppa && \
|
||||
sudo apt-get install -y amneziawg
|
||||
```
|
||||
|
||||
#### 2. Генерация уникальной пары ключей:
|
||||
```bash
|
||||
cd /etc/amnezia/amneziawg && \
|
||||
awg genkey | tee private.key | awg pubkey > public.key
|
||||
```
|
||||
В результате вы получите в папке `/etc/amnezia/amneziawg` два файла:\
|
||||
`private.key` - приватный и\
|
||||
`public.key` - публичный ключи сервера
|
||||
|
||||
#### 3. Настройка сетевых интерфейсов:
|
||||
|
||||
Параметры обфускации `S1`, `S2`, `H1`, `H2`, `H3`, `H4` должны быть строго идентичными на обоих серверах.\
|
||||
Параметры `Jc`, `Jmin` и `Jmax` могут отличаться.\
|
||||
Параметры `I1-I5` ([Custom Protocol Signature](https://docs.amnezia.org/documentation/amnezia-wg/)) нужно указывать на стороне _клиента_ (Сервер **А**).
|
||||
|
||||
Рекомендации по выбору значений:
|
||||
```text
|
||||
Jc — 1 ≤ Jc ≤ 128; от 4 до 12 включительно
|
||||
Jmin — Jmax > Jmin < 1280*; рекомендовано 8
|
||||
Jmax — Jmin < Jmax ≤ 1280*; рекомендовано 80
|
||||
S1 — S1 ≤ 1132* (1280* - 148 = 1132); S1 + 56 ≠ S2;
|
||||
рекомендованный диапазон от 15 до 150 включительно
|
||||
S2 — S2 ≤ 1188* (1280* - 92 = 1188);
|
||||
рекомендованный диапазон от 15 до 150 включительно
|
||||
H1/H2/H3/H4 — должны быть уникальны и отличаться друг от друга;
|
||||
рекомендованный диапазон от 5 до 2147483647 включительно
|
||||
|
||||
* Предполагается, что подключение к Интернету имеет MTU 1280.
|
||||
```
|
||||
> [!IMPORTANT]
|
||||
> Рекомендуется использовать собственные, уникальные значения.\
|
||||
> Для выбора параметров можете воспользоваться [генератором](https://htmlpreview.github.io/?https://gist.githubusercontent.com/avbor/955782b5c37b06240b243aa375baeac5/raw/13f5517ca473b47c412b9a99407066de973732bd/awg-gen.html).
|
||||
|
||||
#### Конфигурация Сервера B (_Нидерланды_):
|
||||
|
||||
Создаем файл конфигурации интерфейса (`awg0`)
|
||||
```bash
|
||||
nano /etc/amnezia/amneziawg/awg0.conf
|
||||
```
|
||||
|
||||
Содержимое файла
|
||||
```ini
|
||||
[Interface]
|
||||
Address = 10.10.10.1/24
|
||||
ListenPort = 8443
|
||||
PrivateKey = <PRIVATE_KEY_SERVER_B>
|
||||
SaveConfig = true
|
||||
Jc = 4
|
||||
Jmin = 8
|
||||
Jmax = 80
|
||||
S1 = 29
|
||||
S2 = 15
|
||||
S3 = 18
|
||||
S4 = 0
|
||||
H1 = 2087563914
|
||||
H2 = 188817757
|
||||
H3 = 101784570
|
||||
H4 = 432174303
|
||||
|
||||
[Peer]
|
||||
PublicKey = <PUBLIC_KEY_SERVER_A>
|
||||
AllowedIPs = 10.10.10.2/32
|
||||
```
|
||||
|
||||
`ListenPort` - порт, на котором сервер будет ждать подключения, можете выбрать любой свободный.\
|
||||
`<PRIVATE_KEY_SERVER_B>` - содержимое файла `private.key` с сервера **B**.\
|
||||
`<PUBLIC_KEY_SERVER_A>` - содержимое файла `public.key` с сервера **A**.
|
||||
|
||||
Открываем порт на фаерволе (если включен):
|
||||
```bash
|
||||
sudo ufw allow from <PUBLIC_IP_SERVER_A> to any port 8443 proto udp
|
||||
```
|
||||
|
||||
`<PUBLIC_IP_SERVER_A>` - внешний IP адрес Сервера **A**.
|
||||
|
||||
#### Конфигурация Сервера A (_РФ_):
|
||||
|
||||
Создаем файл конфигурации интерфейса (`awg0`)
|
||||
```bash
|
||||
nano /etc/amnezia/amneziawg/awg0.conf
|
||||
```
|
||||
|
||||
Содержимое файла
|
||||
```ini
|
||||
[Interface]
|
||||
Address = 10.10.10.2/24
|
||||
PrivateKey = <PRIVATE_KEY_SERVER_A>
|
||||
Jc = 4
|
||||
Jmin = 8
|
||||
Jmax = 80
|
||||
S1 = 29
|
||||
S2 = 15
|
||||
S3 = 18
|
||||
S4 = 0
|
||||
H1 = 2087563914
|
||||
H2 = 188817757
|
||||
H3 = 101784570
|
||||
H4 = 432174303
|
||||
I1 = <b 0xc10000000108981eba846e21f74e00>
|
||||
I2 = <b 0xc20000000108981eba846e21f74e00>
|
||||
I3 = <b 0xc30000000108981eba846e21f74e00>
|
||||
I4 = <b 0x43981eba846e21f74e>
|
||||
I5 = <b 0x43981eba846e21f74e>
|
||||
|
||||
[Peer]
|
||||
PublicKey = <PUBLIC_KEY_SERVER_B>
|
||||
Endpoint = <PUBLIC_IP_SERVER_B>:8443
|
||||
AllowedIPs = 10.10.10.1/32
|
||||
PersistentKeepalive = 25
|
||||
```
|
||||
|
||||
`<PRIVATE_KEY_SERVER_A>` - содержимое файла `private.key` с сервера **A**.\
|
||||
`<PUBLIC_KEY_SERVER_B>` - содержимое файла `public.key` с сервера **B**.\
|
||||
`<PUBLIC_IP_SERVER_B>` - публичный IP адрес сервера **B**.
|
||||
|
||||
#### Включаем туннель на обоих серверах:
|
||||
```bash
|
||||
sudo systemctl enable --now awg-quick@awg0
|
||||
```
|
||||
|
||||
Убедитесь, что с Сервера `A` доступен Сервер `B` через туннель.
|
||||
```bash
|
||||
ping 10.10.10.1
|
||||
PING 10.10.10.1 (10.10.10.1) 56(84) bytes of data.
|
||||
64 bytes from 10.10.10.1: icmp_seq=1 ttl=64 time=35.1 ms
|
||||
64 bytes from 10.10.10.1: icmp_seq=2 ttl=64 time=35.0 ms
|
||||
64 bytes from 10.10.10.1: icmp_seq=3 ttl=64 time=35.1 ms
|
||||
^C
|
||||
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Шаг 2. Установка telemt на Сервере B (_условно Нидерланды_)
|
||||
|
||||
Установка и настройка описаны [здесь](https://github.com/telemt/telemt/blob/main/docs/QUICK_START_GUIDE.ru.md) или [здесь](https://gitlab.com/An0nX/telemt-docker#-quick-start-docker-compose).\
|
||||
Подразумевается, что telemt ожидает подключения на порту `443\tcp`.
|
||||
|
||||
В конфиге telemt необходимо включить протокол `Proxy` и ограничить подключения к нему только через туннель.
|
||||
|
||||
```toml
|
||||
[server]
|
||||
port = 443
|
||||
listen_addr_ipv4 = "10.10.10.1"
|
||||
proxy_protocol = true
|
||||
```
|
||||
|
||||
А также, для правильной генерации ссылок, указать FQDN или IP адрес и порт Сервера `A`
|
||||
|
||||
```toml
|
||||
[general.links]
|
||||
show = "*"
|
||||
public_host = "<FQDN_OR_IP_SERVER_A>"
|
||||
public_port = 443
|
||||
```
|
||||
|
||||
Открываем порт на фаерволе (если включен):
|
||||
```bash
|
||||
sudo ufw allow from 10.10.10.2 to any port 443 proto tcp
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Шаг 3. Настройка HAProxy на Сервере A (_РФ_)
|
||||
|
||||
Т.к. в стандартном репозитории Ubuntu версия относительно старая, имеет смысл воспользоваться официальным образом Docker.\
|
||||
[Инструкция](https://docs.docker.com/engine/install/ubuntu/) по установке Docker на Ubuntu.
|
||||
|
||||
> [!WARNING]
|
||||
> По умолчанию у обычных пользователей нет прав на использование портов < 1024.\
|
||||
> Попытки запустить HAProxy на 443 порту могут приводить к ошибкам:
|
||||
> ```
|
||||
> [ALERT] (8) : Binding [/usr/local/etc/haproxy/haproxy.cfg:17] for frontend tcp_in_443:
|
||||
> protocol tcpv4: cannot bind socket (Permission denied) for [0.0.0.0:443].
|
||||
> ```
|
||||
> Есть два простых способа обойти это ограничение, выберите что-то одно:
|
||||
> 1. На уровне ОС изменить настройку net.ipv4.ip_unprivileged_port_start, разрешив пользователям использовать все порты:
|
||||
> ```
|
||||
> echo "net.ipv4.ip_unprivileged_port_start = 0" | sudo tee -a /etc/sysctl.conf && sudo sysctl -p
|
||||
> ```
|
||||
> или
|
||||
>
|
||||
> 2. Запустить HAProxy под root:\
|
||||
> Раскомментируйте в docker-compose.yaml параметр `user: "root"`.
|
||||
|
||||
#### Создаем папку для HAProxy:
|
||||
```bash
|
||||
mkdir -p /opt/docker-compose/haproxy && cd $_
|
||||
```
|
||||
#### Создаем файл docker-compose.yaml
|
||||
|
||||
`nano docker-compose.yaml`
|
||||
|
||||
Содержимое файла
|
||||
```yaml
|
||||
services:
|
||||
haproxy:
|
||||
image: haproxy:latest
|
||||
container_name: haproxy
|
||||
restart: unless-stopped
|
||||
# user: "root"
|
||||
network_mode: "host"
|
||||
volumes:
|
||||
- ./haproxy.cfg:/usr/local/etc/haproxy/haproxy.cfg:ro
|
||||
logging:
|
||||
driver: "json-file"
|
||||
options:
|
||||
max-size: "1m"
|
||||
max-file: "1"
|
||||
```
|
||||
#### Создаем файл конфига haproxy.cfg
|
||||
Принимаем подключения на порту 443\tcp и отправляем их через туннель на Сервер `B` 10.10.10.1:443
|
||||
|
||||
`nano haproxy.cfg`
|
||||
|
||||
Содержимое файла
|
||||
```haproxy
|
||||
global
|
||||
log stdout format raw local0
|
||||
maxconn 10000
|
||||
|
||||
defaults
|
||||
log global
|
||||
mode tcp
|
||||
option tcplog
|
||||
option clitcpka
|
||||
option srvtcpka
|
||||
timeout connect 5s
|
||||
timeout client 2h
|
||||
timeout server 2h
|
||||
timeout check 5s
|
||||
|
||||
frontend tcp_in_443
|
||||
bind *:443
|
||||
maxconn 8000
|
||||
option tcp-smart-accept
|
||||
default_backend telemt_nodes
|
||||
|
||||
backend telemt_nodes
|
||||
option tcp-smart-connect
|
||||
server server_b 10.10.10.1:443 check inter 5s rise 2 fall 3 send-proxy-v2
|
||||
|
||||
|
||||
```
|
||||
>[!WARNING]
|
||||
>**Файл должен заканчиваться пустой строкой, иначе HAProxy не запустится!**
|
||||
|
||||
#### Разрешаем порт 443\tcp в фаерволе (если включен)
|
||||
```bash
|
||||
sudo ufw allow 443/tcp
|
||||
```
|
||||
|
||||
#### Запускаем контейнер HAProxy
|
||||
```bash
|
||||
docker compose up -d
|
||||
```
|
||||
|
||||
Если все настроено верно, то теперь можно пробовать подключить клиентов Telegram с использованием ссылок из лога\api telemt.
|
||||
278
docs/fronting-splitting/TLS-F-TCP-S.ru.md
Normal file
278
docs/fronting-splitting/TLS-F-TCP-S.ru.md
Normal file
@@ -0,0 +1,278 @@
|
||||
# TLS-F и TCP-S в Telemt
|
||||
|
||||
## Общая архитектура
|
||||
|
||||
**Telemt** - это прежде всего реализация **MTProxy**, через которую проходит payload Telegram
|
||||
|
||||
Подсистема **TLS-Fronting / TCP-Splitting** служит **маскировочным транспортным слоем**, задача которого - сделать MTProxy-соединение внешне похожим на обычное TLS-подключение к легитимному сайту
|
||||
|
||||
Таким образом:
|
||||
|
||||
- **MTProxy** - основной функциональный слой Telemt для обработки Telegram-трафика
|
||||
- **TLS-Fronting / TCP-Splitting** - подсистема маскировки транспорта
|
||||
|
||||
С точки зрения сети Telemt ведёт себя как **TLS-сервер**, но фактически:
|
||||
|
||||
- валидные MTProxy-клиенты остаются внутри контура Telemt
|
||||
- любые другие TLS-клиенты проксируются на обычный HTTPS-сервер-заглушку
|
||||
|
||||
# Базовый сценарий / Best-practice
|
||||
|
||||
Предположим, у вас есть домен:
|
||||
|
||||
```
|
||||
umweltschutz.de
|
||||
```
|
||||
|
||||
### 1 DNS
|
||||
|
||||
Вы создаёте A-запись:
|
||||
|
||||
```
|
||||
umweltschutz.de -> A-запись 198.18.88.88
|
||||
```
|
||||
|
||||
где `198.18.88.88` - IP вашего сервера с telemt
|
||||
|
||||
### 2 TLS-домен
|
||||
|
||||
В конфигурации Telemt:
|
||||
|
||||
```toml
|
||||
[censorship]
|
||||
tls_domain = "umweltschutz.de"
|
||||
```
|
||||
|
||||
Этот домен используется клиентом как SNI в ClientHello
|
||||
|
||||
### 3 Сервер-заглушка
|
||||
|
||||
Вы поднимаете обычный HTTPS-сервер, например **nginx**, с сертификатом для этого домена.
|
||||
|
||||
Он может работать:
|
||||
|
||||
- на том же сервере
|
||||
- на другом сервере
|
||||
- на другом порту
|
||||
|
||||
В конфигурации Telemt:
|
||||
|
||||
```toml
|
||||
[censorship]
|
||||
mask_host = "127.0.0.1"
|
||||
mask_port = 8443
|
||||
```
|
||||
|
||||
где `127.0.0.1` - IP сервера-заглушки, а 8443 - порт, который он слушает
|
||||
|
||||
Этот сервер нужен **для обработки любых non-MTProxy запросов**
|
||||
|
||||
### 4 Работа Telemt
|
||||
|
||||
После запуска Telemt действует следующим образом:
|
||||
|
||||
1) принимает входящее TCP-соединение
|
||||
2) анализирует TLS-ClientHello
|
||||
3) пытается определить, является ли соединение валидным **MTProxy FakeTLS**
|
||||
|
||||
Далее работают два варианта логики:
|
||||
|
||||
---
|
||||
|
||||
# Сценарий 1 - MTProxy клиент с валидным ключом
|
||||
|
||||
Если клиент предъявил **валидный MTProxy-ключ**:
|
||||
|
||||
- соединение **остаётся внутри Telemt**
|
||||
- TLS используется только как **транспортная маскировка**
|
||||
- далее запускается обычная логика **MTProxy**
|
||||
|
||||
Для внешнего наблюдателя это выглядит как:
|
||||
|
||||
```
|
||||
TLS connection -> umweltschutz.de
|
||||
```
|
||||
|
||||
Хотя внутри передаётся **MTProto-трафик Telegram**
|
||||
|
||||
# Сценарий 2 - обычный TLS-клиент - crawler / scanner / browser
|
||||
|
||||
Если Telemt не обнаруживает валидный MTProxy-ключ:
|
||||
|
||||
соединение **переключается в режим TCP-Splitting / TCP-Splicing**.
|
||||
|
||||
В этом режиме Telemt:
|
||||
|
||||
1. открывает новое TCP-соединение к
|
||||
|
||||
```
|
||||
mask_host:mask_port
|
||||
```
|
||||
|
||||
2. начинает **проксировать TCP-трафик**
|
||||
|
||||
Важно:
|
||||
|
||||
* клиентский TLS-запрос **НЕ модифицируется**
|
||||
* **ClientHello передаётся "как есть", без изменений**
|
||||
* **SNI остаётся неизменным**
|
||||
* Telemt **не завершает TLS-рукопожатие**, а только перенаправляет его на более низком уровне сетевого стека - L4
|
||||
|
||||
Таким образом upstream-сервер получает **оригинальное TLS-соединение клиента**:
|
||||
|
||||
- если это nginx-заглушка, он просто отдаёт обычный сайт
|
||||
- для внешнего наблюдателя это выглядит как обычный HTTPS-сервер
|
||||
|
||||
# TCP-S / TCP-Splitting / TCP-Splicing
|
||||
|
||||
Ключевые свойства механизма:
|
||||
|
||||
**Telemt работает как TCP-переключатель:**
|
||||
|
||||
1) принимает соединение
|
||||
2) определяет тип клиента
|
||||
3) либо:
|
||||
|
||||
- обрабатывает MTProxy внутри
|
||||
- либо проксирует TCP-поток
|
||||
|
||||
При проксировании:
|
||||
|
||||
- Telemt **разрешает `mask_host` в IP**
|
||||
- устанавливает TCP-соединение
|
||||
- начинает **bidirectional TCP relay**
|
||||
|
||||
При этом:
|
||||
|
||||
- TLS-рукопожатие происходит **между клиентом и `mask_host`**
|
||||
- Telemt выступает только **на уровне L4 - как TCP-релей**, такой же как HAProxy в TCP-режиме
|
||||
|
||||
# Использование чужого домена
|
||||
|
||||
Можно использовать и внешний сайт.
|
||||
|
||||
Например:
|
||||
|
||||
```toml
|
||||
[censorship]
|
||||
tls_domain = "github.com"
|
||||
mask_host = "github.com"
|
||||
mask_port = 443
|
||||
```
|
||||
|
||||
или
|
||||
|
||||
```toml
|
||||
[censorship]
|
||||
mask_host = "140.82.121.4"
|
||||
```
|
||||
|
||||
В этом случае:
|
||||
|
||||
- цензор видит **TLS-подключение к github.com**
|
||||
- обычные клиенты/краулер действительно получают **настоящий GitHub**
|
||||
|
||||
Telemt просто **проксирует TCP-соединение на GitHub**
|
||||
|
||||
# Что видит анализатор трафика?
|
||||
|
||||
Для DPI это выглядит так:
|
||||
|
||||
```
|
||||
client -> TLS -> github.com
|
||||
```
|
||||
|
||||
или
|
||||
|
||||
```
|
||||
client -> TLS -> umweltschutz.de
|
||||
```
|
||||
|
||||
TLS-handshake выглядит валидным, SNI соответствует домену, сертификат корректный - от целевого `mask_host:mask_port`
|
||||
|
||||
# Что видит сканер / краулер?
|
||||
|
||||
Если сканер попытается подключиться:
|
||||
|
||||
```
|
||||
openssl s_client -connect 198.18.88.88:443 -servername umweltschutz.de
|
||||
```
|
||||
|
||||
он получит **обычный HTTPS-сайт-заглушку**
|
||||
|
||||
Потому что:
|
||||
|
||||
- он не предъявил MTProxy-ключ
|
||||
- Telemt отправил соединение на `mask_host:mask_port`, на котором находится nginx
|
||||
|
||||
# Какую проблему решает TLS-Fronting / TCP-Splitting?
|
||||
|
||||
Эта архитектура решает сразу несколько проблем обхода цензуры.
|
||||
|
||||
## 1 Закрытие плоскости MTProxy от активного сканирования
|
||||
|
||||
Многие цензоры:
|
||||
|
||||
- сканируют IP-адреса
|
||||
- проверяют известные сигнатуры прокси
|
||||
|
||||
Telemt отвечает на такие проверки **обычным HTTPS-сайтом**, поэтому прокси невозможно обнаружить простым сканированием
|
||||
|
||||
---
|
||||
|
||||
## 2 Маскировка трафика под легитимный TLS
|
||||
|
||||
Для DPI-систем соединение выглядит как:
|
||||
|
||||
```
|
||||
обычный TLS-трафик к популярному домену
|
||||
```
|
||||
|
||||
Это делает блокировку значительно сложнее и непредсказуемее
|
||||
|
||||
---
|
||||
|
||||
## 3 Устойчивость к протокольному анализу
|
||||
|
||||
MTProxy трафик проходит **внутри TLS-like-потока**, поэтому:
|
||||
|
||||
- не видны характерные сигнатуры MTProto
|
||||
- соединение выглядит как обычный HTTPS
|
||||
|
||||
---
|
||||
|
||||
## 4 Правдоподобное поведение сервера
|
||||
|
||||
Даже если краулер:
|
||||
|
||||
- подключится сам
|
||||
- выполнит TLS-handshake
|
||||
- попытается получить HTTP-ответ
|
||||
|
||||
он увидит **реальный сайт**, а не telemt
|
||||
|
||||
Это устраняет один из главных признаков для антифрод-краулеров мобильных операторов
|
||||
|
||||
# Схема
|
||||
|
||||
```text
|
||||
Client
|
||||
│
|
||||
│ TCP
|
||||
│
|
||||
V
|
||||
Telemt
|
||||
│
|
||||
├── valid MTProxy key
|
||||
│ │
|
||||
│ V
|
||||
│ MTProxy logic
|
||||
│
|
||||
└── обычный TLS клиент
|
||||
│
|
||||
V
|
||||
TCP-Splitting
|
||||
│
|
||||
V
|
||||
mask_host:mask_port
|
||||
```
|
||||
BIN
docs/model/FakeTLS.png
Normal file
BIN
docs/model/FakeTLS.png
Normal file
Binary file not shown.
|
After Width: | Height: | Size: 650 KiB |
285
docs/model/MODEL.en.md
Normal file
285
docs/model/MODEL.en.md
Normal file
@@ -0,0 +1,285 @@
|
||||
# Telemt Runtime Model
|
||||
|
||||
## Scope
|
||||
This document defines runtime concepts used by the Middle-End (ME) transport pipeline and the orchestration logic around it.
|
||||
|
||||
It focuses on:
|
||||
- `ME Pool / Reader / Writer / Refill / Registry`
|
||||
- `Adaptive Floor`
|
||||
- `Trio-State`
|
||||
- `Generation Lifecycle`
|
||||
|
||||
## Core Entities
|
||||
|
||||
### ME Pool
|
||||
`ME Pool` is the runtime orchestrator for all Middle-End writers.
|
||||
|
||||
Responsibilities:
|
||||
- Holds writer inventory by DC/family/endpoint.
|
||||
- Maintains routing primitives and writer selection policy.
|
||||
- Tracks generation state (`active`, `warm`, `draining` context).
|
||||
- Applies runtime policies (floor mode, refill, reconnect, reinit, fallback behavior).
|
||||
- Exposes readiness gates used by admission logic (for conditional accept/cast behavior).
|
||||
|
||||
Non-goals:
|
||||
- It does not own client protocol decoding.
|
||||
- It does not own per-client business policy (quotas/limits).
|
||||
|
||||
### ME Writer
|
||||
`ME Writer` is a long-lived ME RPC tunnel bound to one concrete ME endpoint (`ip:port`), with:
|
||||
- Outbound command channel (send path).
|
||||
- Associated reader loop (inbound path).
|
||||
- Health/degraded flags.
|
||||
- Contour/state and generation metadata.
|
||||
|
||||
A writer is the actual data plane carrier for client sessions once bound.
|
||||
|
||||
### ME Reader
|
||||
`ME Reader` is the inbound parser/dispatcher for one writer:
|
||||
- Reads/decrypts ME RPC frames.
|
||||
- Validates sequence/checksum.
|
||||
- Routes payloads to client-connection channels via `Registry`.
|
||||
- Emits close/ack/data events and updates telemetry.
|
||||
|
||||
Design intent:
|
||||
- Reader must stay as non-blocking as possible.
|
||||
- Backpressure on a single client route must not stall the whole writer stream.
|
||||
|
||||
### Refill
|
||||
`Refill` is the recovery mechanism that restores writer coverage when capacity drops:
|
||||
- Per-endpoint restore (same endpoint first).
|
||||
- Per-DC restore to satisfy required floor.
|
||||
- Optional outage-mode/shadow behavior for fragile single-endpoint DCs.
|
||||
|
||||
Refill works asynchronously and should not block hot routing paths.
|
||||
|
||||
### Registry
|
||||
`Registry` is the routing index between ME and client sessions:
|
||||
- `conn_id -> client response channel`
|
||||
- `conn_id <-> writer_id` binding map
|
||||
- writer activity snapshots and idle tracking
|
||||
|
||||
Main invariants:
|
||||
- A `conn_id` routes to at most one active response channel.
|
||||
- Writer loss triggers safe unbind/cleanup and close propagation.
|
||||
- Registry state is the source of truth for active ME-bound session mapping.
|
||||
|
||||
## Adaptive Floor
|
||||
|
||||
### What it is
|
||||
`Adaptive Floor` is a runtime policy that changes target writer count per DC based on observed activity, instead of always holding static peak floor.
|
||||
|
||||
### Why it exists
|
||||
Goals:
|
||||
- Reduce idle writer churn under low traffic.
|
||||
- Keep enough warm capacity to avoid client-visible stalls on burst recovery.
|
||||
- Limit needless reconnect storms on unstable endpoints.
|
||||
|
||||
### Behavioral model
|
||||
- Under activity: floor converges toward configured static requirement.
|
||||
- Under prolonged idle: floor can shrink to a safe minimum.
|
||||
- Recovery/grace windows prevent aggressive oscillation.
|
||||
|
||||
### Safety constraints
|
||||
- Never violate minimal survivability floor for a DC group.
|
||||
- Refill must still restore quickly on demand.
|
||||
- Floor adaptation must not force-drop already bound healthy sessions.
|
||||
|
||||
## Trio-State
|
||||
|
||||
`Trio-State` is writer contouring:
|
||||
- `Warm`
|
||||
- `Active`
|
||||
- `Draining`
|
||||
|
||||
### State semantics
|
||||
- `Warm`: connected and validated, not primary for new binds.
|
||||
- `Active`: preferred for new binds and normal traffic.
|
||||
- `Draining`: no new regular binds; existing sessions continue until graceful retirement rules apply.
|
||||
|
||||
### Transition intent
|
||||
- `Warm -> Active`: when coverage/readiness conditions are satisfied.
|
||||
- `Active -> Draining`: on generation swap, endpoint replacement, or controlled retirement.
|
||||
- `Draining -> removed`: after drain TTL/force-close policy (or when naturally empty).
|
||||
|
||||
This separation reduces SPOF and keeps cutovers predictable.
|
||||
|
||||
## Generation Lifecycle
|
||||
|
||||
Generation isolates pool epochs during reinit/reconfiguration.
|
||||
|
||||
### Lifecycle phases
|
||||
1. `Bootstrap`: initial writers are established.
|
||||
2. `Warmup`: next generation writers are created and validated.
|
||||
3. `Activation`: generation promoted to active when coverage gate passes.
|
||||
4. `Drain`: previous generation becomes draining, existing sessions are allowed to finish.
|
||||
5. `Retire`: old generation writers are removed after graceful rules.
|
||||
|
||||
### Operational guarantees
|
||||
- No partial generation activation without minimum coverage.
|
||||
- Existing healthy client sessions should not be dropped just because a new generation appears.
|
||||
- Draining generation exists to absorb in-flight traffic during swap.
|
||||
|
||||
### Readiness and admission
|
||||
Pool readiness is not equivalent to “all endpoints fully saturated”.
|
||||
Typical gating strategy:
|
||||
- Open admission when per-DC minimal alive coverage exists.
|
||||
- Continue background saturation for multi-endpoint DCs.
|
||||
|
||||
This keeps startup latency low while preserving eventual full capacity.
|
||||
|
||||
## Interactions Between Concepts
|
||||
|
||||
- `Generation` defines pool epochs.
|
||||
- `Trio-State` defines per-writer role inside/around those epochs.
|
||||
- `Adaptive Floor` defines how much capacity should be maintained right now.
|
||||
- `Refill` is the actuator that closes the gap between desired and current capacity.
|
||||
- `Registry` keeps per-session routing correctness while all of the above changes over time.
|
||||
|
||||
## Architectural Approach
|
||||
|
||||
### Layered Design
|
||||
The runtime is intentionally split into two planes:
|
||||
- `Control Plane`: decides desired topology and policy (`floor`, `generation swap`, `refill`, `fallback`).
|
||||
- `Data Plane`: executes packet/session transport (`reader`, `writer`, routing, acks, close propagation).
|
||||
|
||||
Architectural rule:
|
||||
- Control Plane may change writer inventory and policy.
|
||||
- Data Plane must remain stable and low-latency while those changes happen.
|
||||
|
||||
### Ownership Model
|
||||
Ownership is centered around explicit state domains:
|
||||
- `MePool` owns writer lifecycle and policy state.
|
||||
- `Registry` owns per-connection routing bindings.
|
||||
- `Writer task` owns outbound ME socket send progression.
|
||||
- `Reader task` owns inbound ME socket parsing and event dispatch.
|
||||
|
||||
This prevents accidental cross-layer mutation and keeps invariants local.
|
||||
|
||||
### Control Plane Responsibilities
|
||||
Control Plane is event-driven and policy-driven:
|
||||
- Startup initialization and readiness gates.
|
||||
- Runtime reinit (periodic or config-triggered).
|
||||
- Coverage checks per DC/family/endpoint group.
|
||||
- Floor enforcement (static/adaptive).
|
||||
- Refill scheduling and retry orchestration.
|
||||
- Generation transition (`warm -> active`, previous `active -> draining`).
|
||||
|
||||
Control Plane must prioritize determinism over short-term aggressiveness.
|
||||
|
||||
### Data Plane Responsibilities
|
||||
Data Plane is throughput-first and allocation-sensitive:
|
||||
- Session bind to writer.
|
||||
- Per-frame parsing/validation and dispatch.
|
||||
- Ack and close signal propagation.
|
||||
- Route drop behavior under missing connection or closed channel.
|
||||
- Minimal critical logging in hot path.
|
||||
|
||||
Data Plane should avoid waiting on operations that are not strictly required for frame correctness.
|
||||
|
||||
## Concurrency and Synchronization
|
||||
|
||||
### Concurrency Principles
|
||||
- Per-writer isolation: each writer has independent send/read task loops.
|
||||
- Per-connection isolation: client channel state is scoped by `conn_id`.
|
||||
- Asynchronous recovery: refill/reconnect runs outside the packet hot path.
|
||||
|
||||
### Synchronization Strategy
|
||||
- Shared maps use fine-grained, short-lived locking.
|
||||
- Read-mostly paths avoid broad write-lock windows.
|
||||
- Backpressure decisions are localized at route/channel boundary.
|
||||
|
||||
Design target:
|
||||
- A slow consumer should degrade only itself (or its route), not global writer progress.
|
||||
|
||||
### Cancellation and Shutdown
|
||||
Writer and reader loops are cancellation-aware:
|
||||
- explicit cancel token / close command support;
|
||||
- safe unbind and cleanup via registry;
|
||||
- deterministic order: stop admission -> drain/close -> release resources.
|
||||
|
||||
## Consistency Model
|
||||
|
||||
### Session Consistency
|
||||
For one `conn_id`:
|
||||
- exactly one active route target at a time;
|
||||
- close and unbind must be idempotent;
|
||||
- writer loss must not leave dangling bindings.
|
||||
|
||||
### Generation Consistency
|
||||
Generational consistency guarantees:
|
||||
- New generation is not promoted before minimum coverage gate.
|
||||
- Previous generation remains available in `draining` state during handover.
|
||||
- Forced retirement is policy-bound (`drain ttl`, optional force-close), not immediate.
|
||||
|
||||
### Policy Consistency
|
||||
Policy changes (`adaptive/static floor`, fallback mode, retries) should apply without violating established active-session routing invariants.
|
||||
|
||||
## Backpressure and Flow Control
|
||||
|
||||
### Route-Level Backpressure
|
||||
Route channels are bounded by design.
|
||||
When pressure increases:
|
||||
- short burst absorption is allowed;
|
||||
- prolonged congestion triggers controlled drop semantics;
|
||||
- drop accounting is explicit via metrics/counters.
|
||||
|
||||
### Reader Non-Blocking Priority
|
||||
Inbound ME reader path should never be serialized behind one congested client route.
|
||||
Practical implication:
|
||||
- prefer non-blocking route attempt in the parser loop;
|
||||
- move heavy recovery to async side paths.
|
||||
|
||||
## Failure Domain Strategy
|
||||
|
||||
### Endpoint-Level Failure
|
||||
Failure of one endpoint should trigger endpoint-scoped recovery first:
|
||||
- same endpoint reconnect;
|
||||
- endpoint replacement within same DC group if applicable.
|
||||
|
||||
### DC-Level Degradation
|
||||
If a DC group cannot satisfy floor:
|
||||
- keep service via remaining coverage if policy allows;
|
||||
- continue asynchronous refill saturation in background.
|
||||
|
||||
### Whole-Pool Readiness Loss
|
||||
If no sufficient ME coverage exists:
|
||||
- admission gate can hold new accepts (conditional policy);
|
||||
- existing sessions should continue when their path remains healthy.
|
||||
|
||||
## Performance Architecture Notes
|
||||
|
||||
### Hotpath Discipline
|
||||
Allowed in hotpath:
|
||||
- fixed-size parsing and cheap validation;
|
||||
- bounded channel operations;
|
||||
- precomputed or low-allocation access patterns.
|
||||
|
||||
Avoid in hotpath:
|
||||
- repeated expensive decoding;
|
||||
- broad locks with awaits inside critical sections;
|
||||
- verbose high-frequency logging.
|
||||
|
||||
### Throughput Stability Over Peak Spikes
|
||||
Architecture prefers stable throughput and predictable latency over short peak gains that increase churn or long-tail reconnect times.
|
||||
|
||||
## Evolution and Extension Rules
|
||||
|
||||
To evolve this model safely:
|
||||
- Add new policy knobs in Control Plane first.
|
||||
- Keep Data Plane contracts stable (`conn_id`, route semantics, close semantics).
|
||||
- Validate generation and registry invariants before enabling by default.
|
||||
- Introduce new retry/recovery strategies behind explicit config.
|
||||
|
||||
## Failure and Recovery Notes
|
||||
|
||||
- Single-endpoint DC failure is a normal degraded mode case; policy should prioritize fast reconnect and optional shadow/probing strategies.
|
||||
- Idle close by peer should be treated as expected when upstream enforces idle timeout.
|
||||
- Reconnect backoff must protect against synchronized churn while still allowing fast first retries.
|
||||
- Fallback (`ME -> direct DC`) is a policy switch, not a transport bug by itself.
|
||||
|
||||
## Terminology Summary
|
||||
- `Coverage`: enough live writers to satisfy per-DC acceptance policy.
|
||||
- `Floor`: target minimum writer count policy.
|
||||
- `Churn`: frequent writer reconnect/remove cycles.
|
||||
- `Hotpath`: per-packet/per-connection data path where extra waits/allocations are expensive.
|
||||
285
docs/model/MODEL.ru.md
Normal file
285
docs/model/MODEL.ru.md
Normal file
@@ -0,0 +1,285 @@
|
||||
# Runtime-модель Telemt
|
||||
|
||||
## Область описания
|
||||
Документ фиксирует ключевые runtime-понятия пайплайна Middle-End (ME) и оркестрации вокруг него.
|
||||
|
||||
Фокус:
|
||||
- `ME Pool / Reader / Writer / Refill / Registry`
|
||||
- `Adaptive Floor`
|
||||
- `Trio-State`
|
||||
- `Generation Lifecycle`
|
||||
|
||||
## Базовые сущности
|
||||
|
||||
### ME Pool
|
||||
`ME Pool` — центральный оркестратор всех Middle-End writer-ов.
|
||||
|
||||
Зона ответственности:
|
||||
- хранит инвентарь writer-ов по DC/family/endpoint;
|
||||
- управляет выбором writer-а и маршрутизацией;
|
||||
- ведёт состояние поколений (`active`, `warm`, `draining` контекст);
|
||||
- применяет runtime-политики (floor, refill, reconnect, reinit, fallback);
|
||||
- отдаёт сигналы готовности для admission-логики (conditional accept/cast).
|
||||
|
||||
Что не делает:
|
||||
- не декодирует клиентский протокол;
|
||||
- не реализует бизнес-политику пользователя (квоты/лимиты).
|
||||
|
||||
### ME Writer
|
||||
`ME Writer` — долгоживущий ME RPC-канал к конкретному endpoint (`ip:port`), у которого есть:
|
||||
- канал команд на отправку;
|
||||
- связанный reader loop для входящего потока;
|
||||
- флаги состояния/деградации;
|
||||
- метаданные contour/state и generation.
|
||||
|
||||
Writer — это фактический data-plane носитель клиентских сессий после бинда.
|
||||
|
||||
### ME Reader
|
||||
`ME Reader` — входной parser/dispatcher одного writer-а:
|
||||
- читает и расшифровывает ME RPC-фреймы;
|
||||
- проверяет sequence/checksum;
|
||||
- маршрутизирует payload в client-каналы через `Registry`;
|
||||
- обрабатывает close/ack/data и обновляет телеметрию.
|
||||
|
||||
Инженерный принцип:
|
||||
- Reader должен оставаться неблокирующим.
|
||||
- Backpressure одной клиентской сессии не должен останавливать весь поток writer-а.
|
||||
|
||||
### Refill
|
||||
`Refill` — механизм восстановления покрытия writer-ов при просадке:
|
||||
- восстановление на том же endpoint в первую очередь;
|
||||
- восстановление по DC до требуемого floor;
|
||||
- опциональные outage/shadow-режимы для хрупких single-endpoint DC.
|
||||
|
||||
Refill работает асинхронно и не должен блокировать hotpath.
|
||||
|
||||
### Registry
|
||||
`Registry` — маршрутизационный индекс между ME и клиентскими сессиями:
|
||||
- `conn_id -> канал ответа клиенту`;
|
||||
- map биндов `conn_id <-> writer_id`;
|
||||
- снимки активности writer-ов и idle-трекинг.
|
||||
|
||||
Ключевые инварианты:
|
||||
- один `conn_id` маршрутизируется максимум в один активный канал ответа;
|
||||
- потеря writer-а приводит к безопасному unbind/cleanup и отправке close;
|
||||
- именно `Registry` является источником истины по активным ME-биндам.
|
||||
|
||||
## Adaptive Floor
|
||||
|
||||
### Что это
|
||||
`Adaptive Floor` — runtime-политика, которая динамически меняет целевое число writer-ов на DC в зависимости от активности, а не держит всегда фиксированный статический floor.
|
||||
|
||||
### Зачем
|
||||
Цели:
|
||||
- уменьшить churn на idle-трафике;
|
||||
- сохранить достаточную прогретую ёмкость для быстрых всплесков;
|
||||
- снизить лишние reconnect-штормы на нестабильных endpoint.
|
||||
|
||||
### Модель поведения
|
||||
- при активности floor стремится к статическому требованию;
|
||||
- при длительном idle floor может снижаться до безопасного минимума;
|
||||
- grace/recovery окна не дают системе "флапать" слишком резко.
|
||||
|
||||
### Ограничения безопасности
|
||||
- нельзя нарушать минимальный floor выживаемости DC-группы;
|
||||
- refill обязан быстро нарастить покрытие по запросу;
|
||||
- адаптация не должна принудительно ронять уже привязанные healthy-сессии.
|
||||
|
||||
## Trio-State
|
||||
|
||||
`Trio-State` — контурная роль writer-а:
|
||||
- `Warm`
|
||||
- `Active`
|
||||
- `Draining`
|
||||
|
||||
### Семантика состояний
|
||||
- `Warm`: writer подключён и валиден, но не основной для новых биндов.
|
||||
- `Active`: приоритетный для новых биндов и обычного трафика.
|
||||
- `Draining`: новые обычные бинды не назначаются; текущие сессии живут до правил graceful-вывода.
|
||||
|
||||
### Логика переходов
|
||||
- `Warm -> Active`: когда достигнуты условия покрытия/готовности.
|
||||
- `Active -> Draining`: при swap поколения, замене endpoint или контролируемом выводе.
|
||||
- `Draining -> removed`: после drain TTL/force-close политики (или естественного опустошения).
|
||||
|
||||
Такое разделение снижает SPOF-риски и делает cutover предсказуемым.
|
||||
|
||||
## Generation Lifecycle
|
||||
|
||||
Generation изолирует эпохи пула при reinit/reconfiguration.
|
||||
|
||||
### Фазы жизненного цикла
|
||||
1. `Bootstrap`: поднимается начальный набор writer-ов.
|
||||
2. `Warmup`: создаётся и валидируется новое поколение.
|
||||
3. `Activation`: новое поколение становится active после прохождения coverage-gate.
|
||||
4. `Drain`: предыдущее поколение переводится в draining, текущим сессиям дают завершиться.
|
||||
5. `Retire`: старое поколение удаляется по graceful-правилам.
|
||||
|
||||
### Операционные гарантии
|
||||
- нельзя активировать поколение частично без минимального покрытия;
|
||||
- healthy-клиенты не должны теряться только из-за появления нового поколения;
|
||||
- draining-поколение служит буфером для in-flight трафика во время swap.
|
||||
|
||||
### Готовность и приём клиентов
|
||||
Готовность пула не равна "все endpoint полностью насыщены".
|
||||
Типичная стратегия:
|
||||
- открыть admission при минимально достаточном alive-покрытии по DC;
|
||||
- параллельно продолжать saturation для multi-endpoint DC.
|
||||
|
||||
Это уменьшает startup latency и сохраняет выход на полную ёмкость.
|
||||
|
||||
## Как понятия связаны между собой
|
||||
|
||||
- `Generation` задаёт эпохи пула.
|
||||
- `Trio-State` задаёт роль каждого writer-а внутри/между эпохами.
|
||||
- `Adaptive Floor` задаёт, сколько ёмкости нужно сейчас.
|
||||
- `Refill` — исполнитель, который закрывает разницу между desired и current capacity.
|
||||
- `Registry` гарантирует корректную маршрутизацию сессий, пока всё выше меняется.
|
||||
|
||||
## Архитектурный подход
|
||||
|
||||
### Слоистая модель
|
||||
Runtime специально разделён на две плоскости:
|
||||
- `Control Plane`: принимает решения о целевой топологии и политиках (`floor`, `generation swap`, `refill`, `fallback`).
|
||||
- `Data Plane`: исполняет транспорт сессий и пакетов (`reader`, `writer`, маршрутизация, ack, close).
|
||||
|
||||
Ключевое правило:
|
||||
- Control Plane может менять состав writer-ов и policy.
|
||||
- Data Plane должен оставаться стабильным и низколатентным в момент этих изменений.
|
||||
|
||||
### Модель владения состоянием
|
||||
Владение разделено по доменам:
|
||||
- `MePool` владеет жизненным циклом writer-ов и policy-state.
|
||||
- `Registry` владеет routing-биндами клиентских сессий.
|
||||
- `Writer task` владеет исходящей прогрессией ME-сокета.
|
||||
- `Reader task` владеет входящим парсингом и dispatch-событиями.
|
||||
|
||||
Это ограничивает побочные мутации и локализует инварианты.
|
||||
|
||||
### Обязанности Control Plane
|
||||
Control Plane работает событийно и policy-ориентированно:
|
||||
- стартовая инициализация и readiness-gate;
|
||||
- runtime reinit (периодический и/или по изменению конфигурации);
|
||||
- проверки покрытия по DC/family/endpoint group;
|
||||
- применение floor-политики (static/adaptive);
|
||||
- планирование refill и orchestration retry;
|
||||
- переходы поколений (`warm -> active`, прежний `active -> draining`).
|
||||
|
||||
Для него важнее детерминизм, чем агрессивная краткосрочная реакция.
|
||||
|
||||
### Обязанности Data Plane
|
||||
Data Plane ориентирован на пропускную способность и предсказуемую задержку:
|
||||
- bind клиентской сессии к writer-у;
|
||||
- per-frame parsing/validation/dispatch;
|
||||
- распространение ack/close;
|
||||
- корректная реакция на missing conn/closed channel;
|
||||
- минимальный лог-шум в hotpath.
|
||||
|
||||
Data Plane не должен ждать операций, не критичных для корректности текущего фрейма.
|
||||
|
||||
## Конкурентность и синхронизация
|
||||
|
||||
### Принципы конкурентности
|
||||
- Изоляция по writer-у: у каждого writer-а независимые send/read loop.
|
||||
- Изоляция по сессии: состояние канала локально для `conn_id`.
|
||||
- Асинхронное восстановление: refill/reconnect выполняются вне пакетного hotpath.
|
||||
|
||||
### Стратегия синхронизации
|
||||
- Для shared map используются короткие и узкие lock-секции.
|
||||
- Read-heavy пути избегают длительных write-lock окон.
|
||||
- Решения по backpressure локализованы на границе route/channel.
|
||||
|
||||
Цель:
|
||||
- медленный consumer должен деградировать локально, не останавливая глобальный прогресс writer-а.
|
||||
|
||||
### Cancellation и shutdown
|
||||
Reader/Writer loop должны быть cancellation-aware:
|
||||
- явные cancel token / close command;
|
||||
- безопасный unbind/cleanup через registry;
|
||||
- детерминированный порядок: stop admission -> drain/close -> release resources.
|
||||
|
||||
## Модель согласованности
|
||||
|
||||
### Согласованность сессии
|
||||
Для одного `conn_id`:
|
||||
- одновременно ровно один активный route-target;
|
||||
- close/unbind операции идемпотентны;
|
||||
- потеря writer-а не оставляет dangling-бинды.
|
||||
|
||||
### Согласованность поколения
|
||||
Гарантии generation:
|
||||
- новое поколение не активируется до прохождения минимального coverage-gate;
|
||||
- предыдущее поколение остаётся в `draining` на время handover;
|
||||
- принудительный вывод writer-ов ограничен policy (`drain ttl`, optional force-close), а не мгновенный.
|
||||
|
||||
### Согласованность политик
|
||||
Изменение policy (`adaptive/static floor`, fallback mode, retries) не должно ломать инварианты маршрутизации уже активных сессий.
|
||||
|
||||
## Backpressure и управление потоком
|
||||
|
||||
### Route-level backpressure
|
||||
Route-каналы намеренно bounded.
|
||||
При росте нагрузки:
|
||||
- кратковременный burst поглощается;
|
||||
- длительная перегрузка переходит в контролируемую drop-семантику;
|
||||
- все drop-сценарии должны быть прозрачно видны в метриках.
|
||||
|
||||
### Приоритет неблокирующего Reader
|
||||
Входящий ME-reader path не должен сериализоваться из-за одной перегруженной клиентской сессии.
|
||||
Практически это означает:
|
||||
- использовать неблокирующую попытку route в parser loop;
|
||||
- выносить тяжёлое восстановление в асинхронные side-path.
|
||||
|
||||
## Стратегия доменов отказа
|
||||
|
||||
### Отказ отдельного endpoint
|
||||
Сначала применяется endpoint-local recovery:
|
||||
- reconnect в тот же endpoint;
|
||||
- затем замена endpoint внутри той же DC-группы (если доступно).
|
||||
|
||||
### Деградация уровня DC
|
||||
Если DC-группа не набирает floor:
|
||||
- сервис сохраняется на остаточном покрытии (если policy разрешает);
|
||||
- saturation refill продолжается асинхронно в фоне.
|
||||
|
||||
### Потеря готовности всего пула
|
||||
Если достаточного ME-покрытия нет:
|
||||
- admission gate может временно закрыть приём новых подключений (conditional policy);
|
||||
- уже активные сессии продолжают работать, пока их маршрут остаётся healthy.
|
||||
|
||||
## Архитектурные заметки по производительности
|
||||
|
||||
### Дисциплина hotpath
|
||||
Допустимо в hotpath:
|
||||
- фиксированный и дешёвый parsing/validation;
|
||||
- bounded channel operations;
|
||||
- precomputed/low-allocation доступ к данным.
|
||||
|
||||
Нежелательно в hotpath:
|
||||
- повторные дорогие decode;
|
||||
- широкие lock-секции с `await` внутри;
|
||||
- высокочастотный подробный logging.
|
||||
|
||||
### Стабильность важнее пиков
|
||||
Архитектура приоритетно выбирает стабильную пропускную способность и предсказуемую latency, а не краткосрочные пики ценой churn и long-tail reconnect.
|
||||
|
||||
## Правила эволюции модели
|
||||
|
||||
Чтобы расширять модель безопасно:
|
||||
- новые policy knobs сначала внедрять в Control Plane;
|
||||
- контракты Data Plane (`conn_id`, route/close семантика) держать стабильными;
|
||||
- перед дефолтным включением проверять generation/registry инварианты;
|
||||
- новые recovery/retry стратегии вводить через явный config-флаг.
|
||||
|
||||
## Нюансы отказов и восстановления
|
||||
|
||||
- падение single-endpoint DC — штатный деградированный сценарий; приоритет: быстрый reconnect и, при необходимости, shadow/probing;
|
||||
- idle-close со стороны peer должен считаться нормальным событием при upstream idle-timeout;
|
||||
- backoff reconnect-логики должен ограничивать синхронный churn, но сохранять быстрые первые попытки;
|
||||
- fallback (`ME -> direct DC`) — это переключаемая policy-ветка, а не автоматический признак бага транспорта.
|
||||
|
||||
## Краткий словарь
|
||||
- `Coverage`: достаточное число живых writer-ов для политики приёма по DC.
|
||||
- `Floor`: целевая минимальная ёмкость writer-ов.
|
||||
- `Churn`: частые циклы reconnect/remove writer-ов.
|
||||
- `Hotpath`: пер-пакетный/пер-коннектный путь, где любые лишние ожидания и аллокации особенно дороги.
|
||||
BIN
docs/model/architecture.png
Normal file
BIN
docs/model/architecture.png
Normal file
Binary file not shown.
|
After Width: | Height: | Size: 838 KiB |
601
install.sh
601
install.sh
@@ -1,73 +1,556 @@
|
||||
sudo bash -c '
|
||||
set -e
|
||||
#!/bin/sh
|
||||
set -eu
|
||||
|
||||
# --- Проверка на существующую установку ---
|
||||
if systemctl list-unit-files | grep -q telemt.service; then
|
||||
# --- РЕЖИМ ОБНОВЛЕНИЯ ---
|
||||
echo "--- Обнаружена существующая установка Telemt. Запускаю обновление... ---"
|
||||
REPO="${REPO:-telemt/telemt}"
|
||||
BIN_NAME="${BIN_NAME:-telemt}"
|
||||
INSTALL_DIR="${INSTALL_DIR:-/bin}"
|
||||
CONFIG_DIR="${CONFIG_DIR:-/etc/telemt}"
|
||||
CONFIG_FILE="${CONFIG_FILE:-${CONFIG_DIR}/telemt.toml}"
|
||||
WORK_DIR="${WORK_DIR:-/opt/telemt}"
|
||||
TLS_DOMAIN="${TLS_DOMAIN:-petrovich.ru}"
|
||||
SERVICE_NAME="telemt"
|
||||
TEMP_DIR=""
|
||||
SUDO=""
|
||||
CONFIG_PARENT_DIR=""
|
||||
SERVICE_START_FAILED=0
|
||||
|
||||
echo "[*] Остановка службы telemt..."
|
||||
systemctl stop telemt || true # Игнорируем ошибку, если служба уже остановлена
|
||||
ACTION="install"
|
||||
TARGET_VERSION="${VERSION:-latest}"
|
||||
|
||||
echo "[1/2] Скачивание последней версии Telemt..."
|
||||
wget -qO- "https://github.com/telemt/telemt/releases/latest/download/telemt-$(uname -m)-linux-$(ldd --version 2>&1 | grep -iq musl && echo musl || echo gnu).tar.gz" | tar -xz
|
||||
while [ $# -gt 0 ]; do
|
||||
case "$1" in
|
||||
-h|--help) ACTION="help"; shift ;;
|
||||
uninstall|--uninstall)
|
||||
if [ "$ACTION" != "purge" ]; then ACTION="uninstall"; fi
|
||||
shift ;;
|
||||
purge|--purge) ACTION="purge"; shift ;;
|
||||
install|--install) ACTION="install"; shift ;;
|
||||
-*) printf '[ERROR] Unknown option: %s\n' "$1" >&2; exit 1 ;;
|
||||
*)
|
||||
if [ "$ACTION" = "install" ]; then TARGET_VERSION="$1"
|
||||
else printf '[WARNING] Ignoring extra argument: %s\n' "$1" >&2; fi
|
||||
shift ;;
|
||||
esac
|
||||
done
|
||||
|
||||
echo "[1/2] Замена исполняемого файла в /usr/local/bin..."
|
||||
mv telemt /usr/local/bin/telemt
|
||||
chmod +x /usr/local/bin/telemt
|
||||
say() {
|
||||
if [ "$#" -eq 0 ] || [ -z "${1:-}" ]; then
|
||||
printf '\n'
|
||||
else
|
||||
printf '[INFO] %s\n' "$*"
|
||||
fi
|
||||
}
|
||||
die() { printf '[ERROR] %s\n' "$*" >&2; exit 1; }
|
||||
|
||||
echo "[2/2] Запуск службы..."
|
||||
systemctl start telemt
|
||||
write_root() { $SUDO sh -c 'cat > "$1"' _ "$1"; }
|
||||
|
||||
echo "--- Обновление Telemt успешно завершено! ---"
|
||||
echo
|
||||
echo "Для проверки статуса службы выполните:"
|
||||
echo " systemctl status telemt"
|
||||
cleanup() {
|
||||
if [ -n "${TEMP_DIR:-}" ] && [ -d "$TEMP_DIR" ]; then
|
||||
rm -rf -- "$TEMP_DIR"
|
||||
fi
|
||||
}
|
||||
trap cleanup EXIT INT TERM
|
||||
|
||||
else
|
||||
# --- РЕЖИМ НОВОЙ УСТАНОВКИ ---
|
||||
echo "--- Начало автоматической установки Telemt ---"
|
||||
show_help() {
|
||||
say "Usage: $0 [ <version> | install | uninstall | purge | --help ]"
|
||||
say " <version> Install specific version (e.g. 3.3.15, default: latest)"
|
||||
say " install Install the latest version"
|
||||
say " uninstall Remove the binary and service (keeps config and user)"
|
||||
say " purge Remove everything including configuration, data, and user"
|
||||
exit 0
|
||||
}
|
||||
|
||||
# Шаг 1: Скачивание и установка бинарного файла
|
||||
echo "[1/5] Скачивание последней версии Telemt..."
|
||||
wget -qO- "https://github.com/telemt/telemt/releases/latest/download/telemt-$(uname -m)-linux-$(ldd --version 2>&1 | grep -iq musl && echo musl || echo gnu).tar.gz" | tar -xz
|
||||
check_os_entity() {
|
||||
if command -v getent >/dev/null 2>&1; then getent "$1" "$2" >/dev/null 2>&1
|
||||
else grep -q "^${2}:" "/etc/$1" 2>/dev/null; fi
|
||||
}
|
||||
|
||||
echo "[1/5] Перемещение исполняемого файла в /usr/local/bin и установка прав..."
|
||||
mv telemt /usr/local/bin/telemt
|
||||
chmod +x /usr/local/bin/telemt
|
||||
normalize_path() {
|
||||
printf '%s\n' "$1" | tr -s '/' | sed 's|/$||; s|^$|/|'
|
||||
}
|
||||
|
||||
# Шаг 2: Генерация секрета
|
||||
echo "[2/5] Генерация секретного ключа..."
|
||||
SECRET=$(openssl rand -hex 16)
|
||||
get_realpath() {
|
||||
path_in="$1"
|
||||
case "$path_in" in /*) ;; *) path_in="$(pwd)/$path_in" ;; esac
|
||||
|
||||
# Шаг 3: Создание файла конфигурации
|
||||
echo "[3/5] Создание файла конфигурации /etc/telemt.toml..."
|
||||
printf "# === General Settings ===\n[general]\n[general.modes]\nclassic = false\nsecure = false\ntls = true\n\n# === Anti-Censorship & Masking ===\n[censorship]\n# !!! ВАЖНО: Замените на ваш домен или домен, который вы хотите использовать для маскировки !!!\ntls_domain = \"petrovich.ru\"\n\n[access.users]\nhello = \"%s\"\n" "$SECRET" > /etc/telemt.toml
|
||||
if command -v realpath >/dev/null 2>&1; then
|
||||
if realpath_out="$(realpath -m "$path_in" 2>/dev/null)"; then
|
||||
printf '%s\n' "$realpath_out"
|
||||
return
|
||||
fi
|
||||
fi
|
||||
|
||||
if command -v readlink >/dev/null 2>&1; then
|
||||
resolved_path="$(readlink -f "$path_in" 2>/dev/null || true)"
|
||||
if [ -n "$resolved_path" ]; then
|
||||
printf '%s\n' "$resolved_path"
|
||||
return
|
||||
fi
|
||||
fi
|
||||
|
||||
# Шаг 4: Создание службы Systemd
|
||||
echo "[4/5] Создание службы systemd..."
|
||||
printf "[Unit]\nDescription=Telemt Proxy\nAfter=network.target\n\n[Service]\nType=simple\nExecStart=/usr/local/bin/telemt /etc/telemt.toml\nRestart=on-failure\nRestartSec=5\nLimitNOFILE=65536\n\n[Install]\nWantedBy=multi-user.target\n" > /etc/systemd/system/telemt.service
|
||||
d="${path_in%/*}"; b="${path_in##*/}"
|
||||
if [ -z "$d" ]; then d="/"; fi
|
||||
if [ "$d" = "$path_in" ]; then d="/"; b="$path_in"; fi
|
||||
|
||||
# Шаг 5: Запуск службы
|
||||
echo "[5/5] Перезагрузка systemd, запуск и включение службы telemt..."
|
||||
systemctl daemon-reload
|
||||
systemctl start telemt
|
||||
systemctl enable telemt
|
||||
if [ -d "$d" ]; then
|
||||
abs_d="$(cd "$d" >/dev/null 2>&1 && pwd || true)"
|
||||
if [ -n "$abs_d" ]; then
|
||||
if [ "$b" = "." ] || [ -z "$b" ]; then printf '%s\n' "$abs_d"
|
||||
elif [ "$abs_d" = "/" ]; then printf '/%s\n' "$b"
|
||||
else printf '%s/%s\n' "$abs_d" "$b"; fi
|
||||
else
|
||||
normalize_path "$path_in"
|
||||
fi
|
||||
else
|
||||
normalize_path "$path_in"
|
||||
fi
|
||||
}
|
||||
|
||||
echo "--- Установка и запуск Telemt успешно завершены! ---"
|
||||
echo
|
||||
echo "ВАЖНАЯ ИНФОРМАЦИЯ:"
|
||||
echo "==================="
|
||||
echo "1. Вам НЕОБХОДИМО отредактировать файл /etc/telemt.toml и заменить '\''petrovich.ru'\'' на другой домен"
|
||||
echo " с помощью команды:"
|
||||
echo " nano /etc/telemt.toml"
|
||||
echo " После редактирования файла перезапустите службу командой:"
|
||||
echo " sudo systemctl restart telemt"
|
||||
echo
|
||||
echo "2. Для проверки статуса службы выполните команду:"
|
||||
echo " systemctl status telemt"
|
||||
echo
|
||||
echo "3. Для получения ссылок на подключение выполните команду:"
|
||||
echo " journalctl -u telemt -n -g '\''links'\'' --no-pager -o cat | tac"
|
||||
fi
|
||||
'
|
||||
get_svc_mgr() {
|
||||
if command -v systemctl >/dev/null 2>&1 && [ -d /run/systemd/system ]; then echo "systemd"
|
||||
elif command -v rc-service >/dev/null 2>&1; then echo "openrc"
|
||||
else echo "none"; fi
|
||||
}
|
||||
|
||||
verify_common() {
|
||||
[ -n "$BIN_NAME" ] || die "BIN_NAME cannot be empty."
|
||||
[ -n "$INSTALL_DIR" ] || die "INSTALL_DIR cannot be empty."
|
||||
[ -n "$CONFIG_DIR" ] || die "CONFIG_DIR cannot be empty."
|
||||
[ -n "$CONFIG_FILE" ] || die "CONFIG_FILE cannot be empty."
|
||||
|
||||
case "${INSTALL_DIR}${CONFIG_DIR}${WORK_DIR}${CONFIG_FILE}" in
|
||||
*[!a-zA-Z0-9_./-]*) die "Invalid characters in paths. Only alphanumeric, _, ., -, and / allowed." ;;
|
||||
esac
|
||||
|
||||
case "$TARGET_VERSION" in *[!a-zA-Z0-9_.-]*) die "Invalid characters in version." ;; esac
|
||||
case "$BIN_NAME" in *[!a-zA-Z0-9_-]*) die "Invalid characters in BIN_NAME." ;; esac
|
||||
|
||||
INSTALL_DIR="$(get_realpath "$INSTALL_DIR")"
|
||||
CONFIG_DIR="$(get_realpath "$CONFIG_DIR")"
|
||||
WORK_DIR="$(get_realpath "$WORK_DIR")"
|
||||
CONFIG_FILE="$(get_realpath "$CONFIG_FILE")"
|
||||
|
||||
CONFIG_PARENT_DIR="${CONFIG_FILE%/*}"
|
||||
if [ -z "$CONFIG_PARENT_DIR" ]; then CONFIG_PARENT_DIR="/"; fi
|
||||
if [ "$CONFIG_PARENT_DIR" = "$CONFIG_FILE" ]; then CONFIG_PARENT_DIR="."; fi
|
||||
|
||||
if [ "$(id -u)" -eq 0 ]; then
|
||||
SUDO=""
|
||||
else
|
||||
command -v sudo >/dev/null 2>&1 || die "This script requires root or sudo. Neither found."
|
||||
SUDO="sudo"
|
||||
if ! sudo -n true 2>/dev/null; then
|
||||
if ! [ -t 0 ]; then
|
||||
die "sudo requires a password, but no TTY detected. Aborting to prevent hang."
|
||||
fi
|
||||
fi
|
||||
fi
|
||||
|
||||
if [ -n "$SUDO" ]; then
|
||||
if $SUDO sh -c '[ -d "$1" ]' _ "$CONFIG_FILE"; then
|
||||
die "Safety check failed: CONFIG_FILE '$CONFIG_FILE' is a directory."
|
||||
fi
|
||||
elif [ -d "$CONFIG_FILE" ]; then
|
||||
die "Safety check failed: CONFIG_FILE '$CONFIG_FILE' is a directory."
|
||||
fi
|
||||
|
||||
for path in "$CONFIG_DIR" "$CONFIG_PARENT_DIR" "$WORK_DIR"; do
|
||||
check_path="$(get_realpath "$path")"
|
||||
case "$check_path" in
|
||||
/|/bin|/sbin|/usr|/usr/bin|/usr/sbin|/usr/local|/usr/local/bin|/usr/local/sbin|/usr/local/etc|/usr/local/share|/etc|/var|/var/lib|/var/log|/var/run|/home|/root|/tmp|/lib|/lib64|/opt|/run|/boot|/dev|/sys|/proc)
|
||||
die "Safety check failed: '$path' (resolved to '$check_path') is a critical system directory." ;;
|
||||
esac
|
||||
done
|
||||
|
||||
check_install_dir="$(get_realpath "$INSTALL_DIR")"
|
||||
case "$check_install_dir" in
|
||||
/|/etc|/var|/home|/root|/tmp|/usr|/usr/local|/opt|/boot|/dev|/sys|/proc|/run)
|
||||
die "Safety check failed: INSTALL_DIR '$INSTALL_DIR' is a critical system directory." ;;
|
||||
esac
|
||||
|
||||
for cmd in id uname grep find rm chown chmod mv mktemp mkdir tr dd sed ps head sleep cat tar gzip rmdir; do
|
||||
command -v "$cmd" >/dev/null 2>&1 || die "Required command not found: $cmd"
|
||||
done
|
||||
}
|
||||
|
||||
verify_install_deps() {
|
||||
command -v curl >/dev/null 2>&1 || command -v wget >/dev/null 2>&1 || die "Neither curl nor wget is installed."
|
||||
command -v cp >/dev/null 2>&1 || command -v install >/dev/null 2>&1 || die "Need cp or install"
|
||||
|
||||
if ! command -v setcap >/dev/null 2>&1; then
|
||||
if command -v apk >/dev/null 2>&1; then
|
||||
$SUDO apk add --no-cache libcap-utils >/dev/null 2>&1 || $SUDO apk add --no-cache libcap >/dev/null 2>&1 || true
|
||||
elif command -v apt-get >/dev/null 2>&1; then
|
||||
$SUDO apt-get update -q >/dev/null 2>&1 || true
|
||||
$SUDO apt-get install -y -q libcap2-bin >/dev/null 2>&1 || true
|
||||
elif command -v dnf >/dev/null 2>&1; then $SUDO dnf install -y -q libcap >/dev/null 2>&1 || true
|
||||
elif command -v yum >/dev/null 2>&1; then $SUDO yum install -y -q libcap >/dev/null 2>&1 || true
|
||||
fi
|
||||
fi
|
||||
}
|
||||
|
||||
detect_arch() {
|
||||
sys_arch="$(uname -m)"
|
||||
case "$sys_arch" in
|
||||
x86_64|amd64) echo "x86_64" ;;
|
||||
aarch64|arm64) echo "aarch64" ;;
|
||||
*) die "Unsupported architecture: $sys_arch" ;;
|
||||
esac
|
||||
}
|
||||
|
||||
detect_libc() {
|
||||
for f in /lib/ld-musl-*.so.* /lib64/ld-musl-*.so.*; do
|
||||
if [ -e "$f" ]; then echo "musl"; return 0; fi
|
||||
done
|
||||
if grep -qE '^ID="?alpine"?' /etc/os-release 2>/dev/null; then echo "musl"; return 0; fi
|
||||
if command -v ldd >/dev/null 2>&1 && (ldd --version 2>&1 || true) | grep -qi musl; then echo "musl"; return 0; fi
|
||||
echo "gnu"
|
||||
}
|
||||
|
||||
fetch_file() {
|
||||
if command -v curl >/dev/null 2>&1; then curl -fsSL "$1" -o "$2"
|
||||
else wget -q -O "$2" "$1"; fi
|
||||
}
|
||||
|
||||
ensure_user_group() {
|
||||
nologin_bin="$(command -v nologin 2>/dev/null || command -v false 2>/dev/null || echo /bin/false)"
|
||||
|
||||
if ! check_os_entity group telemt; then
|
||||
if command -v groupadd >/dev/null 2>&1; then $SUDO groupadd -r telemt
|
||||
elif command -v addgroup >/dev/null 2>&1; then $SUDO addgroup -S telemt
|
||||
else die "Cannot create group"; fi
|
||||
fi
|
||||
|
||||
if ! check_os_entity passwd telemt; then
|
||||
if command -v useradd >/dev/null 2>&1; then
|
||||
$SUDO useradd -r -g telemt -d "$WORK_DIR" -s "$nologin_bin" -c "Telemt Proxy" telemt
|
||||
elif command -v adduser >/dev/null 2>&1; then
|
||||
if adduser --help 2>&1 | grep -q -- '-S'; then
|
||||
$SUDO adduser -S -D -H -h "$WORK_DIR" -s "$nologin_bin" -G telemt telemt
|
||||
else
|
||||
$SUDO adduser --system --home "$WORK_DIR" --shell "$nologin_bin" --no-create-home --ingroup telemt --disabled-password telemt
|
||||
fi
|
||||
else die "Cannot create user"; fi
|
||||
fi
|
||||
}
|
||||
|
||||
setup_dirs() {
|
||||
$SUDO mkdir -p "$WORK_DIR" "$CONFIG_DIR" "$CONFIG_PARENT_DIR" || die "Failed to create directories"
|
||||
|
||||
$SUDO chown telemt:telemt "$WORK_DIR" && $SUDO chmod 750 "$WORK_DIR"
|
||||
$SUDO chown root:telemt "$CONFIG_DIR" && $SUDO chmod 750 "$CONFIG_DIR"
|
||||
|
||||
if [ "$CONFIG_PARENT_DIR" != "$CONFIG_DIR" ] && [ "$CONFIG_PARENT_DIR" != "." ] && [ "$CONFIG_PARENT_DIR" != "/" ]; then
|
||||
$SUDO chown root:telemt "$CONFIG_PARENT_DIR" && $SUDO chmod 750 "$CONFIG_PARENT_DIR"
|
||||
fi
|
||||
}
|
||||
|
||||
stop_service() {
|
||||
svc="$(get_svc_mgr)"
|
||||
if [ "$svc" = "systemd" ] && systemctl is-active --quiet "$SERVICE_NAME" 2>/dev/null; then
|
||||
$SUDO systemctl stop "$SERVICE_NAME" 2>/dev/null || true
|
||||
elif [ "$svc" = "openrc" ] && rc-service "$SERVICE_NAME" status >/dev/null 2>&1; then
|
||||
$SUDO rc-service "$SERVICE_NAME" stop 2>/dev/null || true
|
||||
fi
|
||||
}
|
||||
|
||||
install_binary() {
|
||||
bin_src="$1"; bin_dst="$2"
|
||||
if [ -e "$INSTALL_DIR" ] && [ ! -d "$INSTALL_DIR" ]; then
|
||||
die "'$INSTALL_DIR' is not a directory."
|
||||
fi
|
||||
|
||||
$SUDO mkdir -p "$INSTALL_DIR" || die "Failed to create install directory"
|
||||
if command -v install >/dev/null 2>&1; then
|
||||
$SUDO install -m 0755 "$bin_src" "$bin_dst" || die "Failed to install binary"
|
||||
else
|
||||
$SUDO rm -f "$bin_dst" 2>/dev/null || true
|
||||
$SUDO cp "$bin_src" "$bin_dst" && $SUDO chmod 0755 "$bin_dst" || die "Failed to copy binary"
|
||||
fi
|
||||
|
||||
$SUDO sh -c '[ -x "$1" ]' _ "$bin_dst" || die "Binary not executable: $bin_dst"
|
||||
|
||||
if command -v setcap >/dev/null 2>&1; then
|
||||
$SUDO setcap cap_net_bind_service=+ep "$bin_dst" 2>/dev/null || true
|
||||
fi
|
||||
}
|
||||
|
||||
generate_secret() {
|
||||
secret="$(command -v openssl >/dev/null 2>&1 && openssl rand -hex 16 2>/dev/null || true)"
|
||||
if [ -z "$secret" ] || [ "${#secret}" -ne 32 ]; then
|
||||
if command -v od >/dev/null 2>&1; then secret="$(dd if=/dev/urandom bs=16 count=1 2>/dev/null | od -An -tx1 | tr -d ' \n')"
|
||||
elif command -v hexdump >/dev/null 2>&1; then secret="$(dd if=/dev/urandom bs=16 count=1 2>/dev/null | hexdump -e '1/1 "%02x"')"
|
||||
elif command -v xxd >/dev/null 2>&1; then secret="$(dd if=/dev/urandom bs=16 count=1 2>/dev/null | xxd -p | tr -d '\n')"
|
||||
fi
|
||||
fi
|
||||
if [ "${#secret}" -eq 32 ]; then echo "$secret"; else return 1; fi
|
||||
}
|
||||
|
||||
generate_config_content() {
|
||||
escaped_tls_domain="$(printf '%s\n' "$TLS_DOMAIN" | tr -d '[:cntrl:]' | sed 's/\\/\\\\/g; s/"/\\"/g')"
|
||||
|
||||
cat <<EOF
|
||||
[general]
|
||||
use_middle_proxy = false
|
||||
|
||||
[general.modes]
|
||||
classic = false
|
||||
secure = false
|
||||
tls = true
|
||||
|
||||
[server]
|
||||
port = 443
|
||||
|
||||
[server.api]
|
||||
enabled = true
|
||||
listen = "127.0.0.1:9091"
|
||||
whitelist = ["127.0.0.1/32"]
|
||||
|
||||
[censorship]
|
||||
tls_domain = "${escaped_tls_domain}"
|
||||
|
||||
[access.users]
|
||||
hello = "$1"
|
||||
EOF
|
||||
}
|
||||
|
||||
install_config() {
|
||||
if [ -n "$SUDO" ]; then
|
||||
if $SUDO sh -c '[ -f "$1" ]' _ "$CONFIG_FILE"; then
|
||||
say " -> Config already exists at $CONFIG_FILE. Skipping creation."
|
||||
return 0
|
||||
fi
|
||||
elif [ -f "$CONFIG_FILE" ]; then
|
||||
say " -> Config already exists at $CONFIG_FILE. Skipping creation."
|
||||
return 0
|
||||
fi
|
||||
|
||||
toml_secret="$(generate_secret)" || die "Failed to generate secret."
|
||||
|
||||
generate_config_content "$toml_secret" | write_root "$CONFIG_FILE" || die "Failed to install config"
|
||||
$SUDO chown root:telemt "$CONFIG_FILE" && $SUDO chmod 640 "$CONFIG_FILE"
|
||||
|
||||
say " -> Config created successfully."
|
||||
say " -> Generated secret for default user 'hello': $toml_secret"
|
||||
}
|
||||
|
||||
generate_systemd_content() {
|
||||
cat <<EOF
|
||||
[Unit]
|
||||
Description=Telemt
|
||||
After=network-online.target
|
||||
Wants=network-online.target
|
||||
|
||||
[Service]
|
||||
Type=simple
|
||||
User=telemt
|
||||
Group=telemt
|
||||
WorkingDirectory=$WORK_DIR
|
||||
ExecStart="${INSTALL_DIR}/${BIN_NAME}" "${CONFIG_FILE}"
|
||||
Restart=on-failure
|
||||
LimitNOFILE=65536
|
||||
AmbientCapabilities=CAP_NET_BIND_SERVICE
|
||||
CapabilityBoundingSet=CAP_NET_BIND_SERVICE
|
||||
|
||||
[Install]
|
||||
WantedBy=multi-user.target
|
||||
EOF
|
||||
}
|
||||
|
||||
generate_openrc_content() {
|
||||
cat <<EOF
|
||||
#!/sbin/openrc-run
|
||||
name="$SERVICE_NAME"
|
||||
description="Telemt Proxy Service"
|
||||
command="${INSTALL_DIR}/${BIN_NAME}"
|
||||
command_args="${CONFIG_FILE}"
|
||||
command_background=true
|
||||
command_user="telemt:telemt"
|
||||
pidfile="/run/\${RC_SVCNAME}.pid"
|
||||
directory="${WORK_DIR}"
|
||||
rc_ulimit="-n 65536"
|
||||
depend() { need net; use logger; }
|
||||
EOF
|
||||
}
|
||||
|
||||
install_service() {
|
||||
svc="$(get_svc_mgr)"
|
||||
if [ "$svc" = "systemd" ]; then
|
||||
generate_systemd_content | write_root "/etc/systemd/system/${SERVICE_NAME}.service"
|
||||
$SUDO chown root:root "/etc/systemd/system/${SERVICE_NAME}.service" && $SUDO chmod 644 "/etc/systemd/system/${SERVICE_NAME}.service"
|
||||
|
||||
$SUDO systemctl daemon-reload || true
|
||||
$SUDO systemctl enable "$SERVICE_NAME" || true
|
||||
|
||||
if ! $SUDO systemctl start "$SERVICE_NAME"; then
|
||||
say "[WARNING] Failed to start service"
|
||||
SERVICE_START_FAILED=1
|
||||
fi
|
||||
elif [ "$svc" = "openrc" ]; then
|
||||
generate_openrc_content | write_root "/etc/init.d/${SERVICE_NAME}"
|
||||
$SUDO chown root:root "/etc/init.d/${SERVICE_NAME}" && $SUDO chmod 0755 "/etc/init.d/${SERVICE_NAME}"
|
||||
|
||||
$SUDO rc-update add "$SERVICE_NAME" default 2>/dev/null || true
|
||||
|
||||
if ! $SUDO rc-service "$SERVICE_NAME" start 2>/dev/null; then
|
||||
say "[WARNING] Failed to start service"
|
||||
SERVICE_START_FAILED=1
|
||||
fi
|
||||
else
|
||||
cmd="\"${INSTALL_DIR}/${BIN_NAME}\" \"${CONFIG_FILE}\""
|
||||
if [ -n "$SUDO" ]; then
|
||||
say " -> Service manager not found. Start manually: sudo -u telemt $cmd"
|
||||
else
|
||||
say " -> Service manager not found. Start manually: su -s /bin/sh telemt -c '$cmd'"
|
||||
fi
|
||||
fi
|
||||
}
|
||||
|
||||
kill_user_procs() {
|
||||
if command -v pkill >/dev/null 2>&1; then
|
||||
$SUDO pkill -u telemt "$BIN_NAME" 2>/dev/null || true
|
||||
sleep 1
|
||||
$SUDO pkill -9 -u telemt "$BIN_NAME" 2>/dev/null || true
|
||||
else
|
||||
if command -v pgrep >/dev/null 2>&1; then
|
||||
pids="$(pgrep -u telemt 2>/dev/null || true)"
|
||||
else
|
||||
pids="$(ps -u telemt -o pid= 2>/dev/null || true)"
|
||||
fi
|
||||
|
||||
if [ -n "$pids" ]; then
|
||||
for pid in $pids; do
|
||||
case "$pid" in ''|*[!0-9]*) continue ;; *) $SUDO kill "$pid" 2>/dev/null || true ;; esac
|
||||
done
|
||||
sleep 1
|
||||
for pid in $pids; do
|
||||
case "$pid" in ''|*[!0-9]*) continue ;; *) $SUDO kill -9 "$pid" 2>/dev/null || true ;; esac
|
||||
done
|
||||
fi
|
||||
fi
|
||||
}
|
||||
|
||||
uninstall() {
|
||||
say "Starting uninstallation of $BIN_NAME..."
|
||||
|
||||
say ">>> Stage 1: Stopping services"
|
||||
stop_service
|
||||
|
||||
say ">>> Stage 2: Removing service configuration"
|
||||
svc="$(get_svc_mgr)"
|
||||
if [ "$svc" = "systemd" ]; then
|
||||
$SUDO systemctl disable "$SERVICE_NAME" 2>/dev/null || true
|
||||
$SUDO rm -f "/etc/systemd/system/${SERVICE_NAME}.service"
|
||||
$SUDO systemctl daemon-reload 2>/dev/null || true
|
||||
elif [ "$svc" = "openrc" ]; then
|
||||
$SUDO rc-update del "$SERVICE_NAME" 2>/dev/null || true
|
||||
$SUDO rm -f "/etc/init.d/${SERVICE_NAME}"
|
||||
fi
|
||||
|
||||
say ">>> Stage 3: Terminating user processes"
|
||||
kill_user_procs
|
||||
|
||||
say ">>> Stage 4: Removing binary"
|
||||
$SUDO rm -f "${INSTALL_DIR}/${BIN_NAME}"
|
||||
|
||||
if [ "$ACTION" = "purge" ]; then
|
||||
say ">>> Stage 5: Purging configuration, data, and user"
|
||||
$SUDO rm -rf "$CONFIG_DIR" "$WORK_DIR"
|
||||
$SUDO rm -f "$CONFIG_FILE"
|
||||
if [ "$CONFIG_PARENT_DIR" != "$CONFIG_DIR" ] && [ "$CONFIG_PARENT_DIR" != "." ] && [ "$CONFIG_PARENT_DIR" != "/" ]; then
|
||||
$SUDO rmdir "$CONFIG_PARENT_DIR" 2>/dev/null || true
|
||||
fi
|
||||
$SUDO userdel telemt 2>/dev/null || $SUDO deluser telemt 2>/dev/null || true
|
||||
$SUDO groupdel telemt 2>/dev/null || $SUDO delgroup telemt 2>/dev/null || true
|
||||
else
|
||||
say "Note: Configuration and user kept. Run with 'purge' to remove completely."
|
||||
fi
|
||||
|
||||
printf '\n====================================================================\n'
|
||||
printf ' UNINSTALLATION COMPLETE\n'
|
||||
printf '====================================================================\n\n'
|
||||
exit 0
|
||||
}
|
||||
|
||||
case "$ACTION" in
|
||||
help) show_help ;;
|
||||
uninstall|purge) verify_common; uninstall ;;
|
||||
install)
|
||||
say "Starting installation of $BIN_NAME (Version: $TARGET_VERSION)"
|
||||
|
||||
say ">>> Stage 1: Verifying environment and dependencies"
|
||||
verify_common; verify_install_deps
|
||||
|
||||
if [ "$TARGET_VERSION" != "latest" ]; then
|
||||
TARGET_VERSION="${TARGET_VERSION#v}"
|
||||
fi
|
||||
|
||||
ARCH="$(detect_arch)"; LIBC="$(detect_libc)"
|
||||
FILE_NAME="${BIN_NAME}-${ARCH}-linux-${LIBC}.tar.gz"
|
||||
|
||||
if [ "$TARGET_VERSION" = "latest" ]; then
|
||||
DL_URL="https://github.com/${REPO}/releases/latest/download/${FILE_NAME}"
|
||||
else
|
||||
DL_URL="https://github.com/${REPO}/releases/download/${TARGET_VERSION}/${FILE_NAME}"
|
||||
fi
|
||||
|
||||
say ">>> Stage 2: Downloading archive"
|
||||
TEMP_DIR="$(mktemp -d)" || die "Temp directory creation failed"
|
||||
if [ -z "$TEMP_DIR" ] || [ ! -d "$TEMP_DIR" ]; then
|
||||
die "Temp directory is invalid or was not created"
|
||||
fi
|
||||
|
||||
fetch_file "$DL_URL" "${TEMP_DIR}/${FILE_NAME}" || die "Download failed"
|
||||
|
||||
say ">>> Stage 3: Extracting archive"
|
||||
if ! gzip -dc "${TEMP_DIR}/${FILE_NAME}" | tar -xf - -C "$TEMP_DIR" 2>/dev/null; then
|
||||
die "Extraction failed (downloaded archive might be invalid or 404)."
|
||||
fi
|
||||
|
||||
EXTRACTED_BIN="$(find "$TEMP_DIR" -type f -name "$BIN_NAME" -print 2>/dev/null | head -n 1 || true)"
|
||||
[ -n "$EXTRACTED_BIN" ] || die "Binary '$BIN_NAME' not found in archive"
|
||||
|
||||
say ">>> Stage 4: Setting up environment (User, Group, Directories)"
|
||||
ensure_user_group; setup_dirs; stop_service
|
||||
|
||||
say ">>> Stage 5: Installing binary"
|
||||
install_binary "$EXTRACTED_BIN" "${INSTALL_DIR}/${BIN_NAME}"
|
||||
|
||||
say ">>> Stage 6: Generating configuration"
|
||||
install_config
|
||||
|
||||
say ">>> Stage 7: Installing and starting service"
|
||||
install_service
|
||||
|
||||
if [ "${SERVICE_START_FAILED:-0}" -eq 1 ]; then
|
||||
printf '\n====================================================================\n'
|
||||
printf ' INSTALLATION COMPLETED WITH WARNINGS\n'
|
||||
printf '====================================================================\n\n'
|
||||
printf 'The service was installed but failed to start automatically.\n'
|
||||
printf 'Please check the logs to determine the issue.\n\n'
|
||||
else
|
||||
printf '\n====================================================================\n'
|
||||
printf ' INSTALLATION SUCCESS\n'
|
||||
printf '====================================================================\n\n'
|
||||
fi
|
||||
|
||||
svc="$(get_svc_mgr)"
|
||||
if [ "$svc" = "systemd" ]; then
|
||||
printf 'To check the status of your proxy service, run:\n'
|
||||
printf ' systemctl status %s\n\n' "$SERVICE_NAME"
|
||||
elif [ "$svc" = "openrc" ]; then
|
||||
printf 'To check the status of your proxy service, run:\n'
|
||||
printf ' rc-service %s status\n\n' "$SERVICE_NAME"
|
||||
fi
|
||||
|
||||
printf 'To get your user connection links (for Telegram), run:\n'
|
||||
if command -v jq >/dev/null 2>&1; then
|
||||
printf ' curl -s http://127.0.0.1:9091/v1/users | jq -r '\''.data[] | "User: \\(.username)\\n\\(.links.tls[0] // empty)\\n"'\''\n'
|
||||
else
|
||||
printf ' curl -s http://127.0.0.1:9091/v1/users\n'
|
||||
printf ' (Tip: Install '\''jq'\'' for a much cleaner output)\n'
|
||||
fi
|
||||
|
||||
printf '\n====================================================================\n'
|
||||
;;
|
||||
esac
|
||||
|
||||
@@ -1,13 +1,39 @@
|
||||
use std::collections::BTreeMap;
|
||||
use std::io::Write;
|
||||
use std::path::{Path, PathBuf};
|
||||
|
||||
use chrono::{DateTime, Utc};
|
||||
use hyper::header::IF_MATCH;
|
||||
use serde::Serialize;
|
||||
use sha2::{Digest, Sha256};
|
||||
|
||||
use crate::config::ProxyConfig;
|
||||
|
||||
use super::model::ApiFailure;
|
||||
|
||||
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
|
||||
pub(super) enum AccessSection {
|
||||
Users,
|
||||
UserAdTags,
|
||||
UserMaxTcpConns,
|
||||
UserExpirations,
|
||||
UserDataQuota,
|
||||
UserMaxUniqueIps,
|
||||
}
|
||||
|
||||
impl AccessSection {
|
||||
fn table_name(self) -> &'static str {
|
||||
match self {
|
||||
Self::Users => "access.users",
|
||||
Self::UserAdTags => "access.user_ad_tags",
|
||||
Self::UserMaxTcpConns => "access.user_max_tcp_conns",
|
||||
Self::UserExpirations => "access.user_expirations",
|
||||
Self::UserDataQuota => "access.user_data_quota",
|
||||
Self::UserMaxUniqueIps => "access.user_max_unique_ips",
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub(super) fn parse_if_match(headers: &hyper::HeaderMap) -> Option<String> {
|
||||
headers
|
||||
.get(IF_MATCH)
|
||||
@@ -66,6 +92,142 @@ pub(super) async fn save_config_to_disk(
|
||||
Ok(compute_revision(&serialized))
|
||||
}
|
||||
|
||||
pub(super) async fn save_access_sections_to_disk(
|
||||
config_path: &Path,
|
||||
cfg: &ProxyConfig,
|
||||
sections: &[AccessSection],
|
||||
) -> Result<String, ApiFailure> {
|
||||
let mut content = tokio::fs::read_to_string(config_path)
|
||||
.await
|
||||
.map_err(|e| ApiFailure::internal(format!("failed to read config: {}", e)))?;
|
||||
|
||||
let mut applied = Vec::new();
|
||||
for section in sections {
|
||||
if applied.contains(section) {
|
||||
continue;
|
||||
}
|
||||
let rendered = render_access_section(cfg, *section)?;
|
||||
content = upsert_toml_table(&content, section.table_name(), &rendered);
|
||||
applied.push(*section);
|
||||
}
|
||||
|
||||
write_atomic(config_path.to_path_buf(), content.clone()).await?;
|
||||
Ok(compute_revision(&content))
|
||||
}
|
||||
|
||||
fn render_access_section(cfg: &ProxyConfig, section: AccessSection) -> Result<String, ApiFailure> {
|
||||
let body = match section {
|
||||
AccessSection::Users => {
|
||||
let rows: BTreeMap<String, String> = cfg
|
||||
.access
|
||||
.users
|
||||
.iter()
|
||||
.map(|(key, value)| (key.clone(), value.clone()))
|
||||
.collect();
|
||||
serialize_table_body(&rows)?
|
||||
}
|
||||
AccessSection::UserAdTags => {
|
||||
let rows: BTreeMap<String, String> = cfg
|
||||
.access
|
||||
.user_ad_tags
|
||||
.iter()
|
||||
.map(|(key, value)| (key.clone(), value.clone()))
|
||||
.collect();
|
||||
serialize_table_body(&rows)?
|
||||
}
|
||||
AccessSection::UserMaxTcpConns => {
|
||||
let rows: BTreeMap<String, usize> = cfg
|
||||
.access
|
||||
.user_max_tcp_conns
|
||||
.iter()
|
||||
.map(|(key, value)| (key.clone(), *value))
|
||||
.collect();
|
||||
serialize_table_body(&rows)?
|
||||
}
|
||||
AccessSection::UserExpirations => {
|
||||
let rows: BTreeMap<String, DateTime<Utc>> = cfg
|
||||
.access
|
||||
.user_expirations
|
||||
.iter()
|
||||
.map(|(key, value)| (key.clone(), *value))
|
||||
.collect();
|
||||
serialize_table_body(&rows)?
|
||||
}
|
||||
AccessSection::UserDataQuota => {
|
||||
let rows: BTreeMap<String, u64> = cfg
|
||||
.access
|
||||
.user_data_quota
|
||||
.iter()
|
||||
.map(|(key, value)| (key.clone(), *value))
|
||||
.collect();
|
||||
serialize_table_body(&rows)?
|
||||
}
|
||||
AccessSection::UserMaxUniqueIps => {
|
||||
let rows: BTreeMap<String, usize> = cfg
|
||||
.access
|
||||
.user_max_unique_ips
|
||||
.iter()
|
||||
.map(|(key, value)| (key.clone(), *value))
|
||||
.collect();
|
||||
serialize_table_body(&rows)?
|
||||
}
|
||||
};
|
||||
|
||||
let mut out = format!("[{}]\n", section.table_name());
|
||||
if !body.is_empty() {
|
||||
out.push_str(&body);
|
||||
}
|
||||
if !out.ends_with('\n') {
|
||||
out.push('\n');
|
||||
}
|
||||
Ok(out)
|
||||
}
|
||||
|
||||
fn serialize_table_body<T: Serialize>(value: &T) -> Result<String, ApiFailure> {
|
||||
toml::to_string(value)
|
||||
.map_err(|e| ApiFailure::internal(format!("failed to serialize access section: {}", e)))
|
||||
}
|
||||
|
||||
fn upsert_toml_table(source: &str, table_name: &str, replacement: &str) -> String {
|
||||
if let Some((start, end)) = find_toml_table_bounds(source, table_name) {
|
||||
let mut out = String::with_capacity(source.len() + replacement.len());
|
||||
out.push_str(&source[..start]);
|
||||
out.push_str(replacement);
|
||||
out.push_str(&source[end..]);
|
||||
return out;
|
||||
}
|
||||
|
||||
let mut out = source.to_string();
|
||||
if !out.is_empty() && !out.ends_with('\n') {
|
||||
out.push('\n');
|
||||
}
|
||||
if !out.is_empty() {
|
||||
out.push('\n');
|
||||
}
|
||||
out.push_str(replacement);
|
||||
out
|
||||
}
|
||||
|
||||
fn find_toml_table_bounds(source: &str, table_name: &str) -> Option<(usize, usize)> {
|
||||
let target = format!("[{}]", table_name);
|
||||
let mut offset = 0usize;
|
||||
let mut start = None;
|
||||
|
||||
for line in source.split_inclusive('\n') {
|
||||
let trimmed = line.trim();
|
||||
if let Some(start_offset) = start {
|
||||
if trimmed.starts_with('[') {
|
||||
return Some((start_offset, offset));
|
||||
}
|
||||
} else if trimmed == target {
|
||||
start = Some(offset);
|
||||
}
|
||||
offset = offset.saturating_add(line.len());
|
||||
}
|
||||
|
||||
start.map(|start_offset| (start_offset, source.len()))
|
||||
}
|
||||
|
||||
async fn write_atomic(path: PathBuf, contents: String) -> Result<(), ApiFailure> {
|
||||
tokio::task::spawn_blocking(move || write_atomic_sync(&path, &contents))
|
||||
.await
|
||||
|
||||
90
src/api/events.rs
Normal file
90
src/api/events.rs
Normal file
@@ -0,0 +1,90 @@
|
||||
use std::collections::VecDeque;
|
||||
use std::sync::Mutex;
|
||||
use std::time::{SystemTime, UNIX_EPOCH};
|
||||
|
||||
use serde::Serialize;
|
||||
|
||||
#[derive(Clone, Serialize)]
|
||||
pub(super) struct ApiEventRecord {
|
||||
pub(super) seq: u64,
|
||||
pub(super) ts_epoch_secs: u64,
|
||||
pub(super) event_type: String,
|
||||
pub(super) context: String,
|
||||
}
|
||||
|
||||
#[derive(Clone, Serialize)]
|
||||
pub(super) struct ApiEventSnapshot {
|
||||
pub(super) capacity: usize,
|
||||
pub(super) dropped_total: u64,
|
||||
pub(super) events: Vec<ApiEventRecord>,
|
||||
}
|
||||
|
||||
struct ApiEventsInner {
|
||||
capacity: usize,
|
||||
dropped_total: u64,
|
||||
next_seq: u64,
|
||||
events: VecDeque<ApiEventRecord>,
|
||||
}
|
||||
|
||||
/// Bounded ring-buffer for control-plane API/runtime events.
|
||||
pub(crate) struct ApiEventStore {
|
||||
inner: Mutex<ApiEventsInner>,
|
||||
}
|
||||
|
||||
impl ApiEventStore {
|
||||
pub(super) fn new(capacity: usize) -> Self {
|
||||
let bounded = capacity.max(16);
|
||||
Self {
|
||||
inner: Mutex::new(ApiEventsInner {
|
||||
capacity: bounded,
|
||||
dropped_total: 0,
|
||||
next_seq: 1,
|
||||
events: VecDeque::with_capacity(bounded),
|
||||
}),
|
||||
}
|
||||
}
|
||||
|
||||
pub(super) fn record(&self, event_type: &str, context: impl Into<String>) {
|
||||
let now_epoch_secs = SystemTime::now()
|
||||
.duration_since(UNIX_EPOCH)
|
||||
.unwrap_or_default()
|
||||
.as_secs();
|
||||
let mut context = context.into();
|
||||
if context.len() > 256 {
|
||||
context.truncate(256);
|
||||
}
|
||||
|
||||
let mut guard = self.inner.lock().expect("api event store mutex poisoned");
|
||||
if guard.events.len() == guard.capacity {
|
||||
guard.events.pop_front();
|
||||
guard.dropped_total = guard.dropped_total.saturating_add(1);
|
||||
}
|
||||
let seq = guard.next_seq;
|
||||
guard.next_seq = guard.next_seq.saturating_add(1);
|
||||
guard.events.push_back(ApiEventRecord {
|
||||
seq,
|
||||
ts_epoch_secs: now_epoch_secs,
|
||||
event_type: event_type.to_string(),
|
||||
context,
|
||||
});
|
||||
}
|
||||
|
||||
pub(super) fn snapshot(&self, limit: usize) -> ApiEventSnapshot {
|
||||
let guard = self.inner.lock().expect("api event store mutex poisoned");
|
||||
let bounded_limit = limit.clamp(1, guard.capacity.max(1));
|
||||
let mut items: Vec<ApiEventRecord> = guard
|
||||
.events
|
||||
.iter()
|
||||
.rev()
|
||||
.take(bounded_limit)
|
||||
.cloned()
|
||||
.collect();
|
||||
items.reverse();
|
||||
|
||||
ApiEventSnapshot {
|
||||
capacity: guard.capacity,
|
||||
dropped_total: guard.dropped_total,
|
||||
events: items,
|
||||
}
|
||||
}
|
||||
}
|
||||
88
src/api/http_utils.rs
Normal file
88
src/api/http_utils.rs
Normal file
@@ -0,0 +1,88 @@
|
||||
use http_body_util::{BodyExt, Full};
|
||||
use hyper::StatusCode;
|
||||
use hyper::body::{Bytes, Incoming};
|
||||
use serde::Serialize;
|
||||
use serde::de::DeserializeOwned;
|
||||
|
||||
use super::model::{ApiFailure, ErrorBody, ErrorResponse, SuccessResponse};
|
||||
|
||||
pub(super) fn success_response<T: Serialize>(
|
||||
status: StatusCode,
|
||||
data: T,
|
||||
revision: String,
|
||||
) -> hyper::Response<Full<Bytes>> {
|
||||
let payload = SuccessResponse {
|
||||
ok: true,
|
||||
data,
|
||||
revision,
|
||||
};
|
||||
let body = serde_json::to_vec(&payload).unwrap_or_else(|_| b"{\"ok\":false}".to_vec());
|
||||
hyper::Response::builder()
|
||||
.status(status)
|
||||
.header("content-type", "application/json; charset=utf-8")
|
||||
.body(Full::new(Bytes::from(body)))
|
||||
.unwrap()
|
||||
}
|
||||
|
||||
pub(super) fn error_response(request_id: u64, failure: ApiFailure) -> hyper::Response<Full<Bytes>> {
|
||||
let payload = ErrorResponse {
|
||||
ok: false,
|
||||
error: ErrorBody {
|
||||
code: failure.code,
|
||||
message: failure.message,
|
||||
},
|
||||
request_id,
|
||||
};
|
||||
let body = serde_json::to_vec(&payload).unwrap_or_else(|_| {
|
||||
format!(
|
||||
"{{\"ok\":false,\"error\":{{\"code\":\"internal_error\",\"message\":\"serialization failed\"}},\"request_id\":{}}}",
|
||||
request_id
|
||||
)
|
||||
.into_bytes()
|
||||
});
|
||||
hyper::Response::builder()
|
||||
.status(failure.status)
|
||||
.header("content-type", "application/json; charset=utf-8")
|
||||
.body(Full::new(Bytes::from(body)))
|
||||
.unwrap()
|
||||
}
|
||||
|
||||
pub(super) async fn read_json<T: DeserializeOwned>(
|
||||
body: Incoming,
|
||||
limit: usize,
|
||||
) -> Result<T, ApiFailure> {
|
||||
let bytes = read_body_with_limit(body, limit).await?;
|
||||
serde_json::from_slice(&bytes).map_err(|_| ApiFailure::bad_request("Invalid JSON body"))
|
||||
}
|
||||
|
||||
pub(super) async fn read_optional_json<T: DeserializeOwned>(
|
||||
body: Incoming,
|
||||
limit: usize,
|
||||
) -> Result<Option<T>, ApiFailure> {
|
||||
let bytes = read_body_with_limit(body, limit).await?;
|
||||
if bytes.is_empty() {
|
||||
return Ok(None);
|
||||
}
|
||||
serde_json::from_slice(&bytes)
|
||||
.map(Some)
|
||||
.map_err(|_| ApiFailure::bad_request("Invalid JSON body"))
|
||||
}
|
||||
|
||||
async fn read_body_with_limit(body: Incoming, limit: usize) -> Result<Vec<u8>, ApiFailure> {
|
||||
let mut collected = Vec::new();
|
||||
let mut body = body;
|
||||
while let Some(frame_result) = body.frame().await {
|
||||
let frame = frame_result.map_err(|_| ApiFailure::bad_request("Invalid request body"))?;
|
||||
if let Some(chunk) = frame.data_ref() {
|
||||
if collected.len().saturating_add(chunk.len()) > limit {
|
||||
return Err(ApiFailure::new(
|
||||
StatusCode::PAYLOAD_TOO_LARGE,
|
||||
"payload_too_large",
|
||||
format!("Body exceeds {} bytes", limit),
|
||||
));
|
||||
}
|
||||
collected.extend_from_slice(chunk);
|
||||
}
|
||||
}
|
||||
Ok(collected)
|
||||
}
|
||||
361
src/api/mod.rs
361
src/api/mod.rs
@@ -1,66 +1,118 @@
|
||||
#![allow(clippy::too_many_arguments)]
|
||||
|
||||
use std::convert::Infallible;
|
||||
use std::net::SocketAddr;
|
||||
use std::net::{IpAddr, SocketAddr};
|
||||
use std::path::PathBuf;
|
||||
use std::sync::Arc;
|
||||
use std::sync::atomic::{AtomicU64, Ordering};
|
||||
use std::sync::atomic::{AtomicBool, AtomicU64, Ordering};
|
||||
|
||||
use http_body_util::{BodyExt, Full};
|
||||
use http_body_util::Full;
|
||||
use hyper::body::{Bytes, Incoming};
|
||||
use hyper::header::AUTHORIZATION;
|
||||
use hyper::server::conn::http1;
|
||||
use hyper::service::service_fn;
|
||||
use hyper::{Method, Request, Response, StatusCode};
|
||||
use serde::Serialize;
|
||||
use serde::de::DeserializeOwned;
|
||||
use tokio::net::TcpListener;
|
||||
use tokio::sync::{Mutex, watch};
|
||||
use tokio::sync::{Mutex, RwLock, watch};
|
||||
use tracing::{debug, info, warn};
|
||||
|
||||
use crate::config::ProxyConfig;
|
||||
use crate::ip_tracker::UserIpTracker;
|
||||
use crate::proxy::route_mode::RouteRuntimeController;
|
||||
use crate::startup::StartupTracker;
|
||||
use crate::stats::Stats;
|
||||
use crate::transport::UpstreamManager;
|
||||
use crate::transport::middle_proxy::MePool;
|
||||
|
||||
mod config_store;
|
||||
mod events;
|
||||
mod http_utils;
|
||||
mod model;
|
||||
mod runtime_edge;
|
||||
mod runtime_init;
|
||||
mod runtime_min;
|
||||
mod runtime_selftest;
|
||||
mod runtime_stats;
|
||||
mod runtime_watch;
|
||||
mod runtime_zero;
|
||||
mod users;
|
||||
|
||||
use config_store::{current_revision, parse_if_match};
|
||||
use events::ApiEventStore;
|
||||
use http_utils::{error_response, read_json, read_optional_json, success_response};
|
||||
use model::{
|
||||
ApiFailure, CreateUserRequest, ErrorBody, ErrorResponse, HealthData, PatchUserRequest,
|
||||
RotateSecretRequest, SuccessResponse, SummaryData,
|
||||
ApiFailure, CreateUserRequest, HealthData, PatchUserRequest, RotateSecretRequest, SummaryData,
|
||||
};
|
||||
use runtime_edge::{
|
||||
EdgeConnectionsCacheEntry, build_runtime_connections_summary_data,
|
||||
build_runtime_events_recent_data,
|
||||
};
|
||||
use runtime_init::build_runtime_initialization_data;
|
||||
use runtime_min::{
|
||||
build_runtime_me_pool_state_data, build_runtime_me_quality_data, build_runtime_nat_stun_data,
|
||||
build_runtime_upstream_quality_data, build_security_whitelist_data,
|
||||
};
|
||||
use runtime_selftest::build_runtime_me_selftest_data;
|
||||
use runtime_stats::{
|
||||
MinimalCacheEntry, build_dcs_data, build_me_writers_data, build_minimal_all_data,
|
||||
build_zero_all_data,
|
||||
build_upstreams_data, build_zero_all_data,
|
||||
};
|
||||
use runtime_watch::spawn_runtime_watchers;
|
||||
use runtime_zero::{
|
||||
build_limits_effective_data, build_runtime_gates_data, build_security_posture_data,
|
||||
build_system_info_data,
|
||||
};
|
||||
use users::{create_user, delete_user, patch_user, rotate_secret, users_from_config};
|
||||
|
||||
pub(super) struct ApiRuntimeState {
|
||||
pub(super) process_started_at_epoch_secs: u64,
|
||||
pub(super) config_reload_count: AtomicU64,
|
||||
pub(super) last_config_reload_epoch_secs: AtomicU64,
|
||||
pub(super) admission_open: AtomicBool,
|
||||
}
|
||||
|
||||
#[derive(Clone)]
|
||||
pub(super) struct ApiShared {
|
||||
pub(super) stats: Arc<Stats>,
|
||||
pub(super) ip_tracker: Arc<UserIpTracker>,
|
||||
pub(super) me_pool: Option<Arc<MePool>>,
|
||||
pub(super) me_pool: Arc<RwLock<Option<Arc<MePool>>>>,
|
||||
pub(super) upstream_manager: Arc<UpstreamManager>,
|
||||
pub(super) config_path: PathBuf,
|
||||
pub(super) detected_ips_rx: watch::Receiver<(Option<IpAddr>, Option<IpAddr>)>,
|
||||
pub(super) mutation_lock: Arc<Mutex<()>>,
|
||||
pub(super) minimal_cache: Arc<Mutex<Option<MinimalCacheEntry>>>,
|
||||
pub(super) runtime_edge_connections_cache: Arc<Mutex<Option<EdgeConnectionsCacheEntry>>>,
|
||||
pub(super) runtime_edge_recompute_lock: Arc<Mutex<()>>,
|
||||
pub(super) runtime_events: Arc<ApiEventStore>,
|
||||
pub(super) request_id: Arc<AtomicU64>,
|
||||
pub(super) runtime_state: Arc<ApiRuntimeState>,
|
||||
pub(super) startup_tracker: Arc<StartupTracker>,
|
||||
pub(super) route_runtime: Arc<RouteRuntimeController>,
|
||||
}
|
||||
|
||||
impl ApiShared {
|
||||
fn next_request_id(&self) -> u64 {
|
||||
self.request_id.fetch_add(1, Ordering::Relaxed)
|
||||
}
|
||||
|
||||
fn detected_link_ips(&self) -> (Option<IpAddr>, Option<IpAddr>) {
|
||||
*self.detected_ips_rx.borrow()
|
||||
}
|
||||
}
|
||||
|
||||
pub async fn serve(
|
||||
listen: SocketAddr,
|
||||
stats: Arc<Stats>,
|
||||
ip_tracker: Arc<UserIpTracker>,
|
||||
me_pool: Option<Arc<MePool>>,
|
||||
me_pool: Arc<RwLock<Option<Arc<MePool>>>>,
|
||||
route_runtime: Arc<RouteRuntimeController>,
|
||||
upstream_manager: Arc<UpstreamManager>,
|
||||
config_rx: watch::Receiver<Arc<ProxyConfig>>,
|
||||
admission_rx: watch::Receiver<bool>,
|
||||
config_path: PathBuf,
|
||||
detected_ips_rx: watch::Receiver<(Option<IpAddr>, Option<IpAddr>)>,
|
||||
process_started_at_epoch_secs: u64,
|
||||
startup_tracker: Arc<StartupTracker>,
|
||||
) {
|
||||
let listener = match TcpListener::bind(listen).await {
|
||||
Ok(listener) => listener,
|
||||
@@ -76,16 +128,40 @@ pub async fn serve(
|
||||
|
||||
info!("API endpoint: http://{}/v1/*", listen);
|
||||
|
||||
let runtime_state = Arc::new(ApiRuntimeState {
|
||||
process_started_at_epoch_secs,
|
||||
config_reload_count: AtomicU64::new(0),
|
||||
last_config_reload_epoch_secs: AtomicU64::new(0),
|
||||
admission_open: AtomicBool::new(*admission_rx.borrow()),
|
||||
});
|
||||
|
||||
let shared = Arc::new(ApiShared {
|
||||
stats,
|
||||
ip_tracker,
|
||||
me_pool,
|
||||
upstream_manager,
|
||||
config_path,
|
||||
detected_ips_rx,
|
||||
mutation_lock: Arc::new(Mutex::new(())),
|
||||
minimal_cache: Arc::new(Mutex::new(None)),
|
||||
runtime_edge_connections_cache: Arc::new(Mutex::new(None)),
|
||||
runtime_edge_recompute_lock: Arc::new(Mutex::new(())),
|
||||
runtime_events: Arc::new(ApiEventStore::new(
|
||||
config_rx.borrow().server.api.runtime_edge_events_capacity,
|
||||
)),
|
||||
request_id: Arc::new(AtomicU64::new(1)),
|
||||
runtime_state: runtime_state.clone(),
|
||||
startup_tracker,
|
||||
route_runtime,
|
||||
});
|
||||
|
||||
spawn_runtime_watchers(
|
||||
config_rx.clone(),
|
||||
admission_rx.clone(),
|
||||
runtime_state.clone(),
|
||||
shared.runtime_events.clone(),
|
||||
);
|
||||
|
||||
loop {
|
||||
let (stream, peer) = match listener.accept().await {
|
||||
Ok(v) => v,
|
||||
@@ -134,15 +210,15 @@ async fn handle(
|
||||
));
|
||||
}
|
||||
|
||||
if !api_cfg.whitelist.is_empty()
|
||||
&& !api_cfg
|
||||
.whitelist
|
||||
.iter()
|
||||
.any(|net| net.contains(peer.ip()))
|
||||
if !api_cfg.whitelist.is_empty() && !api_cfg.whitelist.iter().any(|net| net.contains(peer.ip()))
|
||||
{
|
||||
return Ok(error_response(
|
||||
request_id,
|
||||
ApiFailure::new(StatusCode::FORBIDDEN, "forbidden", "Source IP is not allowed"),
|
||||
ApiFailure::new(
|
||||
StatusCode::FORBIDDEN,
|
||||
"forbidden",
|
||||
"Source IP is not allowed",
|
||||
),
|
||||
));
|
||||
}
|
||||
|
||||
@@ -167,6 +243,7 @@ async fn handle(
|
||||
|
||||
let method = req.method().clone();
|
||||
let path = req.uri().path().to_string();
|
||||
let query = req.uri().query().map(str::to_string);
|
||||
let body_limit = api_cfg.request_body_limit_bytes;
|
||||
|
||||
let result: Result<Response<Full<Bytes>>, ApiFailure> = async {
|
||||
@@ -179,6 +256,36 @@ async fn handle(
|
||||
};
|
||||
Ok(success_response(StatusCode::OK, data, revision))
|
||||
}
|
||||
("GET", "/v1/system/info") => {
|
||||
let revision = current_revision(&shared.config_path).await?;
|
||||
let data = build_system_info_data(shared.as_ref(), cfg.as_ref(), &revision);
|
||||
Ok(success_response(StatusCode::OK, data, revision))
|
||||
}
|
||||
("GET", "/v1/runtime/gates") => {
|
||||
let revision = current_revision(&shared.config_path).await?;
|
||||
let data = build_runtime_gates_data(shared.as_ref(), cfg.as_ref()).await;
|
||||
Ok(success_response(StatusCode::OK, data, revision))
|
||||
}
|
||||
("GET", "/v1/runtime/initialization") => {
|
||||
let revision = current_revision(&shared.config_path).await?;
|
||||
let data = build_runtime_initialization_data(shared.as_ref()).await;
|
||||
Ok(success_response(StatusCode::OK, data, revision))
|
||||
}
|
||||
("GET", "/v1/limits/effective") => {
|
||||
let revision = current_revision(&shared.config_path).await?;
|
||||
let data = build_limits_effective_data(cfg.as_ref());
|
||||
Ok(success_response(StatusCode::OK, data, revision))
|
||||
}
|
||||
("GET", "/v1/security/posture") => {
|
||||
let revision = current_revision(&shared.config_path).await?;
|
||||
let data = build_security_posture_data(cfg.as_ref());
|
||||
Ok(success_response(StatusCode::OK, data, revision))
|
||||
}
|
||||
("GET", "/v1/security/whitelist") => {
|
||||
let revision = current_revision(&shared.config_path).await?;
|
||||
let data = build_security_whitelist_data(cfg.as_ref());
|
||||
Ok(success_response(StatusCode::OK, data, revision))
|
||||
}
|
||||
("GET", "/v1/stats/summary") => {
|
||||
let revision = current_revision(&shared.config_path).await?;
|
||||
let data = SummaryData {
|
||||
@@ -195,6 +302,11 @@ async fn handle(
|
||||
let data = build_zero_all_data(&shared.stats, cfg.access.users.len());
|
||||
Ok(success_response(StatusCode::OK, data, revision))
|
||||
}
|
||||
("GET", "/v1/stats/upstreams") => {
|
||||
let revision = current_revision(&shared.config_path).await?;
|
||||
let data = build_upstreams_data(shared.as_ref(), api_cfg);
|
||||
Ok(success_response(StatusCode::OK, data, revision))
|
||||
}
|
||||
("GET", "/v1/stats/minimal/all") => {
|
||||
let revision = current_revision(&shared.config_path).await?;
|
||||
let data = build_minimal_all_data(shared.as_ref(), api_cfg).await;
|
||||
@@ -210,9 +322,57 @@ async fn handle(
|
||||
let data = build_dcs_data(shared.as_ref(), api_cfg).await;
|
||||
Ok(success_response(StatusCode::OK, data, revision))
|
||||
}
|
||||
("GET", "/v1/runtime/me_pool_state") => {
|
||||
let revision = current_revision(&shared.config_path).await?;
|
||||
let data = build_runtime_me_pool_state_data(shared.as_ref()).await;
|
||||
Ok(success_response(StatusCode::OK, data, revision))
|
||||
}
|
||||
("GET", "/v1/runtime/me_quality") => {
|
||||
let revision = current_revision(&shared.config_path).await?;
|
||||
let data = build_runtime_me_quality_data(shared.as_ref()).await;
|
||||
Ok(success_response(StatusCode::OK, data, revision))
|
||||
}
|
||||
("GET", "/v1/runtime/upstream_quality") => {
|
||||
let revision = current_revision(&shared.config_path).await?;
|
||||
let data = build_runtime_upstream_quality_data(shared.as_ref()).await;
|
||||
Ok(success_response(StatusCode::OK, data, revision))
|
||||
}
|
||||
("GET", "/v1/runtime/nat_stun") => {
|
||||
let revision = current_revision(&shared.config_path).await?;
|
||||
let data = build_runtime_nat_stun_data(shared.as_ref()).await;
|
||||
Ok(success_response(StatusCode::OK, data, revision))
|
||||
}
|
||||
("GET", "/v1/runtime/me-selftest") => {
|
||||
let revision = current_revision(&shared.config_path).await?;
|
||||
let data = build_runtime_me_selftest_data(shared.as_ref(), cfg.as_ref()).await;
|
||||
Ok(success_response(StatusCode::OK, data, revision))
|
||||
}
|
||||
("GET", "/v1/runtime/connections/summary") => {
|
||||
let revision = current_revision(&shared.config_path).await?;
|
||||
let data =
|
||||
build_runtime_connections_summary_data(shared.as_ref(), cfg.as_ref()).await;
|
||||
Ok(success_response(StatusCode::OK, data, revision))
|
||||
}
|
||||
("GET", "/v1/runtime/events/recent") => {
|
||||
let revision = current_revision(&shared.config_path).await?;
|
||||
let data = build_runtime_events_recent_data(
|
||||
shared.as_ref(),
|
||||
cfg.as_ref(),
|
||||
query.as_deref(),
|
||||
);
|
||||
Ok(success_response(StatusCode::OK, data, revision))
|
||||
}
|
||||
("GET", "/v1/stats/users") | ("GET", "/v1/users") => {
|
||||
let revision = current_revision(&shared.config_path).await?;
|
||||
let users = users_from_config(&cfg, &shared.stats, &shared.ip_tracker).await;
|
||||
let (detected_ip_v4, detected_ip_v6) = shared.detected_link_ips();
|
||||
let users = users_from_config(
|
||||
&cfg,
|
||||
&shared.stats,
|
||||
&shared.ip_tracker,
|
||||
detected_ip_v4,
|
||||
detected_ip_v6,
|
||||
)
|
||||
.await;
|
||||
Ok(success_response(StatusCode::OK, users, revision))
|
||||
}
|
||||
("POST", "/v1/users") => {
|
||||
@@ -228,7 +388,20 @@ async fn handle(
|
||||
}
|
||||
let expected_revision = parse_if_match(req.headers());
|
||||
let body = read_json::<CreateUserRequest>(req.into_body(), body_limit).await?;
|
||||
let (data, revision) = create_user(body, expected_revision, &shared).await?;
|
||||
let result = create_user(body, expected_revision, &shared).await;
|
||||
let (data, revision) = match result {
|
||||
Ok(ok) => ok,
|
||||
Err(error) => {
|
||||
shared
|
||||
.runtime_events
|
||||
.record("api.user.create.failed", error.code);
|
||||
return Err(error);
|
||||
}
|
||||
};
|
||||
shared.runtime_events.record(
|
||||
"api.user.create.ok",
|
||||
format!("username={}", data.user.username),
|
||||
);
|
||||
Ok(success_response(StatusCode::CREATED, data, revision))
|
||||
}
|
||||
_ => {
|
||||
@@ -238,8 +411,17 @@ async fn handle(
|
||||
{
|
||||
if method == Method::GET {
|
||||
let revision = current_revision(&shared.config_path).await?;
|
||||
let users = users_from_config(&cfg, &shared.stats, &shared.ip_tracker).await;
|
||||
if let Some(user_info) = users.into_iter().find(|entry| entry.username == user)
|
||||
let (detected_ip_v4, detected_ip_v6) = shared.detected_link_ips();
|
||||
let users = users_from_config(
|
||||
&cfg,
|
||||
&shared.stats,
|
||||
&shared.ip_tracker,
|
||||
detected_ip_v4,
|
||||
detected_ip_v6,
|
||||
)
|
||||
.await;
|
||||
if let Some(user_info) =
|
||||
users.into_iter().find(|entry| entry.username == user)
|
||||
{
|
||||
return Ok(success_response(StatusCode::OK, user_info, revision));
|
||||
}
|
||||
@@ -260,9 +442,22 @@ async fn handle(
|
||||
));
|
||||
}
|
||||
let expected_revision = parse_if_match(req.headers());
|
||||
let body = read_json::<PatchUserRequest>(req.into_body(), body_limit).await?;
|
||||
let (data, revision) =
|
||||
patch_user(user, body, expected_revision, &shared).await?;
|
||||
let body =
|
||||
read_json::<PatchUserRequest>(req.into_body(), body_limit).await?;
|
||||
let result = patch_user(user, body, expected_revision, &shared).await;
|
||||
let (data, revision) = match result {
|
||||
Ok(ok) => ok,
|
||||
Err(error) => {
|
||||
shared.runtime_events.record(
|
||||
"api.user.patch.failed",
|
||||
format!("username={} code={}", user, error.code),
|
||||
);
|
||||
return Err(error);
|
||||
}
|
||||
};
|
||||
shared
|
||||
.runtime_events
|
||||
.record("api.user.patch.ok", format!("username={}", data.username));
|
||||
return Ok(success_response(StatusCode::OK, data, revision));
|
||||
}
|
||||
if method == Method::DELETE {
|
||||
@@ -277,8 +472,20 @@ async fn handle(
|
||||
));
|
||||
}
|
||||
let expected_revision = parse_if_match(req.headers());
|
||||
let (deleted_user, revision) =
|
||||
delete_user(user, expected_revision, &shared).await?;
|
||||
let result = delete_user(user, expected_revision, &shared).await;
|
||||
let (deleted_user, revision) = match result {
|
||||
Ok(ok) => ok,
|
||||
Err(error) => {
|
||||
shared.runtime_events.record(
|
||||
"api.user.delete.failed",
|
||||
format!("username={} code={}", user, error.code),
|
||||
);
|
||||
return Err(error);
|
||||
}
|
||||
};
|
||||
shared
|
||||
.runtime_events
|
||||
.record("api.user.delete.ok", format!("username={}", deleted_user));
|
||||
return Ok(success_response(StatusCode::OK, deleted_user, revision));
|
||||
}
|
||||
if method == Method::POST
|
||||
@@ -300,9 +507,27 @@ async fn handle(
|
||||
let body =
|
||||
read_optional_json::<RotateSecretRequest>(req.into_body(), body_limit)
|
||||
.await?;
|
||||
let (data, revision) =
|
||||
rotate_secret(base_user, body.unwrap_or_default(), expected_revision, &shared)
|
||||
.await?;
|
||||
let result = rotate_secret(
|
||||
base_user,
|
||||
body.unwrap_or_default(),
|
||||
expected_revision,
|
||||
&shared,
|
||||
)
|
||||
.await;
|
||||
let (data, revision) = match result {
|
||||
Ok(ok) => ok,
|
||||
Err(error) => {
|
||||
shared.runtime_events.record(
|
||||
"api.user.rotate_secret.failed",
|
||||
format!("username={} code={}", base_user, error.code),
|
||||
);
|
||||
return Err(error);
|
||||
}
|
||||
};
|
||||
shared.runtime_events.record(
|
||||
"api.user.rotate_secret.ok",
|
||||
format!("username={}", base_user),
|
||||
);
|
||||
return Ok(success_response(StatusCode::OK, data, revision));
|
||||
}
|
||||
if method == Method::POST {
|
||||
@@ -334,81 +559,3 @@ async fn handle(
|
||||
Err(error) => Ok(error_response(request_id, error)),
|
||||
}
|
||||
}
|
||||
|
||||
fn success_response<T: Serialize>(
|
||||
status: StatusCode,
|
||||
data: T,
|
||||
revision: String,
|
||||
) -> Response<Full<Bytes>> {
|
||||
let payload = SuccessResponse {
|
||||
ok: true,
|
||||
data,
|
||||
revision,
|
||||
};
|
||||
let body = serde_json::to_vec(&payload).unwrap_or_else(|_| b"{\"ok\":false}".to_vec());
|
||||
Response::builder()
|
||||
.status(status)
|
||||
.header("content-type", "application/json; charset=utf-8")
|
||||
.body(Full::new(Bytes::from(body)))
|
||||
.unwrap()
|
||||
}
|
||||
|
||||
fn error_response(request_id: u64, failure: ApiFailure) -> Response<Full<Bytes>> {
|
||||
let payload = ErrorResponse {
|
||||
ok: false,
|
||||
error: ErrorBody {
|
||||
code: failure.code,
|
||||
message: failure.message,
|
||||
},
|
||||
request_id,
|
||||
};
|
||||
let body = serde_json::to_vec(&payload).unwrap_or_else(|_| {
|
||||
format!(
|
||||
"{{\"ok\":false,\"error\":{{\"code\":\"internal_error\",\"message\":\"serialization failed\"}},\"request_id\":{}}}",
|
||||
request_id
|
||||
)
|
||||
.into_bytes()
|
||||
});
|
||||
Response::builder()
|
||||
.status(failure.status)
|
||||
.header("content-type", "application/json; charset=utf-8")
|
||||
.body(Full::new(Bytes::from(body)))
|
||||
.unwrap()
|
||||
}
|
||||
|
||||
async fn read_json<T: DeserializeOwned>(body: Incoming, limit: usize) -> Result<T, ApiFailure> {
|
||||
let bytes = read_body_with_limit(body, limit).await?;
|
||||
serde_json::from_slice(&bytes).map_err(|_| ApiFailure::bad_request("Invalid JSON body"))
|
||||
}
|
||||
|
||||
async fn read_optional_json<T: DeserializeOwned>(
|
||||
body: Incoming,
|
||||
limit: usize,
|
||||
) -> Result<Option<T>, ApiFailure> {
|
||||
let bytes = read_body_with_limit(body, limit).await?;
|
||||
if bytes.is_empty() {
|
||||
return Ok(None);
|
||||
}
|
||||
serde_json::from_slice(&bytes)
|
||||
.map(Some)
|
||||
.map_err(|_| ApiFailure::bad_request("Invalid JSON body"))
|
||||
}
|
||||
|
||||
async fn read_body_with_limit(body: Incoming, limit: usize) -> Result<Vec<u8>, ApiFailure> {
|
||||
let mut collected = Vec::new();
|
||||
let mut body = body;
|
||||
while let Some(frame_result) = body.frame().await {
|
||||
let frame = frame_result.map_err(|_| ApiFailure::bad_request("Invalid request body"))?;
|
||||
if let Some(chunk) = frame.data_ref() {
|
||||
if collected.len().saturating_add(chunk.len()) > limit {
|
||||
return Err(ApiFailure::new(
|
||||
StatusCode::PAYLOAD_TOO_LARGE,
|
||||
"payload_too_large",
|
||||
format!("Body exceeds {} bytes", limit),
|
||||
));
|
||||
}
|
||||
collected.extend_from_slice(chunk);
|
||||
}
|
||||
}
|
||||
Ok(collected)
|
||||
}
|
||||
|
||||
119
src/api/model.rs
119
src/api/model.rs
@@ -1,8 +1,12 @@
|
||||
use std::net::IpAddr;
|
||||
use std::sync::OnceLock;
|
||||
|
||||
use chrono::{DateTime, Utc};
|
||||
use hyper::StatusCode;
|
||||
use rand::Rng;
|
||||
use serde::{Deserialize, Serialize};
|
||||
|
||||
use crate::crypto::SecureRandom;
|
||||
|
||||
const MAX_USERNAME_LEN: usize = 64;
|
||||
|
||||
#[derive(Debug)]
|
||||
@@ -103,6 +107,51 @@ pub(super) struct ZeroUpstreamData {
|
||||
pub(super) connect_duration_fail_bucket_gt_1000ms: u64,
|
||||
}
|
||||
|
||||
#[derive(Serialize, Clone)]
|
||||
pub(super) struct UpstreamDcStatus {
|
||||
pub(super) dc: i16,
|
||||
pub(super) latency_ema_ms: Option<f64>,
|
||||
pub(super) ip_preference: &'static str,
|
||||
}
|
||||
|
||||
#[derive(Serialize, Clone)]
|
||||
pub(super) struct UpstreamStatus {
|
||||
pub(super) upstream_id: usize,
|
||||
pub(super) route_kind: &'static str,
|
||||
pub(super) address: String,
|
||||
pub(super) weight: u16,
|
||||
pub(super) scopes: String,
|
||||
pub(super) healthy: bool,
|
||||
pub(super) fails: u32,
|
||||
pub(super) last_check_age_secs: u64,
|
||||
pub(super) effective_latency_ms: Option<f64>,
|
||||
pub(super) dc: Vec<UpstreamDcStatus>,
|
||||
}
|
||||
|
||||
#[derive(Serialize, Clone)]
|
||||
pub(super) struct UpstreamSummaryData {
|
||||
pub(super) configured_total: usize,
|
||||
pub(super) healthy_total: usize,
|
||||
pub(super) unhealthy_total: usize,
|
||||
pub(super) direct_total: usize,
|
||||
pub(super) socks4_total: usize,
|
||||
pub(super) socks5_total: usize,
|
||||
pub(super) shadowsocks_total: usize,
|
||||
}
|
||||
|
||||
#[derive(Serialize, Clone)]
|
||||
pub(super) struct UpstreamsData {
|
||||
pub(super) enabled: bool,
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub(super) reason: Option<&'static str>,
|
||||
pub(super) generated_at_epoch_secs: u64,
|
||||
pub(super) zero: ZeroUpstreamData,
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub(super) summary: Option<UpstreamSummaryData>,
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub(super) upstreams: Option<Vec<UpstreamStatus>>,
|
||||
}
|
||||
|
||||
#[derive(Serialize, Clone)]
|
||||
pub(super) struct ZeroMiddleProxyData {
|
||||
pub(super) keepalive_sent_total: u64,
|
||||
@@ -125,6 +174,24 @@ pub(super) struct ZeroMiddleProxyData {
|
||||
pub(super) route_drop_queue_full_total: u64,
|
||||
pub(super) route_drop_queue_full_base_total: u64,
|
||||
pub(super) route_drop_queue_full_high_total: u64,
|
||||
pub(super) d2c_batches_total: u64,
|
||||
pub(super) d2c_batch_frames_total: u64,
|
||||
pub(super) d2c_batch_bytes_total: u64,
|
||||
pub(super) d2c_flush_reason_queue_drain_total: u64,
|
||||
pub(super) d2c_flush_reason_batch_frames_total: u64,
|
||||
pub(super) d2c_flush_reason_batch_bytes_total: u64,
|
||||
pub(super) d2c_flush_reason_max_delay_total: u64,
|
||||
pub(super) d2c_flush_reason_ack_immediate_total: u64,
|
||||
pub(super) d2c_flush_reason_close_total: u64,
|
||||
pub(super) d2c_data_frames_total: u64,
|
||||
pub(super) d2c_ack_frames_total: u64,
|
||||
pub(super) d2c_payload_bytes_total: u64,
|
||||
pub(super) d2c_write_mode_coalesced_total: u64,
|
||||
pub(super) d2c_write_mode_split_total: u64,
|
||||
pub(super) d2c_quota_reject_pre_write_total: u64,
|
||||
pub(super) d2c_quota_reject_post_write_total: u64,
|
||||
pub(super) d2c_frame_buf_shrink_total: u64,
|
||||
pub(super) d2c_frame_buf_shrink_bytes_total: u64,
|
||||
pub(super) socks_kdf_strict_reject_total: u64,
|
||||
pub(super) socks_kdf_compat_fallback_total: u64,
|
||||
pub(super) endpoint_quarantine_total: u64,
|
||||
@@ -190,6 +257,8 @@ pub(super) struct MeWritersSummary {
|
||||
pub(super) required_writers: usize,
|
||||
pub(super) alive_writers: usize,
|
||||
pub(super) coverage_pct: f64,
|
||||
pub(super) fresh_alive_writers: usize,
|
||||
pub(super) fresh_coverage_pct: f64,
|
||||
}
|
||||
|
||||
#[derive(Serialize, Clone)]
|
||||
@@ -204,6 +273,12 @@ pub(super) struct MeWriterStatus {
|
||||
pub(super) bound_clients: usize,
|
||||
pub(super) idle_for_secs: Option<u64>,
|
||||
pub(super) rtt_ema_ms: Option<f64>,
|
||||
pub(super) matches_active_generation: bool,
|
||||
pub(super) in_desired_map: bool,
|
||||
pub(super) allow_drain_fallback: bool,
|
||||
pub(super) drain_started_at_epoch_secs: Option<u64>,
|
||||
pub(super) drain_deadline_epoch_secs: Option<u64>,
|
||||
pub(super) drain_over_ttl: bool,
|
||||
}
|
||||
|
||||
#[derive(Serialize, Clone)]
|
||||
@@ -220,15 +295,28 @@ pub(super) struct MeWritersData {
|
||||
pub(super) struct DcStatus {
|
||||
pub(super) dc: i16,
|
||||
pub(super) endpoints: Vec<String>,
|
||||
pub(super) endpoint_writers: Vec<DcEndpointWriters>,
|
||||
pub(super) available_endpoints: usize,
|
||||
pub(super) available_pct: f64,
|
||||
pub(super) required_writers: usize,
|
||||
pub(super) floor_min: usize,
|
||||
pub(super) floor_target: usize,
|
||||
pub(super) floor_max: usize,
|
||||
pub(super) floor_capped: bool,
|
||||
pub(super) alive_writers: usize,
|
||||
pub(super) coverage_pct: f64,
|
||||
pub(super) fresh_alive_writers: usize,
|
||||
pub(super) fresh_coverage_pct: f64,
|
||||
pub(super) rtt_ms: Option<f64>,
|
||||
pub(super) load: usize,
|
||||
}
|
||||
|
||||
#[derive(Serialize, Clone)]
|
||||
pub(super) struct DcEndpointWriters {
|
||||
pub(super) endpoint: String,
|
||||
pub(super) active_writers: usize,
|
||||
}
|
||||
|
||||
#[derive(Serialize, Clone)]
|
||||
pub(super) struct DcStatusData {
|
||||
pub(super) middle_proxy_enabled: bool,
|
||||
@@ -262,7 +350,27 @@ pub(super) struct MinimalMeRuntimeData {
|
||||
pub(super) floor_mode: &'static str,
|
||||
pub(super) adaptive_floor_idle_secs: u64,
|
||||
pub(super) adaptive_floor_min_writers_single_endpoint: u8,
|
||||
pub(super) adaptive_floor_min_writers_multi_endpoint: u8,
|
||||
pub(super) adaptive_floor_recover_grace_secs: u64,
|
||||
pub(super) adaptive_floor_writers_per_core_total: u16,
|
||||
pub(super) adaptive_floor_cpu_cores_override: u16,
|
||||
pub(super) adaptive_floor_max_extra_writers_single_per_core: u16,
|
||||
pub(super) adaptive_floor_max_extra_writers_multi_per_core: u16,
|
||||
pub(super) adaptive_floor_max_active_writers_per_core: u16,
|
||||
pub(super) adaptive_floor_max_warm_writers_per_core: u16,
|
||||
pub(super) adaptive_floor_max_active_writers_global: u32,
|
||||
pub(super) adaptive_floor_max_warm_writers_global: u32,
|
||||
pub(super) adaptive_floor_cpu_cores_detected: u32,
|
||||
pub(super) adaptive_floor_cpu_cores_effective: u32,
|
||||
pub(super) adaptive_floor_global_cap_raw: u64,
|
||||
pub(super) adaptive_floor_global_cap_effective: u64,
|
||||
pub(super) adaptive_floor_target_writers_total: u64,
|
||||
pub(super) adaptive_floor_active_cap_configured: u64,
|
||||
pub(super) adaptive_floor_active_cap_effective: u64,
|
||||
pub(super) adaptive_floor_warm_cap_configured: u64,
|
||||
pub(super) adaptive_floor_warm_cap_effective: u64,
|
||||
pub(super) adaptive_floor_active_writers_current: u64,
|
||||
pub(super) adaptive_floor_warm_writers_current: u64,
|
||||
pub(super) me_keepalive_enabled: bool,
|
||||
pub(super) me_keepalive_interval_secs: u64,
|
||||
pub(super) me_keepalive_jitter_secs: u64,
|
||||
@@ -284,6 +392,8 @@ pub(super) struct MinimalMeRuntimeData {
|
||||
pub(super) me_single_endpoint_outage_backoff_max_ms: u64,
|
||||
pub(super) me_single_endpoint_shadow_rotate_every_secs: u64,
|
||||
pub(super) me_deterministic_writer_sort: bool,
|
||||
pub(super) me_writer_pick_mode: &'static str,
|
||||
pub(super) me_writer_pick_sample_size: u8,
|
||||
pub(super) me_socks_kdf_policy: &'static str,
|
||||
pub(super) quarantined_endpoints_total: usize,
|
||||
pub(super) quarantined_endpoints: Vec<MinimalQuarantineData>,
|
||||
@@ -325,6 +435,9 @@ pub(super) struct UserInfo {
|
||||
pub(super) max_unique_ips: Option<usize>,
|
||||
pub(super) current_connections: u64,
|
||||
pub(super) active_unique_ips: usize,
|
||||
pub(super) active_unique_ips_list: Vec<IpAddr>,
|
||||
pub(super) recent_unique_ips: usize,
|
||||
pub(super) recent_unique_ips_list: Vec<IpAddr>,
|
||||
pub(super) total_octets: u64,
|
||||
pub(super) links: UserLinks,
|
||||
}
|
||||
@@ -389,7 +502,9 @@ pub(super) fn is_valid_username(user: &str) -> bool {
|
||||
}
|
||||
|
||||
pub(super) fn random_user_secret() -> String {
|
||||
static API_SECRET_RNG: OnceLock<SecureRandom> = OnceLock::new();
|
||||
let rng = API_SECRET_RNG.get_or_init(SecureRandom::new);
|
||||
let mut bytes = [0u8; 16];
|
||||
rand::rng().fill(&mut bytes);
|
||||
rng.fill(&mut bytes);
|
||||
hex::encode(bytes)
|
||||
}
|
||||
|
||||
294
src/api/runtime_edge.rs
Normal file
294
src/api/runtime_edge.rs
Normal file
@@ -0,0 +1,294 @@
|
||||
use std::cmp::Reverse;
|
||||
use std::time::{Duration, Instant, SystemTime, UNIX_EPOCH};
|
||||
|
||||
use serde::Serialize;
|
||||
|
||||
use crate::config::ProxyConfig;
|
||||
|
||||
use super::ApiShared;
|
||||
use super::events::ApiEventRecord;
|
||||
|
||||
const FEATURE_DISABLED_REASON: &str = "feature_disabled";
|
||||
const SOURCE_UNAVAILABLE_REASON: &str = "source_unavailable";
|
||||
const EVENTS_DEFAULT_LIMIT: usize = 50;
|
||||
const EVENTS_MAX_LIMIT: usize = 1000;
|
||||
|
||||
#[derive(Clone, Serialize)]
|
||||
pub(super) struct RuntimeEdgeConnectionUserData {
|
||||
pub(super) username: String,
|
||||
pub(super) current_connections: u64,
|
||||
pub(super) total_octets: u64,
|
||||
}
|
||||
|
||||
#[derive(Clone, Serialize)]
|
||||
pub(super) struct RuntimeEdgeConnectionTotalsData {
|
||||
pub(super) current_connections: u64,
|
||||
pub(super) current_connections_me: u64,
|
||||
pub(super) current_connections_direct: u64,
|
||||
pub(super) active_users: usize,
|
||||
}
|
||||
|
||||
#[derive(Clone, Serialize)]
|
||||
pub(super) struct RuntimeEdgeConnectionTopData {
|
||||
pub(super) limit: usize,
|
||||
pub(super) by_connections: Vec<RuntimeEdgeConnectionUserData>,
|
||||
pub(super) by_throughput: Vec<RuntimeEdgeConnectionUserData>,
|
||||
}
|
||||
|
||||
#[derive(Clone, Serialize)]
|
||||
pub(super) struct RuntimeEdgeConnectionCacheData {
|
||||
pub(super) ttl_ms: u64,
|
||||
pub(super) served_from_cache: bool,
|
||||
pub(super) stale_cache_used: bool,
|
||||
}
|
||||
|
||||
#[derive(Clone, Serialize)]
|
||||
pub(super) struct RuntimeEdgeConnectionTelemetryData {
|
||||
pub(super) user_enabled: bool,
|
||||
pub(super) throughput_is_cumulative: bool,
|
||||
}
|
||||
|
||||
#[derive(Clone, Serialize)]
|
||||
pub(super) struct RuntimeEdgeConnectionsSummaryPayload {
|
||||
pub(super) cache: RuntimeEdgeConnectionCacheData,
|
||||
pub(super) totals: RuntimeEdgeConnectionTotalsData,
|
||||
pub(super) top: RuntimeEdgeConnectionTopData,
|
||||
pub(super) telemetry: RuntimeEdgeConnectionTelemetryData,
|
||||
}
|
||||
|
||||
#[derive(Serialize)]
|
||||
pub(super) struct RuntimeEdgeConnectionsSummaryData {
|
||||
pub(super) enabled: bool,
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub(super) reason: Option<&'static str>,
|
||||
pub(super) generated_at_epoch_secs: u64,
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub(super) data: Option<RuntimeEdgeConnectionsSummaryPayload>,
|
||||
}
|
||||
|
||||
/// One cached connections-summary payload plus its freshness metadata.
#[derive(Clone)]
pub(crate) struct EdgeConnectionsCacheEntry {
    // Monotonic deadline; the entry is fresh while `Instant::now() < expires_at`.
    pub(super) expires_at: Instant,
    // Payload as computed at generation time (cache flags are rewritten on serve).
    pub(super) payload: RuntimeEdgeConnectionsSummaryPayload,
    // Wall-clock second (Unix epoch) at which the payload was generated.
    pub(super) generated_at_epoch_secs: u64,
}
|
||||
|
||||
#[derive(Serialize)]
|
||||
pub(super) struct RuntimeEdgeEventsPayload {
|
||||
pub(super) capacity: usize,
|
||||
pub(super) dropped_total: u64,
|
||||
pub(super) events: Vec<ApiEventRecord>,
|
||||
}
|
||||
|
||||
#[derive(Serialize)]
|
||||
pub(super) struct RuntimeEdgeEventsData {
|
||||
pub(super) enabled: bool,
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub(super) reason: Option<&'static str>,
|
||||
pub(super) generated_at_epoch_secs: u64,
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub(super) data: Option<RuntimeEdgeEventsPayload>,
|
||||
}
|
||||
|
||||
/// Builds the runtime-edge "connections summary" API response.
///
/// When the runtime-edge feature is disabled in config, returns a disabled
/// envelope (`enabled: false`, reason `feature_disabled`, no data). Otherwise
/// serves the (possibly cached) connections payload; if neither a fresh
/// recompute nor any cached copy is available, reports `source_unavailable`.
pub(super) async fn build_runtime_connections_summary_data(
    shared: &ApiShared,
    cfg: &ProxyConfig,
) -> RuntimeEdgeConnectionsSummaryData {
    let now_epoch_secs = now_epoch_secs();
    let api_cfg = &cfg.server.api;
    if !api_cfg.runtime_edge_enabled {
        return RuntimeEdgeConnectionsSummaryData {
            enabled: false,
            reason: Some(FEATURE_DISABLED_REASON),
            generated_at_epoch_secs: now_epoch_secs,
            data: None,
        };
    }

    // TTL and top-N limits come from config; the helper handles caching,
    // single-flight recompute, and stale-cache fallback.
    let (generated_at_epoch_secs, payload) = match get_connections_payload_cached(
        shared,
        api_cfg.runtime_edge_cache_ttl_ms,
        api_cfg.runtime_edge_top_n,
    )
    .await
    {
        Some(v) => v,
        None => {
            // Feature is on but no payload could be produced or served stale.
            return RuntimeEdgeConnectionsSummaryData {
                enabled: true,
                reason: Some(SOURCE_UNAVAILABLE_REASON),
                generated_at_epoch_secs: now_epoch_secs,
                data: None,
            };
        }
    };

    RuntimeEdgeConnectionsSummaryData {
        enabled: true,
        reason: None,
        generated_at_epoch_secs,
        data: Some(payload),
    }
}
|
||||
|
||||
/// Builds the runtime-edge "recent events" API response.
///
/// Disabled envelope when runtime-edge is off in config. Otherwise returns up
/// to `limit` events from the in-memory ring buffer, where `limit` is read
/// from an optional `limit=` query parameter (default 50, clamped to 1..=1000).
pub(super) fn build_runtime_events_recent_data(
    shared: &ApiShared,
    cfg: &ProxyConfig,
    query: Option<&str>,
) -> RuntimeEdgeEventsData {
    let now_epoch_secs = now_epoch_secs();
    let api_cfg = &cfg.server.api;
    if !api_cfg.runtime_edge_enabled {
        return RuntimeEdgeEventsData {
            enabled: false,
            reason: Some(FEATURE_DISABLED_REASON),
            generated_at_epoch_secs: now_epoch_secs,
            data: None,
        };
    }

    let limit = parse_recent_events_limit(query, EVENTS_DEFAULT_LIMIT, EVENTS_MAX_LIMIT);
    let snapshot = shared.runtime_events.snapshot(limit);

    RuntimeEdgeEventsData {
        enabled: true,
        reason: None,
        generated_at_epoch_secs: now_epoch_secs,
        data: Some(RuntimeEdgeEventsPayload {
            capacity: snapshot.capacity,
            dropped_total: snapshot.dropped_total,
            events: snapshot.events,
        }),
    }
}
|
||||
|
||||
/// Returns `(generated_at_epoch_secs, payload)`, serving from cache when
/// fresh and otherwise recomputing under a single-flight lock.
///
/// Flow:
/// 1. If caching is enabled (`cache_ttl_ms > 0`) and a non-expired entry
///    exists, serve it with `served_from_cache = true`.
/// 2. Otherwise try to become the single recomputer via `try_lock`. A loser
///    serves whatever cached entry exists — even expired — marked as
///    `stale_cache_used = true`; with no cache at all it returns `None`.
/// 3. The winner recomputes, stores the result back into the cache (only
///    when caching is enabled), and returns the fresh payload.
async fn get_connections_payload_cached(
    shared: &ApiShared,
    cache_ttl_ms: u64,
    top_n: usize,
) -> Option<(u64, RuntimeEdgeConnectionsSummaryPayload)> {
    if cache_ttl_ms > 0 {
        let now = Instant::now();
        // Clone the entry out so the cache mutex is not held across awaits below.
        let cached = shared.runtime_edge_connections_cache.lock().await.clone();
        if let Some(entry) = cached
            && now < entry.expires_at
        {
            let mut payload = entry.payload;
            payload.cache.served_from_cache = true;
            payload.cache.stale_cache_used = false;
            return Some((entry.generated_at_epoch_secs, payload));
        }
    }

    // Single-flight: only one task recomputes; concurrent callers fall back
    // to the (possibly stale) cache instead of piling on.
    let Ok(_guard) = shared.runtime_edge_recompute_lock.try_lock() else {
        let cached = shared.runtime_edge_connections_cache.lock().await.clone();
        if let Some(entry) = cached {
            let mut payload = entry.payload;
            payload.cache.served_from_cache = true;
            payload.cache.stale_cache_used = true;
            return Some((entry.generated_at_epoch_secs, payload));
        }
        return None;
    };

    let generated_at_epoch_secs = now_epoch_secs();
    let payload = recompute_connections_payload(shared, cache_ttl_ms, top_n).await;

    if cache_ttl_ms > 0 {
        let entry = EdgeConnectionsCacheEntry {
            expires_at: Instant::now() + Duration::from_millis(cache_ttl_ms),
            payload: payload.clone(),
            generated_at_epoch_secs,
        };
        *shared.runtime_edge_connections_cache.lock().await = Some(entry);
    }

    Some((generated_at_epoch_secs, payload))
}
|
||||
|
||||
async fn recompute_connections_payload(
|
||||
shared: &ApiShared,
|
||||
cache_ttl_ms: u64,
|
||||
top_n: usize,
|
||||
) -> RuntimeEdgeConnectionsSummaryPayload {
|
||||
let mut rows = Vec::<RuntimeEdgeConnectionUserData>::new();
|
||||
let mut active_users = 0usize;
|
||||
for entry in shared.stats.iter_user_stats() {
|
||||
let user_stats = entry.value();
|
||||
let current_connections = user_stats
|
||||
.curr_connects
|
||||
.load(std::sync::atomic::Ordering::Relaxed);
|
||||
let total_octets = user_stats
|
||||
.octets_from_client
|
||||
.load(std::sync::atomic::Ordering::Relaxed)
|
||||
.saturating_add(
|
||||
user_stats
|
||||
.octets_to_client
|
||||
.load(std::sync::atomic::Ordering::Relaxed),
|
||||
);
|
||||
if current_connections > 0 {
|
||||
active_users = active_users.saturating_add(1);
|
||||
}
|
||||
rows.push(RuntimeEdgeConnectionUserData {
|
||||
username: entry.key().clone(),
|
||||
current_connections,
|
||||
total_octets,
|
||||
});
|
||||
}
|
||||
|
||||
let limit = top_n.max(1);
|
||||
let mut by_connections = rows.clone();
|
||||
by_connections.sort_by_key(|row| (Reverse(row.current_connections), row.username.clone()));
|
||||
by_connections.truncate(limit);
|
||||
|
||||
let mut by_throughput = rows;
|
||||
by_throughput.sort_by_key(|row| (Reverse(row.total_octets), row.username.clone()));
|
||||
by_throughput.truncate(limit);
|
||||
|
||||
let telemetry = shared.stats.telemetry_policy();
|
||||
RuntimeEdgeConnectionsSummaryPayload {
|
||||
cache: RuntimeEdgeConnectionCacheData {
|
||||
ttl_ms: cache_ttl_ms,
|
||||
served_from_cache: false,
|
||||
stale_cache_used: false,
|
||||
},
|
||||
totals: RuntimeEdgeConnectionTotalsData {
|
||||
current_connections: shared.stats.get_current_connections_total(),
|
||||
current_connections_me: shared.stats.get_current_connections_me(),
|
||||
current_connections_direct: shared.stats.get_current_connections_direct(),
|
||||
active_users,
|
||||
},
|
||||
top: RuntimeEdgeConnectionTopData {
|
||||
limit,
|
||||
by_connections,
|
||||
by_throughput,
|
||||
},
|
||||
telemetry: RuntimeEdgeConnectionTelemetryData {
|
||||
user_enabled: telemetry.user_enabled,
|
||||
throughput_is_cumulative: true,
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
/// Extracts a `limit=` value from an URL query string.
///
/// Scans `&`-separated `key=value` pairs and returns the first `limit` whose
/// value parses as `usize`, clamped into `1..=max_limit`. Pairs that are
/// malformed or fail to parse are skipped; with no usable pair (or no query
/// at all) the default is returned unchanged.
fn parse_recent_events_limit(query: Option<&str>, default_limit: usize, max_limit: usize) -> usize {
    let parsed = query.into_iter().flat_map(|q| q.split('&')).find_map(|pair| {
        let (key, value) = pair.split_once('=')?;
        if key != "limit" {
            return None;
        }
        value.parse::<usize>().ok()
    });
    match parsed {
        Some(value) => value.clamp(1, max_limit),
        None => default_limit,
    }
}
|
||||
|
||||
/// Current wall-clock time as whole seconds since the Unix epoch.
/// A system clock set before 1970 yields 0 rather than panicking.
fn now_epoch_secs() -> u64 {
    match SystemTime::now().duration_since(UNIX_EPOCH) {
        Ok(elapsed) => elapsed.as_secs(),
        Err(_) => 0,
    }
}
|
||||
182
src/api/runtime_init.rs
Normal file
182
src/api/runtime_init.rs
Normal file
@@ -0,0 +1,182 @@
|
||||
use serde::Serialize;
|
||||
|
||||
use crate::startup::{
|
||||
COMPONENT_ME_CONNECTIVITY_PING, COMPONENT_ME_POOL_CONSTRUCT, COMPONENT_ME_POOL_INIT_STAGE1,
|
||||
COMPONENT_ME_PROXY_CONFIG_V4, COMPONENT_ME_PROXY_CONFIG_V6, COMPONENT_ME_SECRET_FETCH,
|
||||
StartupComponentStatus, StartupMeStatus, compute_progress_pct,
|
||||
};
|
||||
|
||||
use super::ApiShared;
|
||||
|
||||
#[derive(Serialize)]
|
||||
pub(super) struct RuntimeInitializationComponentData {
|
||||
pub(super) id: &'static str,
|
||||
pub(super) title: &'static str,
|
||||
pub(super) status: &'static str,
|
||||
pub(super) started_at_epoch_ms: Option<u64>,
|
||||
pub(super) finished_at_epoch_ms: Option<u64>,
|
||||
pub(super) duration_ms: Option<u64>,
|
||||
pub(super) attempts: u32,
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub(super) details: Option<String>,
|
||||
}
|
||||
|
||||
#[derive(Serialize)]
|
||||
pub(super) struct RuntimeInitializationMeData {
|
||||
pub(super) status: &'static str,
|
||||
pub(super) current_stage: String,
|
||||
pub(super) progress_pct: f64,
|
||||
pub(super) init_attempt: u32,
|
||||
pub(super) retry_limit: String,
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub(super) last_error: Option<String>,
|
||||
}
|
||||
|
||||
#[derive(Serialize)]
|
||||
pub(super) struct RuntimeInitializationData {
|
||||
pub(super) status: &'static str,
|
||||
pub(super) degraded: bool,
|
||||
pub(super) current_stage: String,
|
||||
pub(super) progress_pct: f64,
|
||||
pub(super) started_at_epoch_secs: u64,
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub(super) ready_at_epoch_secs: Option<u64>,
|
||||
pub(super) total_elapsed_ms: u64,
|
||||
pub(super) transport_mode: String,
|
||||
pub(super) me: RuntimeInitializationMeData,
|
||||
pub(super) components: Vec<RuntimeInitializationComponentData>,
|
||||
}
|
||||
|
||||
#[derive(Clone)]
|
||||
pub(super) struct RuntimeStartupSummaryData {
|
||||
pub(super) status: &'static str,
|
||||
pub(super) stage: String,
|
||||
pub(super) progress_pct: f64,
|
||||
}
|
||||
|
||||
/// Builds the compact startup summary: overall status string, current stage
/// name, and a weighted progress percentage.
///
/// Progress blends the startup-tracker component snapshot with the live ME
/// pool stage-1 progress (when ME is initializing) via `compute_progress_pct`.
pub(super) async fn build_runtime_startup_summary(shared: &ApiShared) -> RuntimeStartupSummaryData {
    let snapshot = shared.startup_tracker.snapshot().await;
    let me_pool_progress = current_me_pool_stage_progress(shared).await;
    let progress_pct = compute_progress_pct(&snapshot, me_pool_progress);
    RuntimeStartupSummaryData {
        status: snapshot.status.as_str(),
        stage: snapshot.current_stage,
        progress_pct,
    }
}
|
||||
|
||||
/// Builds the detailed initialization report: overall progress, the ME
/// (middle-end) sub-status with its own progress percentage, and a per-
/// component breakdown (timestamps, durations, attempts, optional details).
pub(super) async fn build_runtime_initialization_data(
    shared: &ApiShared,
) -> RuntimeInitializationData {
    let snapshot = shared.startup_tracker.snapshot().await;
    // Fractional ME pool stage-1 progress (None unless ME is initializing).
    let me_pool_progress = current_me_pool_stage_progress(shared).await;
    let progress_pct = compute_progress_pct(&snapshot, me_pool_progress);
    let me_progress_pct = compute_me_progress_pct(&snapshot, me_pool_progress);

    RuntimeInitializationData {
        status: snapshot.status.as_str(),
        degraded: snapshot.degraded,
        current_stage: snapshot.current_stage,
        progress_pct,
        started_at_epoch_secs: snapshot.started_at_epoch_secs,
        ready_at_epoch_secs: snapshot.ready_at_epoch_secs,
        total_elapsed_ms: snapshot.total_elapsed_ms,
        transport_mode: snapshot.transport_mode,
        me: RuntimeInitializationMeData {
            status: snapshot.me.status.as_str(),
            current_stage: snapshot.me.current_stage,
            progress_pct: me_progress_pct,
            init_attempt: snapshot.me.init_attempt,
            retry_limit: snapshot.me.retry_limit,
            last_error: snapshot.me.last_error,
        },
        // Map tracker components 1:1 into the serializable API shape.
        components: snapshot
            .components
            .into_iter()
            .map(|component| RuntimeInitializationComponentData {
                id: component.id,
                title: component.title,
                status: component.status.as_str(),
                started_at_epoch_ms: component.started_at_epoch_ms,
                finished_at_epoch_ms: component.finished_at_epoch_ms,
                duration_ms: component.duration_ms,
                attempts: component.attempts,
                details: component.details,
            })
            .collect(),
    }
}
|
||||
|
||||
/// Computes the ME sub-system initialization progress as a percentage.
///
/// - `Pending` => 0%; terminal states (`Ready` / `Failed` / `Skipped`) => 100%.
/// - `Initializing` => weight-averaged completion over the ME component group
///   only: finished components count fully, a `Running` pool-init-stage1
///   component contributes the fractional `me_pool_progress` (clamped to
///   [0, 1]), any other running component counts as 0 until it finishes.
///   With zero total weight (no ME components in the snapshot) returns 0.
fn compute_me_progress_pct(
    snapshot: &crate::startup::StartupSnapshot,
    me_pool_progress: Option<f64>,
) -> f64 {
    match snapshot.me.status {
        StartupMeStatus::Pending => 0.0,
        StartupMeStatus::Ready | StartupMeStatus::Failed | StartupMeStatus::Skipped => 100.0,
        StartupMeStatus::Initializing => {
            let mut total_weight = 0.0f64;
            let mut completed_weight = 0.0f64;
            for component in &snapshot.components {
                // Only ME-group components participate in this percentage.
                if !is_me_component(component.id) {
                    continue;
                }
                total_weight += component.weight;
                // Per-component completion in [0, 1].
                let unit_progress = match component.status {
                    StartupComponentStatus::Pending => 0.0,
                    StartupComponentStatus::Running => {
                        if component.id == COMPONENT_ME_POOL_INIT_STAGE1 {
                            // Only pool stage-1 has mid-run fractional progress.
                            me_pool_progress.unwrap_or(0.0).clamp(0.0, 1.0)
                        } else {
                            0.0
                        }
                    }
                    StartupComponentStatus::Ready
                    | StartupComponentStatus::Failed
                    | StartupComponentStatus::Skipped => 1.0,
                };
                completed_weight += component.weight * unit_progress;
            }
            // Guard against division by (near-)zero total weight.
            if total_weight <= f64::EPSILON {
                0.0
            } else {
                ((completed_weight / total_weight) * 100.0).clamp(0.0, 100.0)
            }
        }
    }
}
|
||||
|
||||
fn is_me_component(component_id: &str) -> bool {
|
||||
matches!(
|
||||
component_id,
|
||||
COMPONENT_ME_SECRET_FETCH
|
||||
| COMPONENT_ME_PROXY_CONFIG_V4
|
||||
| COMPONENT_ME_PROXY_CONFIG_V6
|
||||
| COMPONENT_ME_POOL_CONSTRUCT
|
||||
| COMPONENT_ME_POOL_INIT_STAGE1
|
||||
| COMPONENT_ME_CONNECTIVITY_PING
|
||||
)
|
||||
}
|
||||
|
||||
/// Fractional progress (0.0..=1.0) of ME pool stage-1 initialization, or
/// `None` when ME is not currently initializing or the pool object has not
/// been constructed yet.
///
/// Blends DC-group coverage (weight 0.7) with writer coverage (weight 0.3).
/// NOTE(review): this takes its own tracker snapshot even though callers in
/// this module already hold one — the two snapshots can transiently disagree;
/// consider passing the snapshot in.
async fn current_me_pool_stage_progress(shared: &ApiShared) -> Option<f64> {
    let snapshot = shared.startup_tracker.snapshot().await;
    if snapshot.me.status != StartupMeStatus::Initializing {
        return None;
    }

    // `?` bails out with None while the pool slot is still empty.
    let pool = shared.me_pool.read().await.clone()?;
    let status = pool.api_status_snapshot().await;
    let configured_dc_groups = status.configured_dc_groups;
    // A DC group counts as covered once it has at least one alive writer.
    let covered_dc_groups = status.dcs.iter().filter(|dc| dc.alive_writers > 0).count();

    let dc_coverage = ratio_01(covered_dc_groups, configured_dc_groups);
    let writer_coverage = ratio_01(status.alive_writers, status.required_writers);
    Some((0.7 * dc_coverage + 0.3 * writer_coverage).clamp(0.0, 1.0))
}
|
||||
|
||||
/// Fraction `part / total` clamped into [0.0, 1.0]; a zero total yields 0.0.
fn ratio_01(part: usize, total: usize) -> f64 {
    match total {
        0 => 0.0,
        nonzero => (part as f64 / nonzero as f64).clamp(0.0, 1.0),
    }
}
|
||||
586
src/api/runtime_min.rs
Normal file
586
src/api/runtime_min.rs
Normal file
@@ -0,0 +1,586 @@
|
||||
use std::collections::BTreeSet;
|
||||
use std::time::{SystemTime, UNIX_EPOCH};
|
||||
|
||||
use serde::Serialize;
|
||||
|
||||
use crate::config::ProxyConfig;
|
||||
|
||||
use super::ApiShared;
|
||||
|
||||
const SOURCE_UNAVAILABLE_REASON: &str = "source_unavailable";
|
||||
|
||||
#[derive(Serialize)]
|
||||
pub(super) struct SecurityWhitelistData {
|
||||
pub(super) generated_at_epoch_secs: u64,
|
||||
pub(super) enabled: bool,
|
||||
pub(super) entries_total: usize,
|
||||
pub(super) entries: Vec<String>,
|
||||
}
|
||||
|
||||
#[derive(Serialize)]
|
||||
pub(super) struct RuntimeMePoolStateGenerationData {
|
||||
pub(super) active_generation: u64,
|
||||
pub(super) warm_generation: u64,
|
||||
pub(super) pending_hardswap_generation: u64,
|
||||
pub(super) pending_hardswap_age_secs: Option<u64>,
|
||||
pub(super) draining_generations: Vec<u64>,
|
||||
}
|
||||
|
||||
#[derive(Serialize)]
|
||||
pub(super) struct RuntimeMePoolStateHardswapData {
|
||||
pub(super) enabled: bool,
|
||||
pub(super) pending: bool,
|
||||
}
|
||||
|
||||
#[derive(Serialize)]
|
||||
pub(super) struct RuntimeMePoolStateWriterContourData {
|
||||
pub(super) warm: usize,
|
||||
pub(super) active: usize,
|
||||
pub(super) draining: usize,
|
||||
}
|
||||
|
||||
#[derive(Serialize)]
|
||||
pub(super) struct RuntimeMePoolStateWriterHealthData {
|
||||
pub(super) healthy: usize,
|
||||
pub(super) degraded: usize,
|
||||
pub(super) draining: usize,
|
||||
}
|
||||
|
||||
#[derive(Serialize)]
|
||||
pub(super) struct RuntimeMePoolStateWriterData {
|
||||
pub(super) total: usize,
|
||||
pub(super) alive_non_draining: usize,
|
||||
pub(super) draining: usize,
|
||||
pub(super) degraded: usize,
|
||||
pub(super) contour: RuntimeMePoolStateWriterContourData,
|
||||
pub(super) health: RuntimeMePoolStateWriterHealthData,
|
||||
}
|
||||
|
||||
#[derive(Serialize)]
|
||||
pub(super) struct RuntimeMePoolStateRefillDcData {
|
||||
pub(super) dc: i16,
|
||||
pub(super) family: &'static str,
|
||||
pub(super) inflight: usize,
|
||||
}
|
||||
|
||||
#[derive(Serialize)]
|
||||
pub(super) struct RuntimeMePoolStateRefillData {
|
||||
pub(super) inflight_endpoints_total: usize,
|
||||
pub(super) inflight_dc_total: usize,
|
||||
pub(super) by_dc: Vec<RuntimeMePoolStateRefillDcData>,
|
||||
}
|
||||
|
||||
#[derive(Serialize)]
|
||||
pub(super) struct RuntimeMePoolStatePayload {
|
||||
pub(super) generations: RuntimeMePoolStateGenerationData,
|
||||
pub(super) hardswap: RuntimeMePoolStateHardswapData,
|
||||
pub(super) writers: RuntimeMePoolStateWriterData,
|
||||
pub(super) refill: RuntimeMePoolStateRefillData,
|
||||
}
|
||||
|
||||
#[derive(Serialize)]
|
||||
pub(super) struct RuntimeMePoolStateData {
|
||||
pub(super) enabled: bool,
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub(super) reason: Option<&'static str>,
|
||||
pub(super) generated_at_epoch_secs: u64,
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub(super) data: Option<RuntimeMePoolStatePayload>,
|
||||
}
|
||||
|
||||
#[derive(Serialize)]
|
||||
pub(super) struct RuntimeMeQualityCountersData {
|
||||
pub(super) idle_close_by_peer_total: u64,
|
||||
pub(super) reader_eof_total: u64,
|
||||
pub(super) kdf_drift_total: u64,
|
||||
pub(super) kdf_port_only_drift_total: u64,
|
||||
pub(super) reconnect_attempt_total: u64,
|
||||
pub(super) reconnect_success_total: u64,
|
||||
}
|
||||
|
||||
#[derive(Serialize)]
|
||||
pub(super) struct RuntimeMeQualityRouteDropData {
|
||||
pub(super) no_conn_total: u64,
|
||||
pub(super) channel_closed_total: u64,
|
||||
pub(super) queue_full_total: u64,
|
||||
pub(super) queue_full_base_total: u64,
|
||||
pub(super) queue_full_high_total: u64,
|
||||
}
|
||||
|
||||
#[derive(Serialize)]
|
||||
pub(super) struct RuntimeMeQualityFamilyStateData {
|
||||
pub(super) family: &'static str,
|
||||
pub(super) state: &'static str,
|
||||
pub(super) state_since_epoch_secs: u64,
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub(super) suppressed_until_epoch_secs: Option<u64>,
|
||||
pub(super) fail_streak: u32,
|
||||
pub(super) recover_success_streak: u32,
|
||||
}
|
||||
|
||||
#[derive(Serialize)]
|
||||
pub(super) struct RuntimeMeQualityDrainGateData {
|
||||
pub(super) route_quorum_ok: bool,
|
||||
pub(super) redundancy_ok: bool,
|
||||
pub(super) block_reason: &'static str,
|
||||
pub(super) updated_at_epoch_secs: u64,
|
||||
}
|
||||
|
||||
#[derive(Serialize)]
|
||||
pub(super) struct RuntimeMeQualityDcRttData {
|
||||
pub(super) dc: i16,
|
||||
pub(super) rtt_ema_ms: Option<f64>,
|
||||
pub(super) alive_writers: usize,
|
||||
pub(super) required_writers: usize,
|
||||
pub(super) coverage_pct: f64,
|
||||
}
|
||||
|
||||
#[derive(Serialize)]
|
||||
pub(super) struct RuntimeMeQualityPayload {
|
||||
pub(super) counters: RuntimeMeQualityCountersData,
|
||||
pub(super) route_drops: RuntimeMeQualityRouteDropData,
|
||||
pub(super) family_states: Vec<RuntimeMeQualityFamilyStateData>,
|
||||
pub(super) drain_gate: RuntimeMeQualityDrainGateData,
|
||||
pub(super) dc_rtt: Vec<RuntimeMeQualityDcRttData>,
|
||||
}
|
||||
|
||||
#[derive(Serialize)]
|
||||
pub(super) struct RuntimeMeQualityData {
|
||||
pub(super) enabled: bool,
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub(super) reason: Option<&'static str>,
|
||||
pub(super) generated_at_epoch_secs: u64,
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub(super) data: Option<RuntimeMeQualityPayload>,
|
||||
}
|
||||
|
||||
#[derive(Serialize)]
|
||||
pub(super) struct RuntimeUpstreamQualityPolicyData {
|
||||
pub(super) connect_retry_attempts: u32,
|
||||
pub(super) connect_retry_backoff_ms: u64,
|
||||
pub(super) connect_budget_ms: u64,
|
||||
pub(super) unhealthy_fail_threshold: u32,
|
||||
pub(super) connect_failfast_hard_errors: bool,
|
||||
}
|
||||
|
||||
#[derive(Serialize)]
|
||||
pub(super) struct RuntimeUpstreamQualityCountersData {
|
||||
pub(super) connect_attempt_total: u64,
|
||||
pub(super) connect_success_total: u64,
|
||||
pub(super) connect_fail_total: u64,
|
||||
pub(super) connect_failfast_hard_error_total: u64,
|
||||
}
|
||||
|
||||
#[derive(Serialize)]
|
||||
pub(super) struct RuntimeUpstreamQualitySummaryData {
|
||||
pub(super) configured_total: usize,
|
||||
pub(super) healthy_total: usize,
|
||||
pub(super) unhealthy_total: usize,
|
||||
pub(super) direct_total: usize,
|
||||
pub(super) socks4_total: usize,
|
||||
pub(super) socks5_total: usize,
|
||||
pub(super) shadowsocks_total: usize,
|
||||
}
|
||||
|
||||
#[derive(Serialize)]
|
||||
pub(super) struct RuntimeUpstreamQualityDcData {
|
||||
pub(super) dc: i16,
|
||||
pub(super) latency_ema_ms: Option<f64>,
|
||||
pub(super) ip_preference: &'static str,
|
||||
}
|
||||
|
||||
#[derive(Serialize)]
|
||||
pub(super) struct RuntimeUpstreamQualityUpstreamData {
|
||||
pub(super) upstream_id: usize,
|
||||
pub(super) route_kind: &'static str,
|
||||
pub(super) address: String,
|
||||
pub(super) weight: u16,
|
||||
pub(super) scopes: String,
|
||||
pub(super) healthy: bool,
|
||||
pub(super) fails: u32,
|
||||
pub(super) last_check_age_secs: u64,
|
||||
pub(super) effective_latency_ms: Option<f64>,
|
||||
pub(super) dc: Vec<RuntimeUpstreamQualityDcData>,
|
||||
}
|
||||
|
||||
#[derive(Serialize)]
|
||||
pub(super) struct RuntimeUpstreamQualityData {
|
||||
pub(super) enabled: bool,
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub(super) reason: Option<&'static str>,
|
||||
pub(super) generated_at_epoch_secs: u64,
|
||||
pub(super) policy: RuntimeUpstreamQualityPolicyData,
|
||||
pub(super) counters: RuntimeUpstreamQualityCountersData,
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub(super) summary: Option<RuntimeUpstreamQualitySummaryData>,
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub(super) upstreams: Option<Vec<RuntimeUpstreamQualityUpstreamData>>,
|
||||
}
|
||||
|
||||
#[derive(Serialize)]
|
||||
pub(super) struct RuntimeNatStunReflectionData {
|
||||
pub(super) addr: String,
|
||||
pub(super) age_secs: u64,
|
||||
}
|
||||
|
||||
#[derive(Serialize)]
|
||||
pub(super) struct RuntimeNatStunFlagsData {
|
||||
pub(super) nat_probe_enabled: bool,
|
||||
pub(super) nat_probe_disabled_runtime: bool,
|
||||
pub(super) nat_probe_attempts: u8,
|
||||
}
|
||||
|
||||
#[derive(Serialize)]
|
||||
pub(super) struct RuntimeNatStunServersData {
|
||||
pub(super) configured: Vec<String>,
|
||||
pub(super) live: Vec<String>,
|
||||
pub(super) live_total: usize,
|
||||
}
|
||||
|
||||
#[derive(Serialize)]
|
||||
pub(super) struct RuntimeNatStunReflectionBlockData {
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub(super) v4: Option<RuntimeNatStunReflectionData>,
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub(super) v6: Option<RuntimeNatStunReflectionData>,
|
||||
}
|
||||
|
||||
#[derive(Serialize)]
|
||||
pub(super) struct RuntimeNatStunPayload {
|
||||
pub(super) flags: RuntimeNatStunFlagsData,
|
||||
pub(super) servers: RuntimeNatStunServersData,
|
||||
pub(super) reflection: RuntimeNatStunReflectionBlockData,
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub(super) stun_backoff_remaining_ms: Option<u64>,
|
||||
}
|
||||
|
||||
#[derive(Serialize)]
|
||||
pub(super) struct RuntimeNatStunData {
|
||||
pub(super) enabled: bool,
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub(super) reason: Option<&'static str>,
|
||||
pub(super) generated_at_epoch_secs: u64,
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub(super) data: Option<RuntimeNatStunPayload>,
|
||||
}
|
||||
|
||||
pub(super) fn build_security_whitelist_data(cfg: &ProxyConfig) -> SecurityWhitelistData {
|
||||
let entries = cfg
|
||||
.server
|
||||
.api
|
||||
.whitelist
|
||||
.iter()
|
||||
.map(ToString::to_string)
|
||||
.collect::<Vec<_>>();
|
||||
SecurityWhitelistData {
|
||||
generated_at_epoch_secs: now_epoch_secs(),
|
||||
enabled: !entries.is_empty(),
|
||||
entries_total: entries.len(),
|
||||
entries,
|
||||
}
|
||||
}
|
||||
|
||||
/// Builds the ME pool state report: generation numbers, hard-swap status,
/// writer counts broken down by contour (warm/active/draining) and health
/// (healthy/degraded/draining), and in-flight refill work per DC.
///
/// Returns a disabled envelope with `source_unavailable` while the pool
/// object has not been constructed yet.
pub(super) async fn build_runtime_me_pool_state_data(shared: &ApiShared) -> RuntimeMePoolStateData {
    let now_epoch_secs = now_epoch_secs();
    let Some(pool) = shared.me_pool.read().await.clone() else {
        return RuntimeMePoolStateData {
            enabled: false,
            reason: Some(SOURCE_UNAVAILABLE_REASON),
            generated_at_epoch_secs: now_epoch_secs,
            data: None,
        };
    };

    let status = pool.api_status_snapshot().await;
    let runtime = pool.api_runtime_snapshot().await;
    let refill = pool.api_refill_snapshot().await;

    // BTreeSet gives de-duplicated, ascending draining-generation output.
    let mut draining_generations = BTreeSet::<u64>::new();
    let mut contour_warm = 0usize;
    let mut contour_active = 0usize;
    let mut contour_draining = 0usize;
    let mut draining = 0usize;
    let mut degraded = 0usize;
    let mut healthy = 0usize;

    // One pass over writers: draining takes precedence, so "degraded" and
    // "healthy" below count only non-draining writers (the three health
    // buckets partition the set).
    for writer in &status.writers {
        if writer.draining {
            draining_generations.insert(writer.generation);
            draining += 1;
        }
        if writer.degraded && !writer.draining {
            degraded += 1;
        }
        if !writer.degraded && !writer.draining {
            healthy += 1;
        }
        // Contour is driven by the writer's state string; anything that is
        // neither "warm" nor "active" is bucketed as draining.
        match writer.state {
            "warm" => contour_warm += 1,
            "active" => contour_active += 1,
            _ => contour_draining += 1,
        }
    }

    RuntimeMePoolStateData {
        enabled: true,
        reason: None,
        // Timestamp of the pool snapshot, not of this request.
        generated_at_epoch_secs: status.generated_at_epoch_secs,
        data: Some(RuntimeMePoolStatePayload {
            generations: RuntimeMePoolStateGenerationData {
                active_generation: runtime.active_generation,
                warm_generation: runtime.warm_generation,
                pending_hardswap_generation: runtime.pending_hardswap_generation,
                pending_hardswap_age_secs: runtime.pending_hardswap_age_secs,
                draining_generations: draining_generations.into_iter().collect(),
            },
            hardswap: RuntimeMePoolStateHardswapData {
                enabled: runtime.hardswap_enabled,
                // Generation 0 is the sentinel for "no hard-swap pending".
                pending: runtime.pending_hardswap_generation != 0,
            },
            writers: RuntimeMePoolStateWriterData {
                total: status.writers.len(),
                alive_non_draining: status.writers.len().saturating_sub(draining),
                draining,
                degraded,
                contour: RuntimeMePoolStateWriterContourData {
                    warm: contour_warm,
                    active: contour_active,
                    draining: contour_draining,
                },
                health: RuntimeMePoolStateWriterHealthData {
                    healthy,
                    degraded,
                    draining,
                },
            },
            refill: RuntimeMePoolStateRefillData {
                inflight_endpoints_total: refill.inflight_endpoints_total,
                inflight_dc_total: refill.inflight_dc_total,
                by_dc: refill
                    .by_dc
                    .into_iter()
                    .map(|entry| RuntimeMePoolStateRefillDcData {
                        dc: entry.dc,
                        family: entry.family,
                        inflight: entry.inflight,
                    })
                    .collect(),
            },
        }),
    }
}
|
||||
|
||||
/// Builds the ME link-quality report: cumulative counters, route-drop
/// breakdown, per-family (v4/v6) circuit states, the drain-gate verdict, and
/// per-DC RTT/coverage figures.
///
/// Returns a disabled envelope with `source_unavailable` while the pool
/// object has not been constructed yet.
pub(super) async fn build_runtime_me_quality_data(shared: &ApiShared) -> RuntimeMeQualityData {
    let now_epoch_secs = now_epoch_secs();
    let Some(pool) = shared.me_pool.read().await.clone() else {
        return RuntimeMeQualityData {
            enabled: false,
            reason: Some(SOURCE_UNAVAILABLE_REASON),
            generated_at_epoch_secs: now_epoch_secs,
            data: None,
        };
    };

    let status = pool.api_status_snapshot().await;
    // Per-address-family connection state machine snapshot -> API shape.
    let family_states = pool
        .api_family_state_snapshot()
        .into_iter()
        .map(|entry| RuntimeMeQualityFamilyStateData {
            family: entry.family,
            state: entry.state,
            state_since_epoch_secs: entry.state_since_epoch_secs,
            suppressed_until_epoch_secs: entry.suppressed_until_epoch_secs,
            fail_streak: entry.fail_streak,
            recover_success_streak: entry.recover_success_streak,
        })
        .collect();
    let drain_gate_snapshot = pool.api_drain_gate_snapshot();
    RuntimeMeQualityData {
        enabled: true,
        reason: None,
        // Timestamp of the pool snapshot, not of this request.
        generated_at_epoch_secs: status.generated_at_epoch_secs,
        data: Some(RuntimeMeQualityPayload {
            // Cumulative totals sourced from the shared stats registry.
            counters: RuntimeMeQualityCountersData {
                idle_close_by_peer_total: shared.stats.get_me_idle_close_by_peer_total(),
                reader_eof_total: shared.stats.get_me_reader_eof_total(),
                kdf_drift_total: shared.stats.get_me_kdf_drift_total(),
                kdf_port_only_drift_total: shared.stats.get_me_kdf_port_only_drift_total(),
                reconnect_attempt_total: shared.stats.get_me_reconnect_attempts(),
                reconnect_success_total: shared.stats.get_me_reconnect_success(),
            },
            route_drops: RuntimeMeQualityRouteDropData {
                no_conn_total: shared.stats.get_me_route_drop_no_conn(),
                channel_closed_total: shared.stats.get_me_route_drop_channel_closed(),
                queue_full_total: shared.stats.get_me_route_drop_queue_full(),
                queue_full_base_total: shared.stats.get_me_route_drop_queue_full_base(),
                queue_full_high_total: shared.stats.get_me_route_drop_queue_full_high(),
            },
            family_states,
            drain_gate: RuntimeMeQualityDrainGateData {
                route_quorum_ok: drain_gate_snapshot.route_quorum_ok,
                redundancy_ok: drain_gate_snapshot.redundancy_ok,
                block_reason: drain_gate_snapshot.block_reason,
                updated_at_epoch_secs: drain_gate_snapshot.updated_at_epoch_secs,
            },
            dc_rtt: status
                .dcs
                .into_iter()
                .map(|dc| RuntimeMeQualityDcRttData {
                    dc: dc.dc,
                    rtt_ema_ms: dc.rtt_ms,
                    alive_writers: dc.alive_writers,
                    required_writers: dc.required_writers,
                    coverage_pct: dc.coverage_pct,
                })
                .collect(),
        }),
    }
}
|
||||
|
||||
pub(super) async fn build_runtime_upstream_quality_data(
|
||||
shared: &ApiShared,
|
||||
) -> RuntimeUpstreamQualityData {
|
||||
let generated_at_epoch_secs = now_epoch_secs();
|
||||
let policy = shared.upstream_manager.api_policy_snapshot();
|
||||
let counters = RuntimeUpstreamQualityCountersData {
|
||||
connect_attempt_total: shared.stats.get_upstream_connect_attempt_total(),
|
||||
connect_success_total: shared.stats.get_upstream_connect_success_total(),
|
||||
connect_fail_total: shared.stats.get_upstream_connect_fail_total(),
|
||||
connect_failfast_hard_error_total: shared
|
||||
.stats
|
||||
.get_upstream_connect_failfast_hard_error_total(),
|
||||
};
|
||||
|
||||
let Some(snapshot) = shared.upstream_manager.try_api_snapshot() else {
|
||||
return RuntimeUpstreamQualityData {
|
||||
enabled: false,
|
||||
reason: Some(SOURCE_UNAVAILABLE_REASON),
|
||||
generated_at_epoch_secs,
|
||||
policy: RuntimeUpstreamQualityPolicyData {
|
||||
connect_retry_attempts: policy.connect_retry_attempts,
|
||||
connect_retry_backoff_ms: policy.connect_retry_backoff_ms,
|
||||
connect_budget_ms: policy.connect_budget_ms,
|
||||
unhealthy_fail_threshold: policy.unhealthy_fail_threshold,
|
||||
connect_failfast_hard_errors: policy.connect_failfast_hard_errors,
|
||||
},
|
||||
counters,
|
||||
summary: None,
|
||||
upstreams: None,
|
||||
};
|
||||
};
|
||||
|
||||
RuntimeUpstreamQualityData {
|
||||
enabled: true,
|
||||
reason: None,
|
||||
generated_at_epoch_secs,
|
||||
policy: RuntimeUpstreamQualityPolicyData {
|
||||
connect_retry_attempts: policy.connect_retry_attempts,
|
||||
connect_retry_backoff_ms: policy.connect_retry_backoff_ms,
|
||||
connect_budget_ms: policy.connect_budget_ms,
|
||||
unhealthy_fail_threshold: policy.unhealthy_fail_threshold,
|
||||
connect_failfast_hard_errors: policy.connect_failfast_hard_errors,
|
||||
},
|
||||
counters,
|
||||
summary: Some(RuntimeUpstreamQualitySummaryData {
|
||||
configured_total: snapshot.summary.configured_total,
|
||||
healthy_total: snapshot.summary.healthy_total,
|
||||
unhealthy_total: snapshot.summary.unhealthy_total,
|
||||
direct_total: snapshot.summary.direct_total,
|
||||
socks4_total: snapshot.summary.socks4_total,
|
||||
socks5_total: snapshot.summary.socks5_total,
|
||||
shadowsocks_total: snapshot.summary.shadowsocks_total,
|
||||
}),
|
||||
upstreams: Some(
|
||||
snapshot
|
||||
.upstreams
|
||||
.into_iter()
|
||||
.map(|upstream| RuntimeUpstreamQualityUpstreamData {
|
||||
upstream_id: upstream.upstream_id,
|
||||
route_kind: match upstream.route_kind {
|
||||
crate::transport::UpstreamRouteKind::Direct => "direct",
|
||||
crate::transport::UpstreamRouteKind::Socks4 => "socks4",
|
||||
crate::transport::UpstreamRouteKind::Socks5 => "socks5",
|
||||
crate::transport::UpstreamRouteKind::Shadowsocks => "shadowsocks",
|
||||
},
|
||||
address: upstream.address,
|
||||
weight: upstream.weight,
|
||||
scopes: upstream.scopes,
|
||||
healthy: upstream.healthy,
|
||||
fails: upstream.fails,
|
||||
last_check_age_secs: upstream.last_check_age_secs,
|
||||
effective_latency_ms: upstream.effective_latency_ms,
|
||||
dc: upstream
|
||||
.dc
|
||||
.into_iter()
|
||||
.map(|dc| RuntimeUpstreamQualityDcData {
|
||||
dc: dc.dc,
|
||||
latency_ema_ms: dc.latency_ema_ms,
|
||||
ip_preference: match dc.ip_preference {
|
||||
crate::transport::upstream::IpPreference::Unknown => "unknown",
|
||||
crate::transport::upstream::IpPreference::PreferV6 => "prefer_v6",
|
||||
crate::transport::upstream::IpPreference::PreferV4 => "prefer_v4",
|
||||
crate::transport::upstream::IpPreference::BothWork => "both_work",
|
||||
crate::transport::upstream::IpPreference::Unavailable => {
|
||||
"unavailable"
|
||||
}
|
||||
},
|
||||
})
|
||||
.collect(),
|
||||
})
|
||||
.collect(),
|
||||
),
|
||||
}
|
||||
}
|
||||
|
||||
pub(super) async fn build_runtime_nat_stun_data(shared: &ApiShared) -> RuntimeNatStunData {
|
||||
let now_epoch_secs = now_epoch_secs();
|
||||
let Some(pool) = shared.me_pool.read().await.clone() else {
|
||||
return RuntimeNatStunData {
|
||||
enabled: false,
|
||||
reason: Some(SOURCE_UNAVAILABLE_REASON),
|
||||
generated_at_epoch_secs: now_epoch_secs,
|
||||
data: None,
|
||||
};
|
||||
};
|
||||
|
||||
let snapshot = pool.api_nat_stun_snapshot().await;
|
||||
RuntimeNatStunData {
|
||||
enabled: true,
|
||||
reason: None,
|
||||
generated_at_epoch_secs: now_epoch_secs,
|
||||
data: Some(RuntimeNatStunPayload {
|
||||
flags: RuntimeNatStunFlagsData {
|
||||
nat_probe_enabled: snapshot.nat_probe_enabled,
|
||||
nat_probe_disabled_runtime: snapshot.nat_probe_disabled_runtime,
|
||||
nat_probe_attempts: snapshot.nat_probe_attempts,
|
||||
},
|
||||
servers: RuntimeNatStunServersData {
|
||||
configured: snapshot.configured_servers,
|
||||
live: snapshot.live_servers.clone(),
|
||||
live_total: snapshot.live_servers.len(),
|
||||
},
|
||||
reflection: RuntimeNatStunReflectionBlockData {
|
||||
v4: snapshot
|
||||
.reflection_v4
|
||||
.map(|entry| RuntimeNatStunReflectionData {
|
||||
addr: entry.addr.to_string(),
|
||||
age_secs: entry.age_secs,
|
||||
}),
|
||||
v6: snapshot
|
||||
.reflection_v6
|
||||
.map(|entry| RuntimeNatStunReflectionData {
|
||||
addr: entry.addr.to_string(),
|
||||
age_secs: entry.age_secs,
|
||||
}),
|
||||
},
|
||||
stun_backoff_remaining_ms: snapshot.stun_backoff_remaining_ms,
|
||||
}),
|
||||
}
|
||||
}
|
||||
|
||||
fn now_epoch_secs() -> u64 {
|
||||
SystemTime::now()
|
||||
.duration_since(UNIX_EPOCH)
|
||||
.unwrap_or_default()
|
||||
.as_secs()
|
||||
}
|
||||
300
src/api/runtime_selftest.rs
Normal file
300
src/api/runtime_selftest.rs
Normal file
@@ -0,0 +1,300 @@
|
||||
use std::collections::HashMap;
|
||||
use std::net::IpAddr;
|
||||
use std::sync::{Mutex, OnceLock};
|
||||
use std::time::{SystemTime, UNIX_EPOCH};
|
||||
|
||||
use serde::Serialize;
|
||||
|
||||
use crate::config::{ProxyConfig, UpstreamType};
|
||||
use crate::network::probe::{detect_interface_ipv4, detect_interface_ipv6, is_bogon};
|
||||
use crate::transport::UpstreamRouteKind;
|
||||
use crate::transport::middle_proxy::{bnd_snapshot, timeskew_snapshot, upstream_bnd_snapshots};
|
||||
|
||||
use super::ApiShared;
|
||||
|
||||
const SOURCE_UNAVAILABLE_REASON: &str = "source_unavailable";
|
||||
const KDF_EWMA_TAU_SECS: f64 = 600.0;
|
||||
const KDF_EWMA_THRESHOLD_ERRORS_PER_MIN: f64 = 0.30;
|
||||
const TIMESKEW_THRESHOLD_SECS: u64 = 60;
|
||||
|
||||
#[derive(Serialize)]
|
||||
pub(super) struct RuntimeMeSelftestKdfData {
|
||||
pub(super) state: &'static str,
|
||||
pub(super) ewma_errors_per_min: f64,
|
||||
pub(super) threshold_errors_per_min: f64,
|
||||
pub(super) errors_total: u64,
|
||||
}
|
||||
|
||||
#[derive(Serialize)]
|
||||
pub(super) struct RuntimeMeSelftestTimeskewData {
|
||||
pub(super) state: &'static str,
|
||||
pub(super) max_skew_secs_15m: Option<u64>,
|
||||
pub(super) samples_15m: usize,
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub(super) last_skew_secs: Option<u64>,
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub(super) last_source: Option<&'static str>,
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub(super) last_seen_age_secs: Option<u64>,
|
||||
}
|
||||
|
||||
#[derive(Serialize)]
|
||||
pub(super) struct RuntimeMeSelftestIpFamilyData {
|
||||
pub(super) addr: String,
|
||||
pub(super) state: &'static str,
|
||||
}
|
||||
|
||||
#[derive(Serialize)]
|
||||
pub(super) struct RuntimeMeSelftestIpData {
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub(super) v4: Option<RuntimeMeSelftestIpFamilyData>,
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub(super) v6: Option<RuntimeMeSelftestIpFamilyData>,
|
||||
}
|
||||
|
||||
#[derive(Serialize)]
|
||||
pub(super) struct RuntimeMeSelftestPidData {
|
||||
pub(super) pid: u32,
|
||||
pub(super) state: &'static str,
|
||||
}
|
||||
|
||||
#[derive(Serialize)]
|
||||
pub(super) struct RuntimeMeSelftestBndData {
|
||||
pub(super) addr_state: &'static str,
|
||||
pub(super) port_state: &'static str,
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub(super) last_addr: Option<String>,
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub(super) last_seen_age_secs: Option<u64>,
|
||||
}
|
||||
|
||||
#[derive(Serialize)]
|
||||
pub(super) struct RuntimeMeSelftestUpstreamData {
|
||||
pub(super) upstream_id: usize,
|
||||
pub(super) route_kind: &'static str,
|
||||
pub(super) address: String,
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub(super) bnd: Option<RuntimeMeSelftestBndData>,
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub(super) ip: Option<String>,
|
||||
}
|
||||
|
||||
#[derive(Serialize)]
|
||||
pub(super) struct RuntimeMeSelftestPayload {
|
||||
pub(super) kdf: RuntimeMeSelftestKdfData,
|
||||
pub(super) timeskew: RuntimeMeSelftestTimeskewData,
|
||||
pub(super) ip: RuntimeMeSelftestIpData,
|
||||
pub(super) pid: RuntimeMeSelftestPidData,
|
||||
pub(super) bnd: Option<RuntimeMeSelftestBndData>,
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub(super) upstreams: Option<Vec<RuntimeMeSelftestUpstreamData>>,
|
||||
}
|
||||
|
||||
#[derive(Serialize)]
|
||||
pub(super) struct RuntimeMeSelftestData {
|
||||
pub(super) enabled: bool,
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub(super) reason: Option<&'static str>,
|
||||
pub(super) generated_at_epoch_secs: u64,
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub(super) data: Option<RuntimeMeSelftestPayload>,
|
||||
}
|
||||
|
||||
#[derive(Default)]
|
||||
struct KdfEwmaState {
|
||||
initialized: bool,
|
||||
last_epoch_secs: u64,
|
||||
last_total_errors: u64,
|
||||
ewma_errors_per_min: f64,
|
||||
}
|
||||
|
||||
static KDF_EWMA_STATE: OnceLock<Mutex<KdfEwmaState>> = OnceLock::new();
|
||||
|
||||
fn kdf_ewma_state() -> &'static Mutex<KdfEwmaState> {
|
||||
KDF_EWMA_STATE.get_or_init(|| Mutex::new(KdfEwmaState::default()))
|
||||
}
|
||||
|
||||
pub(super) async fn build_runtime_me_selftest_data(
|
||||
shared: &ApiShared,
|
||||
cfg: &ProxyConfig,
|
||||
) -> RuntimeMeSelftestData {
|
||||
let now_epoch_secs = now_epoch_secs();
|
||||
if shared.me_pool.read().await.is_none() {
|
||||
return RuntimeMeSelftestData {
|
||||
enabled: false,
|
||||
reason: Some(SOURCE_UNAVAILABLE_REASON),
|
||||
generated_at_epoch_secs: now_epoch_secs,
|
||||
data: None,
|
||||
};
|
||||
}
|
||||
|
||||
let kdf_errors_total = shared
|
||||
.stats
|
||||
.get_me_kdf_drift_total()
|
||||
.saturating_add(shared.stats.get_me_socks_kdf_strict_reject());
|
||||
let kdf_ewma = update_kdf_ewma(now_epoch_secs, kdf_errors_total);
|
||||
let kdf_state = if kdf_ewma >= KDF_EWMA_THRESHOLD_ERRORS_PER_MIN {
|
||||
"error"
|
||||
} else {
|
||||
"ok"
|
||||
};
|
||||
|
||||
let skew = timeskew_snapshot();
|
||||
let timeskew_state = if skew.max_skew_secs_15m.unwrap_or(0) > TIMESKEW_THRESHOLD_SECS {
|
||||
"error"
|
||||
} else {
|
||||
"ok"
|
||||
};
|
||||
|
||||
let ip_v4 = detect_interface_ipv4().map(|ip| RuntimeMeSelftestIpFamilyData {
|
||||
addr: ip.to_string(),
|
||||
state: classify_ip(IpAddr::V4(ip)),
|
||||
});
|
||||
let ip_v6 = detect_interface_ipv6().map(|ip| RuntimeMeSelftestIpFamilyData {
|
||||
addr: ip.to_string(),
|
||||
state: classify_ip(IpAddr::V6(ip)),
|
||||
});
|
||||
|
||||
let pid = std::process::id();
|
||||
let pid_state = if pid == 1 { "one" } else { "non-one" };
|
||||
|
||||
let has_socks_upstreams = cfg.upstreams.iter().any(|upstream| {
|
||||
upstream.enabled
|
||||
&& matches!(
|
||||
upstream.upstream_type,
|
||||
UpstreamType::Socks4 { .. } | UpstreamType::Socks5 { .. }
|
||||
)
|
||||
});
|
||||
|
||||
let bnd = if has_socks_upstreams {
|
||||
let snapshot = bnd_snapshot();
|
||||
Some(RuntimeMeSelftestBndData {
|
||||
addr_state: snapshot.addr_status,
|
||||
port_state: snapshot.port_status,
|
||||
last_addr: snapshot.last_addr.map(|value| value.to_string()),
|
||||
last_seen_age_secs: snapshot.last_seen_age_secs,
|
||||
})
|
||||
} else {
|
||||
None
|
||||
};
|
||||
let upstreams = build_upstream_selftest_data(shared);
|
||||
|
||||
RuntimeMeSelftestData {
|
||||
enabled: true,
|
||||
reason: None,
|
||||
generated_at_epoch_secs: now_epoch_secs,
|
||||
data: Some(RuntimeMeSelftestPayload {
|
||||
kdf: RuntimeMeSelftestKdfData {
|
||||
state: kdf_state,
|
||||
ewma_errors_per_min: round3(kdf_ewma),
|
||||
threshold_errors_per_min: KDF_EWMA_THRESHOLD_ERRORS_PER_MIN,
|
||||
errors_total: kdf_errors_total,
|
||||
},
|
||||
timeskew: RuntimeMeSelftestTimeskewData {
|
||||
state: timeskew_state,
|
||||
max_skew_secs_15m: skew.max_skew_secs_15m,
|
||||
samples_15m: skew.samples_15m,
|
||||
last_skew_secs: skew.last_skew_secs,
|
||||
last_source: skew.last_source,
|
||||
last_seen_age_secs: skew.last_seen_age_secs,
|
||||
},
|
||||
ip: RuntimeMeSelftestIpData {
|
||||
v4: ip_v4,
|
||||
v6: ip_v6,
|
||||
},
|
||||
pid: RuntimeMeSelftestPidData {
|
||||
pid,
|
||||
state: pid_state,
|
||||
},
|
||||
bnd,
|
||||
upstreams,
|
||||
}),
|
||||
}
|
||||
}
|
||||
|
||||
fn build_upstream_selftest_data(shared: &ApiShared) -> Option<Vec<RuntimeMeSelftestUpstreamData>> {
|
||||
let snapshot = shared.upstream_manager.try_api_snapshot()?;
|
||||
if snapshot.summary.configured_total <= 1 {
|
||||
return None;
|
||||
}
|
||||
|
||||
let mut upstream_bnd_by_id: HashMap<usize, _> = upstream_bnd_snapshots()
|
||||
.into_iter()
|
||||
.map(|entry| (entry.upstream_id, entry))
|
||||
.collect();
|
||||
let mut rows = Vec::with_capacity(snapshot.upstreams.len());
|
||||
for upstream in snapshot.upstreams {
|
||||
let upstream_bnd = upstream_bnd_by_id.remove(&upstream.upstream_id);
|
||||
rows.push(RuntimeMeSelftestUpstreamData {
|
||||
upstream_id: upstream.upstream_id,
|
||||
route_kind: map_route_kind(upstream.route_kind),
|
||||
address: upstream.address,
|
||||
bnd: upstream_bnd.as_ref().map(|entry| RuntimeMeSelftestBndData {
|
||||
addr_state: entry.addr_status,
|
||||
port_state: entry.port_status,
|
||||
last_addr: entry.last_addr.map(|value| value.to_string()),
|
||||
last_seen_age_secs: entry.last_seen_age_secs,
|
||||
}),
|
||||
ip: upstream_bnd.and_then(|entry| entry.last_ip.map(|value| value.to_string())),
|
||||
});
|
||||
}
|
||||
Some(rows)
|
||||
}
|
||||
|
||||
fn update_kdf_ewma(now_epoch_secs: u64, total_errors: u64) -> f64 {
|
||||
let Ok(mut guard) = kdf_ewma_state().lock() else {
|
||||
return 0.0;
|
||||
};
|
||||
|
||||
if !guard.initialized {
|
||||
guard.initialized = true;
|
||||
guard.last_epoch_secs = now_epoch_secs;
|
||||
guard.last_total_errors = total_errors;
|
||||
guard.ewma_errors_per_min = 0.0;
|
||||
return guard.ewma_errors_per_min;
|
||||
}
|
||||
|
||||
let dt_secs = now_epoch_secs.saturating_sub(guard.last_epoch_secs);
|
||||
if dt_secs == 0 {
|
||||
return guard.ewma_errors_per_min;
|
||||
}
|
||||
|
||||
let delta_errors = total_errors.saturating_sub(guard.last_total_errors);
|
||||
let instant_rate_per_min = (delta_errors as f64) * 60.0 / (dt_secs as f64);
|
||||
let alpha = 1.0 - f64::exp(-(dt_secs as f64) / KDF_EWMA_TAU_SECS);
|
||||
guard.ewma_errors_per_min =
|
||||
guard.ewma_errors_per_min + alpha * (instant_rate_per_min - guard.ewma_errors_per_min);
|
||||
guard.last_epoch_secs = now_epoch_secs;
|
||||
guard.last_total_errors = total_errors;
|
||||
guard.ewma_errors_per_min
|
||||
}
|
||||
|
||||
fn classify_ip(ip: IpAddr) -> &'static str {
|
||||
if ip.is_loopback() {
|
||||
return "loopback";
|
||||
}
|
||||
if is_bogon(ip) {
|
||||
return "bogon";
|
||||
}
|
||||
"good"
|
||||
}
|
||||
|
||||
fn map_route_kind(value: UpstreamRouteKind) -> &'static str {
|
||||
match value {
|
||||
UpstreamRouteKind::Direct => "direct",
|
||||
UpstreamRouteKind::Socks4 => "socks4",
|
||||
UpstreamRouteKind::Socks5 => "socks5",
|
||||
UpstreamRouteKind::Shadowsocks => "shadowsocks",
|
||||
}
|
||||
}
|
||||
|
||||
fn round3(value: f64) -> f64 {
|
||||
(value * 1000.0).round() / 1000.0
|
||||
}
|
||||
|
||||
fn now_epoch_secs() -> u64 {
|
||||
SystemTime::now()
|
||||
.duration_since(UNIX_EPOCH)
|
||||
.unwrap_or_default()
|
||||
.as_secs()
|
||||
}
|
||||
@@ -2,11 +2,14 @@ use std::time::{Duration, Instant, SystemTime, UNIX_EPOCH};
|
||||
|
||||
use crate::config::ApiConfig;
|
||||
use crate::stats::Stats;
|
||||
use crate::transport::UpstreamRouteKind;
|
||||
use crate::transport::upstream::IpPreference;
|
||||
|
||||
use super::ApiShared;
|
||||
use super::model::{
|
||||
DcStatus, DcStatusData, MeWriterStatus, MeWritersData, MeWritersSummary, MinimalAllData,
|
||||
MinimalAllPayload, MinimalDcPathData, MinimalMeRuntimeData, MinimalQuarantineData,
|
||||
DcEndpointWriters, DcStatus, DcStatusData, MeWriterStatus, MeWritersData, MeWritersSummary,
|
||||
MinimalAllData, MinimalAllPayload, MinimalDcPathData, MinimalMeRuntimeData,
|
||||
MinimalQuarantineData, UpstreamDcStatus, UpstreamStatus, UpstreamSummaryData, UpstreamsData,
|
||||
ZeroAllData, ZeroCodeCount, ZeroCoreData, ZeroDesyncData, ZeroMiddleProxyData, ZeroPoolData,
|
||||
ZeroUpstreamData,
|
||||
};
|
||||
@@ -41,32 +44,7 @@ pub(super) fn build_zero_all_data(stats: &Stats, configured_users: usize) -> Zer
|
||||
telemetry_user_enabled: telemetry.user_enabled,
|
||||
telemetry_me_level: telemetry.me_level.to_string(),
|
||||
},
|
||||
upstream: ZeroUpstreamData {
|
||||
connect_attempt_total: stats.get_upstream_connect_attempt_total(),
|
||||
connect_success_total: stats.get_upstream_connect_success_total(),
|
||||
connect_fail_total: stats.get_upstream_connect_fail_total(),
|
||||
connect_failfast_hard_error_total: stats.get_upstream_connect_failfast_hard_error_total(),
|
||||
connect_attempts_bucket_1: stats.get_upstream_connect_attempts_bucket_1(),
|
||||
connect_attempts_bucket_2: stats.get_upstream_connect_attempts_bucket_2(),
|
||||
connect_attempts_bucket_3_4: stats.get_upstream_connect_attempts_bucket_3_4(),
|
||||
connect_attempts_bucket_gt_4: stats.get_upstream_connect_attempts_bucket_gt_4(),
|
||||
connect_duration_success_bucket_le_100ms: stats
|
||||
.get_upstream_connect_duration_success_bucket_le_100ms(),
|
||||
connect_duration_success_bucket_101_500ms: stats
|
||||
.get_upstream_connect_duration_success_bucket_101_500ms(),
|
||||
connect_duration_success_bucket_501_1000ms: stats
|
||||
.get_upstream_connect_duration_success_bucket_501_1000ms(),
|
||||
connect_duration_success_bucket_gt_1000ms: stats
|
||||
.get_upstream_connect_duration_success_bucket_gt_1000ms(),
|
||||
connect_duration_fail_bucket_le_100ms: stats
|
||||
.get_upstream_connect_duration_fail_bucket_le_100ms(),
|
||||
connect_duration_fail_bucket_101_500ms: stats
|
||||
.get_upstream_connect_duration_fail_bucket_101_500ms(),
|
||||
connect_duration_fail_bucket_501_1000ms: stats
|
||||
.get_upstream_connect_duration_fail_bucket_501_1000ms(),
|
||||
connect_duration_fail_bucket_gt_1000ms: stats
|
||||
.get_upstream_connect_duration_fail_bucket_gt_1000ms(),
|
||||
},
|
||||
upstream: build_zero_upstream_data(stats),
|
||||
middle_proxy: ZeroMiddleProxyData {
|
||||
keepalive_sent_total: stats.get_me_keepalive_sent(),
|
||||
keepalive_failed_total: stats.get_me_keepalive_failed(),
|
||||
@@ -90,6 +68,25 @@ pub(super) fn build_zero_all_data(stats: &Stats, configured_users: usize) -> Zer
|
||||
route_drop_queue_full_total: stats.get_me_route_drop_queue_full(),
|
||||
route_drop_queue_full_base_total: stats.get_me_route_drop_queue_full_base(),
|
||||
route_drop_queue_full_high_total: stats.get_me_route_drop_queue_full_high(),
|
||||
d2c_batches_total: stats.get_me_d2c_batches_total(),
|
||||
d2c_batch_frames_total: stats.get_me_d2c_batch_frames_total(),
|
||||
d2c_batch_bytes_total: stats.get_me_d2c_batch_bytes_total(),
|
||||
d2c_flush_reason_queue_drain_total: stats.get_me_d2c_flush_reason_queue_drain_total(),
|
||||
d2c_flush_reason_batch_frames_total: stats.get_me_d2c_flush_reason_batch_frames_total(),
|
||||
d2c_flush_reason_batch_bytes_total: stats.get_me_d2c_flush_reason_batch_bytes_total(),
|
||||
d2c_flush_reason_max_delay_total: stats.get_me_d2c_flush_reason_max_delay_total(),
|
||||
d2c_flush_reason_ack_immediate_total: stats
|
||||
.get_me_d2c_flush_reason_ack_immediate_total(),
|
||||
d2c_flush_reason_close_total: stats.get_me_d2c_flush_reason_close_total(),
|
||||
d2c_data_frames_total: stats.get_me_d2c_data_frames_total(),
|
||||
d2c_ack_frames_total: stats.get_me_d2c_ack_frames_total(),
|
||||
d2c_payload_bytes_total: stats.get_me_d2c_payload_bytes_total(),
|
||||
d2c_write_mode_coalesced_total: stats.get_me_d2c_write_mode_coalesced_total(),
|
||||
d2c_write_mode_split_total: stats.get_me_d2c_write_mode_split_total(),
|
||||
d2c_quota_reject_pre_write_total: stats.get_me_d2c_quota_reject_pre_write_total(),
|
||||
d2c_quota_reject_post_write_total: stats.get_me_d2c_quota_reject_post_write_total(),
|
||||
d2c_frame_buf_shrink_total: stats.get_me_d2c_frame_buf_shrink_total(),
|
||||
d2c_frame_buf_shrink_bytes_total: stats.get_me_d2c_frame_buf_shrink_bytes_total(),
|
||||
socks_kdf_strict_reject_total: stats.get_me_socks_kdf_strict_reject(),
|
||||
socks_kdf_compat_fallback_total: stats.get_me_socks_kdf_compat_fallback(),
|
||||
endpoint_quarantine_total: stats.get_me_endpoint_quarantine_total(),
|
||||
@@ -140,6 +137,104 @@ pub(super) fn build_zero_all_data(stats: &Stats, configured_users: usize) -> Zer
|
||||
}
|
||||
}
|
||||
|
||||
fn build_zero_upstream_data(stats: &Stats) -> ZeroUpstreamData {
|
||||
ZeroUpstreamData {
|
||||
connect_attempt_total: stats.get_upstream_connect_attempt_total(),
|
||||
connect_success_total: stats.get_upstream_connect_success_total(),
|
||||
connect_fail_total: stats.get_upstream_connect_fail_total(),
|
||||
connect_failfast_hard_error_total: stats.get_upstream_connect_failfast_hard_error_total(),
|
||||
connect_attempts_bucket_1: stats.get_upstream_connect_attempts_bucket_1(),
|
||||
connect_attempts_bucket_2: stats.get_upstream_connect_attempts_bucket_2(),
|
||||
connect_attempts_bucket_3_4: stats.get_upstream_connect_attempts_bucket_3_4(),
|
||||
connect_attempts_bucket_gt_4: stats.get_upstream_connect_attempts_bucket_gt_4(),
|
||||
connect_duration_success_bucket_le_100ms: stats
|
||||
.get_upstream_connect_duration_success_bucket_le_100ms(),
|
||||
connect_duration_success_bucket_101_500ms: stats
|
||||
.get_upstream_connect_duration_success_bucket_101_500ms(),
|
||||
connect_duration_success_bucket_501_1000ms: stats
|
||||
.get_upstream_connect_duration_success_bucket_501_1000ms(),
|
||||
connect_duration_success_bucket_gt_1000ms: stats
|
||||
.get_upstream_connect_duration_success_bucket_gt_1000ms(),
|
||||
connect_duration_fail_bucket_le_100ms: stats
|
||||
.get_upstream_connect_duration_fail_bucket_le_100ms(),
|
||||
connect_duration_fail_bucket_101_500ms: stats
|
||||
.get_upstream_connect_duration_fail_bucket_101_500ms(),
|
||||
connect_duration_fail_bucket_501_1000ms: stats
|
||||
.get_upstream_connect_duration_fail_bucket_501_1000ms(),
|
||||
connect_duration_fail_bucket_gt_1000ms: stats
|
||||
.get_upstream_connect_duration_fail_bucket_gt_1000ms(),
|
||||
}
|
||||
}
|
||||
|
||||
pub(super) fn build_upstreams_data(shared: &ApiShared, api_cfg: &ApiConfig) -> UpstreamsData {
|
||||
let generated_at_epoch_secs = now_epoch_secs();
|
||||
let zero = build_zero_upstream_data(&shared.stats);
|
||||
if !api_cfg.minimal_runtime_enabled {
|
||||
return UpstreamsData {
|
||||
enabled: false,
|
||||
reason: Some(FEATURE_DISABLED_REASON),
|
||||
generated_at_epoch_secs,
|
||||
zero,
|
||||
summary: None,
|
||||
upstreams: None,
|
||||
};
|
||||
}
|
||||
|
||||
let Some(snapshot) = shared.upstream_manager.try_api_snapshot() else {
|
||||
return UpstreamsData {
|
||||
enabled: true,
|
||||
reason: Some(SOURCE_UNAVAILABLE_REASON),
|
||||
generated_at_epoch_secs,
|
||||
zero,
|
||||
summary: None,
|
||||
upstreams: None,
|
||||
};
|
||||
};
|
||||
|
||||
let summary = UpstreamSummaryData {
|
||||
configured_total: snapshot.summary.configured_total,
|
||||
healthy_total: snapshot.summary.healthy_total,
|
||||
unhealthy_total: snapshot.summary.unhealthy_total,
|
||||
direct_total: snapshot.summary.direct_total,
|
||||
socks4_total: snapshot.summary.socks4_total,
|
||||
socks5_total: snapshot.summary.socks5_total,
|
||||
shadowsocks_total: snapshot.summary.shadowsocks_total,
|
||||
};
|
||||
let upstreams = snapshot
|
||||
.upstreams
|
||||
.into_iter()
|
||||
.map(|upstream| UpstreamStatus {
|
||||
upstream_id: upstream.upstream_id,
|
||||
route_kind: map_route_kind(upstream.route_kind),
|
||||
address: upstream.address,
|
||||
weight: upstream.weight,
|
||||
scopes: upstream.scopes,
|
||||
healthy: upstream.healthy,
|
||||
fails: upstream.fails,
|
||||
last_check_age_secs: upstream.last_check_age_secs,
|
||||
effective_latency_ms: upstream.effective_latency_ms,
|
||||
dc: upstream
|
||||
.dc
|
||||
.into_iter()
|
||||
.map(|dc| UpstreamDcStatus {
|
||||
dc: dc.dc,
|
||||
latency_ema_ms: dc.latency_ema_ms,
|
||||
ip_preference: map_ip_preference(dc.ip_preference),
|
||||
})
|
||||
.collect(),
|
||||
})
|
||||
.collect();
|
||||
|
||||
UpstreamsData {
|
||||
enabled: true,
|
||||
reason: None,
|
||||
generated_at_epoch_secs,
|
||||
zero,
|
||||
summary: Some(summary),
|
||||
upstreams: Some(upstreams),
|
||||
}
|
||||
}
|
||||
|
||||
pub(super) async fn build_minimal_all_data(
|
||||
shared: &ApiShared,
|
||||
api_cfg: &ApiConfig,
|
||||
@@ -223,7 +318,7 @@ async fn get_minimal_payload_cached(
|
||||
}
|
||||
}
|
||||
|
||||
let pool = shared.me_pool.as_ref()?;
|
||||
let pool = shared.me_pool.read().await.clone()?;
|
||||
let status = pool.api_status_snapshot().await;
|
||||
let runtime = pool.api_runtime_snapshot().await;
|
||||
let generated_at_epoch_secs = status.generated_at_epoch_secs;
|
||||
@@ -240,6 +335,8 @@ async fn get_minimal_payload_cached(
|
||||
required_writers: status.required_writers,
|
||||
alive_writers: status.alive_writers,
|
||||
coverage_pct: status.coverage_pct,
|
||||
fresh_alive_writers: status.fresh_alive_writers,
|
||||
fresh_coverage_pct: status.fresh_coverage_pct,
|
||||
},
|
||||
writers: status
|
||||
.writers
|
||||
@@ -255,6 +352,12 @@ async fn get_minimal_payload_cached(
|
||||
bound_clients: entry.bound_clients,
|
||||
idle_for_secs: entry.idle_for_secs,
|
||||
rtt_ema_ms: entry.rtt_ema_ms,
|
||||
matches_active_generation: entry.matches_active_generation,
|
||||
in_desired_map: entry.in_desired_map,
|
||||
allow_drain_fallback: entry.allow_drain_fallback,
|
||||
drain_started_at_epoch_secs: entry.drain_started_at_epoch_secs,
|
||||
drain_deadline_epoch_secs: entry.drain_deadline_epoch_secs,
|
||||
drain_over_ttl: entry.drain_over_ttl,
|
||||
})
|
||||
.collect(),
|
||||
};
|
||||
@@ -272,11 +375,25 @@ async fn get_minimal_payload_cached(
|
||||
.into_iter()
|
||||
.map(|value| value.to_string())
|
||||
.collect(),
|
||||
endpoint_writers: entry
|
||||
.endpoint_writers
|
||||
.into_iter()
|
||||
.map(|coverage| DcEndpointWriters {
|
||||
endpoint: coverage.endpoint.to_string(),
|
||||
active_writers: coverage.active_writers,
|
||||
})
|
||||
.collect(),
|
||||
available_endpoints: entry.available_endpoints,
|
||||
available_pct: entry.available_pct,
|
||||
required_writers: entry.required_writers,
|
||||
floor_min: entry.floor_min,
|
||||
floor_target: entry.floor_target,
|
||||
floor_max: entry.floor_max,
|
||||
floor_capped: entry.floor_capped,
|
||||
alive_writers: entry.alive_writers,
|
||||
coverage_pct: entry.coverage_pct,
|
||||
fresh_alive_writers: entry.fresh_alive_writers,
|
||||
fresh_coverage_pct: entry.fresh_coverage_pct,
|
||||
rtt_ms: entry.rtt_ms,
|
||||
load: entry.load,
|
||||
})
|
||||
@@ -292,7 +409,31 @@ async fn get_minimal_payload_cached(
|
||||
adaptive_floor_idle_secs: runtime.adaptive_floor_idle_secs,
|
||||
adaptive_floor_min_writers_single_endpoint: runtime
|
||||
.adaptive_floor_min_writers_single_endpoint,
|
||||
adaptive_floor_min_writers_multi_endpoint: runtime
|
||||
.adaptive_floor_min_writers_multi_endpoint,
|
||||
adaptive_floor_recover_grace_secs: runtime.adaptive_floor_recover_grace_secs,
|
||||
adaptive_floor_writers_per_core_total: runtime.adaptive_floor_writers_per_core_total,
|
||||
adaptive_floor_cpu_cores_override: runtime.adaptive_floor_cpu_cores_override,
|
||||
adaptive_floor_max_extra_writers_single_per_core: runtime
|
||||
.adaptive_floor_max_extra_writers_single_per_core,
|
||||
adaptive_floor_max_extra_writers_multi_per_core: runtime
|
||||
.adaptive_floor_max_extra_writers_multi_per_core,
|
||||
adaptive_floor_max_active_writers_per_core: runtime
|
||||
.adaptive_floor_max_active_writers_per_core,
|
||||
adaptive_floor_max_warm_writers_per_core: runtime.adaptive_floor_max_warm_writers_per_core,
|
||||
adaptive_floor_max_active_writers_global: runtime.adaptive_floor_max_active_writers_global,
|
||||
adaptive_floor_max_warm_writers_global: runtime.adaptive_floor_max_warm_writers_global,
|
||||
adaptive_floor_cpu_cores_detected: runtime.adaptive_floor_cpu_cores_detected,
|
||||
adaptive_floor_cpu_cores_effective: runtime.adaptive_floor_cpu_cores_effective,
|
||||
adaptive_floor_global_cap_raw: runtime.adaptive_floor_global_cap_raw,
|
||||
adaptive_floor_global_cap_effective: runtime.adaptive_floor_global_cap_effective,
|
||||
adaptive_floor_target_writers_total: runtime.adaptive_floor_target_writers_total,
|
||||
adaptive_floor_active_cap_configured: runtime.adaptive_floor_active_cap_configured,
|
||||
adaptive_floor_active_cap_effective: runtime.adaptive_floor_active_cap_effective,
|
||||
adaptive_floor_warm_cap_configured: runtime.adaptive_floor_warm_cap_configured,
|
||||
adaptive_floor_warm_cap_effective: runtime.adaptive_floor_warm_cap_effective,
|
||||
adaptive_floor_active_writers_current: runtime.adaptive_floor_active_writers_current,
|
||||
adaptive_floor_warm_writers_current: runtime.adaptive_floor_warm_writers_current,
|
||||
me_keepalive_enabled: runtime.me_keepalive_enabled,
|
||||
me_keepalive_interval_secs: runtime.me_keepalive_interval_secs,
|
||||
me_keepalive_jitter_secs: runtime.me_keepalive_jitter_secs,
|
||||
@@ -316,6 +457,8 @@ async fn get_minimal_payload_cached(
|
||||
me_single_endpoint_shadow_rotate_every_secs: runtime
|
||||
.me_single_endpoint_shadow_rotate_every_secs,
|
||||
me_deterministic_writer_sort: runtime.me_deterministic_writer_sort,
|
||||
me_writer_pick_mode: runtime.me_writer_pick_mode,
|
||||
me_writer_pick_sample_size: runtime.me_writer_pick_sample_size,
|
||||
me_socks_kdf_policy: runtime.me_socks_kdf_policy,
|
||||
quarantined_endpoints_total: runtime.quarantined_endpoints.len(),
|
||||
quarantined_endpoints: runtime
|
||||
@@ -370,6 +513,8 @@ fn disabled_me_writers(now_epoch_secs: u64, reason: &'static str) -> MeWritersDa
|
||||
required_writers: 0,
|
||||
alive_writers: 0,
|
||||
coverage_pct: 0.0,
|
||||
fresh_alive_writers: 0,
|
||||
fresh_coverage_pct: 0.0,
|
||||
},
|
||||
writers: Vec::new(),
|
||||
}
|
||||
@@ -384,6 +529,25 @@ fn disabled_dcs(now_epoch_secs: u64, reason: &'static str) -> DcStatusData {
|
||||
}
|
||||
}
|
||||
|
||||
fn map_route_kind(value: UpstreamRouteKind) -> &'static str {
|
||||
match value {
|
||||
UpstreamRouteKind::Direct => "direct",
|
||||
UpstreamRouteKind::Socks4 => "socks4",
|
||||
UpstreamRouteKind::Socks5 => "socks5",
|
||||
UpstreamRouteKind::Shadowsocks => "shadowsocks",
|
||||
}
|
||||
}
|
||||
|
||||
fn map_ip_preference(value: IpPreference) -> &'static str {
|
||||
match value {
|
||||
IpPreference::Unknown => "unknown",
|
||||
IpPreference::PreferV6 => "prefer_v6",
|
||||
IpPreference::PreferV4 => "prefer_v4",
|
||||
IpPreference::BothWork => "both_work",
|
||||
IpPreference::Unavailable => "unavailable",
|
||||
}
|
||||
}
|
||||
|
||||
fn now_epoch_secs() -> u64 {
|
||||
SystemTime::now()
|
||||
.duration_since(UNIX_EPOCH)
|
||||
|
||||
66
src/api/runtime_watch.rs
Normal file
66
src/api/runtime_watch.rs
Normal file
@@ -0,0 +1,66 @@
|
||||
use std::sync::Arc;
|
||||
use std::sync::atomic::Ordering;
|
||||
use std::time::{SystemTime, UNIX_EPOCH};
|
||||
|
||||
use tokio::sync::watch;
|
||||
|
||||
use crate::config::ProxyConfig;
|
||||
|
||||
use super::ApiRuntimeState;
|
||||
use super::events::ApiEventStore;
|
||||
|
||||
pub(super) fn spawn_runtime_watchers(
|
||||
config_rx: watch::Receiver<Arc<ProxyConfig>>,
|
||||
admission_rx: watch::Receiver<bool>,
|
||||
runtime_state: Arc<ApiRuntimeState>,
|
||||
runtime_events: Arc<ApiEventStore>,
|
||||
) {
|
||||
let mut config_rx_reload = config_rx;
|
||||
let runtime_state_reload = runtime_state.clone();
|
||||
let runtime_events_reload = runtime_events.clone();
|
||||
tokio::spawn(async move {
|
||||
loop {
|
||||
if config_rx_reload.changed().await.is_err() {
|
||||
break;
|
||||
}
|
||||
runtime_state_reload
|
||||
.config_reload_count
|
||||
.fetch_add(1, Ordering::Relaxed);
|
||||
runtime_state_reload
|
||||
.last_config_reload_epoch_secs
|
||||
.store(now_epoch_secs(), Ordering::Relaxed);
|
||||
runtime_events_reload.record("config.reload.applied", "config receiver updated");
|
||||
}
|
||||
});
|
||||
|
||||
let mut admission_rx_watch = admission_rx;
|
||||
tokio::spawn(async move {
|
||||
runtime_state
|
||||
.admission_open
|
||||
.store(*admission_rx_watch.borrow(), Ordering::Relaxed);
|
||||
runtime_events.record(
|
||||
"admission.state",
|
||||
format!("accepting_new_connections={}", *admission_rx_watch.borrow()),
|
||||
);
|
||||
loop {
|
||||
if admission_rx_watch.changed().await.is_err() {
|
||||
break;
|
||||
}
|
||||
let admission_open = *admission_rx_watch.borrow();
|
||||
runtime_state
|
||||
.admission_open
|
||||
.store(admission_open, Ordering::Relaxed);
|
||||
runtime_events.record(
|
||||
"admission.state",
|
||||
format!("accepting_new_connections={}", admission_open),
|
||||
);
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
fn now_epoch_secs() -> u64 {
|
||||
SystemTime::now()
|
||||
.duration_since(UNIX_EPOCH)
|
||||
.unwrap_or_default()
|
||||
.as_secs()
|
||||
}
|
||||
309
src/api/runtime_zero.rs
Normal file
309
src/api/runtime_zero.rs
Normal file
@@ -0,0 +1,309 @@
|
||||
use std::sync::atomic::Ordering;
|
||||
|
||||
use serde::Serialize;
|
||||
|
||||
use crate::config::{MeFloorMode, MeWriterPickMode, ProxyConfig, UserMaxUniqueIpsMode};
|
||||
use crate::proxy::route_mode::RelayRouteMode;
|
||||
|
||||
use super::ApiShared;
|
||||
use super::runtime_init::build_runtime_startup_summary;
|
||||
|
||||
#[derive(Serialize)]
|
||||
pub(super) struct SystemInfoData {
|
||||
pub(super) version: String,
|
||||
pub(super) target_arch: String,
|
||||
pub(super) target_os: String,
|
||||
pub(super) build_profile: String,
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub(super) git_commit: Option<String>,
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub(super) build_time_utc: Option<String>,
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub(super) rustc_version: Option<String>,
|
||||
pub(super) process_started_at_epoch_secs: u64,
|
||||
pub(super) uptime_seconds: f64,
|
||||
pub(super) config_path: String,
|
||||
pub(super) config_hash: String,
|
||||
pub(super) config_reload_count: u64,
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub(super) last_config_reload_epoch_secs: Option<u64>,
|
||||
}
|
||||
|
||||
#[derive(Serialize)]
|
||||
pub(super) struct RuntimeGatesData {
|
||||
pub(super) accepting_new_connections: bool,
|
||||
pub(super) conditional_cast_enabled: bool,
|
||||
pub(super) me_runtime_ready: bool,
|
||||
pub(super) me2dc_fallback_enabled: bool,
|
||||
pub(super) use_middle_proxy: bool,
|
||||
pub(super) route_mode: &'static str,
|
||||
pub(super) reroute_active: bool,
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub(super) reroute_to_direct_at_epoch_secs: Option<u64>,
|
||||
pub(super) startup_status: &'static str,
|
||||
pub(super) startup_stage: String,
|
||||
pub(super) startup_progress_pct: f64,
|
||||
}
|
||||
|
||||
#[derive(Serialize)]
|
||||
pub(super) struct EffectiveTimeoutLimits {
|
||||
pub(super) client_handshake_secs: u64,
|
||||
pub(super) tg_connect_secs: u64,
|
||||
pub(super) client_keepalive_secs: u64,
|
||||
pub(super) client_ack_secs: u64,
|
||||
pub(super) me_one_retry: u8,
|
||||
pub(super) me_one_timeout_ms: u64,
|
||||
}
|
||||
|
||||
#[derive(Serialize)]
|
||||
pub(super) struct EffectiveUpstreamLimits {
|
||||
pub(super) connect_retry_attempts: u32,
|
||||
pub(super) connect_retry_backoff_ms: u64,
|
||||
pub(super) connect_budget_ms: u64,
|
||||
pub(super) unhealthy_fail_threshold: u32,
|
||||
pub(super) connect_failfast_hard_errors: bool,
|
||||
}
|
||||
|
||||
#[derive(Serialize)]
|
||||
pub(super) struct EffectiveMiddleProxyLimits {
|
||||
pub(super) floor_mode: &'static str,
|
||||
pub(super) adaptive_floor_idle_secs: u64,
|
||||
pub(super) adaptive_floor_min_writers_single_endpoint: u8,
|
||||
pub(super) adaptive_floor_min_writers_multi_endpoint: u8,
|
||||
pub(super) adaptive_floor_recover_grace_secs: u64,
|
||||
pub(super) adaptive_floor_writers_per_core_total: u16,
|
||||
pub(super) adaptive_floor_cpu_cores_override: u16,
|
||||
pub(super) adaptive_floor_max_extra_writers_single_per_core: u16,
|
||||
pub(super) adaptive_floor_max_extra_writers_multi_per_core: u16,
|
||||
pub(super) adaptive_floor_max_active_writers_per_core: u16,
|
||||
pub(super) adaptive_floor_max_warm_writers_per_core: u16,
|
||||
pub(super) adaptive_floor_max_active_writers_global: u32,
|
||||
pub(super) adaptive_floor_max_warm_writers_global: u32,
|
||||
pub(super) reconnect_max_concurrent_per_dc: u32,
|
||||
pub(super) reconnect_backoff_base_ms: u64,
|
||||
pub(super) reconnect_backoff_cap_ms: u64,
|
||||
pub(super) reconnect_fast_retry_count: u32,
|
||||
pub(super) writer_pick_mode: &'static str,
|
||||
pub(super) writer_pick_sample_size: u8,
|
||||
pub(super) me2dc_fallback: bool,
|
||||
}
|
||||
|
||||
#[derive(Serialize)]
|
||||
pub(super) struct EffectiveUserIpPolicyLimits {
|
||||
pub(super) global_each: usize,
|
||||
pub(super) mode: &'static str,
|
||||
pub(super) window_secs: u64,
|
||||
}
|
||||
|
||||
#[derive(Serialize)]
|
||||
pub(super) struct EffectiveLimitsData {
|
||||
pub(super) update_every_secs: u64,
|
||||
pub(super) me_reinit_every_secs: u64,
|
||||
pub(super) me_pool_force_close_secs: u64,
|
||||
pub(super) timeouts: EffectiveTimeoutLimits,
|
||||
pub(super) upstream: EffectiveUpstreamLimits,
|
||||
pub(super) middle_proxy: EffectiveMiddleProxyLimits,
|
||||
pub(super) user_ip_policy: EffectiveUserIpPolicyLimits,
|
||||
}
|
||||
|
||||
#[derive(Serialize)]
|
||||
pub(super) struct SecurityPostureData {
|
||||
pub(super) api_read_only: bool,
|
||||
pub(super) api_whitelist_enabled: bool,
|
||||
pub(super) api_whitelist_entries: usize,
|
||||
pub(super) api_auth_header_enabled: bool,
|
||||
pub(super) proxy_protocol_enabled: bool,
|
||||
pub(super) log_level: String,
|
||||
pub(super) telemetry_core_enabled: bool,
|
||||
pub(super) telemetry_user_enabled: bool,
|
||||
pub(super) telemetry_me_level: String,
|
||||
}
|
||||
|
||||
pub(super) fn build_system_info_data(
|
||||
shared: &ApiShared,
|
||||
_cfg: &ProxyConfig,
|
||||
revision: &str,
|
||||
) -> SystemInfoData {
|
||||
let last_reload_epoch_secs = shared
|
||||
.runtime_state
|
||||
.last_config_reload_epoch_secs
|
||||
.load(Ordering::Relaxed);
|
||||
let last_config_reload_epoch_secs =
|
||||
(last_reload_epoch_secs > 0).then_some(last_reload_epoch_secs);
|
||||
|
||||
let git_commit = option_env!("TELEMT_GIT_COMMIT")
|
||||
.or(option_env!("VERGEN_GIT_SHA"))
|
||||
.or(option_env!("GIT_COMMIT"))
|
||||
.map(ToString::to_string);
|
||||
let build_time_utc = option_env!("BUILD_TIME_UTC")
|
||||
.or(option_env!("VERGEN_BUILD_TIMESTAMP"))
|
||||
.map(ToString::to_string);
|
||||
let rustc_version = option_env!("RUSTC_VERSION")
|
||||
.or(option_env!("VERGEN_RUSTC_SEMVER"))
|
||||
.map(ToString::to_string);
|
||||
|
||||
SystemInfoData {
|
||||
version: env!("CARGO_PKG_VERSION").to_string(),
|
||||
target_arch: std::env::consts::ARCH.to_string(),
|
||||
target_os: std::env::consts::OS.to_string(),
|
||||
build_profile: option_env!("PROFILE").unwrap_or("unknown").to_string(),
|
||||
git_commit,
|
||||
build_time_utc,
|
||||
rustc_version,
|
||||
process_started_at_epoch_secs: shared.runtime_state.process_started_at_epoch_secs,
|
||||
uptime_seconds: shared.stats.uptime_secs(),
|
||||
config_path: shared.config_path.display().to_string(),
|
||||
config_hash: revision.to_string(),
|
||||
config_reload_count: shared
|
||||
.runtime_state
|
||||
.config_reload_count
|
||||
.load(Ordering::Relaxed),
|
||||
last_config_reload_epoch_secs,
|
||||
}
|
||||
}
|
||||
|
||||
pub(super) async fn build_runtime_gates_data(
|
||||
shared: &ApiShared,
|
||||
cfg: &ProxyConfig,
|
||||
) -> RuntimeGatesData {
|
||||
let startup_summary = build_runtime_startup_summary(shared).await;
|
||||
let route_state = shared.route_runtime.snapshot();
|
||||
let route_mode = route_state.mode.as_str();
|
||||
let reroute_active = cfg.general.use_middle_proxy
|
||||
&& cfg.general.me2dc_fallback
|
||||
&& matches!(route_state.mode, RelayRouteMode::Direct);
|
||||
let reroute_to_direct_at_epoch_secs = if reroute_active {
|
||||
shared.route_runtime.direct_since_epoch_secs()
|
||||
} else {
|
||||
None
|
||||
};
|
||||
let me_runtime_ready = if !cfg.general.use_middle_proxy {
|
||||
true
|
||||
} else {
|
||||
shared
|
||||
.me_pool
|
||||
.read()
|
||||
.await
|
||||
.as_ref()
|
||||
.map(|pool| pool.is_runtime_ready())
|
||||
.unwrap_or(false)
|
||||
};
|
||||
|
||||
RuntimeGatesData {
|
||||
accepting_new_connections: shared.runtime_state.admission_open.load(Ordering::Relaxed),
|
||||
conditional_cast_enabled: cfg.general.use_middle_proxy,
|
||||
me_runtime_ready,
|
||||
me2dc_fallback_enabled: cfg.general.me2dc_fallback,
|
||||
use_middle_proxy: cfg.general.use_middle_proxy,
|
||||
route_mode,
|
||||
reroute_active,
|
||||
reroute_to_direct_at_epoch_secs,
|
||||
startup_status: startup_summary.status,
|
||||
startup_stage: startup_summary.stage,
|
||||
startup_progress_pct: startup_summary.progress_pct,
|
||||
}
|
||||
}
|
||||
|
||||
pub(super) fn build_limits_effective_data(cfg: &ProxyConfig) -> EffectiveLimitsData {
|
||||
EffectiveLimitsData {
|
||||
update_every_secs: cfg.general.effective_update_every_secs(),
|
||||
me_reinit_every_secs: cfg.general.effective_me_reinit_every_secs(),
|
||||
me_pool_force_close_secs: cfg.general.effective_me_pool_force_close_secs(),
|
||||
timeouts: EffectiveTimeoutLimits {
|
||||
client_handshake_secs: cfg.timeouts.client_handshake,
|
||||
tg_connect_secs: cfg.timeouts.tg_connect,
|
||||
client_keepalive_secs: cfg.timeouts.client_keepalive,
|
||||
client_ack_secs: cfg.timeouts.client_ack,
|
||||
me_one_retry: cfg.timeouts.me_one_retry,
|
||||
me_one_timeout_ms: cfg.timeouts.me_one_timeout_ms,
|
||||
},
|
||||
upstream: EffectiveUpstreamLimits {
|
||||
connect_retry_attempts: cfg.general.upstream_connect_retry_attempts,
|
||||
connect_retry_backoff_ms: cfg.general.upstream_connect_retry_backoff_ms,
|
||||
connect_budget_ms: cfg.general.upstream_connect_budget_ms,
|
||||
unhealthy_fail_threshold: cfg.general.upstream_unhealthy_fail_threshold,
|
||||
connect_failfast_hard_errors: cfg.general.upstream_connect_failfast_hard_errors,
|
||||
},
|
||||
middle_proxy: EffectiveMiddleProxyLimits {
|
||||
floor_mode: me_floor_mode_label(cfg.general.me_floor_mode),
|
||||
adaptive_floor_idle_secs: cfg.general.me_adaptive_floor_idle_secs,
|
||||
adaptive_floor_min_writers_single_endpoint: cfg
|
||||
.general
|
||||
.me_adaptive_floor_min_writers_single_endpoint,
|
||||
adaptive_floor_min_writers_multi_endpoint: cfg
|
||||
.general
|
||||
.me_adaptive_floor_min_writers_multi_endpoint,
|
||||
adaptive_floor_recover_grace_secs: cfg.general.me_adaptive_floor_recover_grace_secs,
|
||||
adaptive_floor_writers_per_core_total: cfg
|
||||
.general
|
||||
.me_adaptive_floor_writers_per_core_total,
|
||||
adaptive_floor_cpu_cores_override: cfg.general.me_adaptive_floor_cpu_cores_override,
|
||||
adaptive_floor_max_extra_writers_single_per_core: cfg
|
||||
.general
|
||||
.me_adaptive_floor_max_extra_writers_single_per_core,
|
||||
adaptive_floor_max_extra_writers_multi_per_core: cfg
|
||||
.general
|
||||
.me_adaptive_floor_max_extra_writers_multi_per_core,
|
||||
adaptive_floor_max_active_writers_per_core: cfg
|
||||
.general
|
||||
.me_adaptive_floor_max_active_writers_per_core,
|
||||
adaptive_floor_max_warm_writers_per_core: cfg
|
||||
.general
|
||||
.me_adaptive_floor_max_warm_writers_per_core,
|
||||
adaptive_floor_max_active_writers_global: cfg
|
||||
.general
|
||||
.me_adaptive_floor_max_active_writers_global,
|
||||
adaptive_floor_max_warm_writers_global: cfg
|
||||
.general
|
||||
.me_adaptive_floor_max_warm_writers_global,
|
||||
reconnect_max_concurrent_per_dc: cfg.general.me_reconnect_max_concurrent_per_dc,
|
||||
reconnect_backoff_base_ms: cfg.general.me_reconnect_backoff_base_ms,
|
||||
reconnect_backoff_cap_ms: cfg.general.me_reconnect_backoff_cap_ms,
|
||||
reconnect_fast_retry_count: cfg.general.me_reconnect_fast_retry_count,
|
||||
writer_pick_mode: me_writer_pick_mode_label(cfg.general.me_writer_pick_mode),
|
||||
writer_pick_sample_size: cfg.general.me_writer_pick_sample_size,
|
||||
me2dc_fallback: cfg.general.me2dc_fallback,
|
||||
},
|
||||
user_ip_policy: EffectiveUserIpPolicyLimits {
|
||||
global_each: cfg.access.user_max_unique_ips_global_each,
|
||||
mode: user_max_unique_ips_mode_label(cfg.access.user_max_unique_ips_mode),
|
||||
window_secs: cfg.access.user_max_unique_ips_window_secs,
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
pub(super) fn build_security_posture_data(cfg: &ProxyConfig) -> SecurityPostureData {
|
||||
SecurityPostureData {
|
||||
api_read_only: cfg.server.api.read_only,
|
||||
api_whitelist_enabled: !cfg.server.api.whitelist.is_empty(),
|
||||
api_whitelist_entries: cfg.server.api.whitelist.len(),
|
||||
api_auth_header_enabled: !cfg.server.api.auth_header.is_empty(),
|
||||
proxy_protocol_enabled: cfg.server.proxy_protocol,
|
||||
log_level: cfg.general.log_level.to_string(),
|
||||
telemetry_core_enabled: cfg.general.telemetry.core_enabled,
|
||||
telemetry_user_enabled: cfg.general.telemetry.user_enabled,
|
||||
telemetry_me_level: cfg.general.telemetry.me_level.to_string(),
|
||||
}
|
||||
}
|
||||
|
||||
fn user_max_unique_ips_mode_label(mode: UserMaxUniqueIpsMode) -> &'static str {
|
||||
match mode {
|
||||
UserMaxUniqueIpsMode::ActiveWindow => "active_window",
|
||||
UserMaxUniqueIpsMode::TimeWindow => "time_window",
|
||||
UserMaxUniqueIpsMode::Combined => "combined",
|
||||
}
|
||||
}
|
||||
|
||||
fn me_floor_mode_label(mode: MeFloorMode) -> &'static str {
|
||||
match mode {
|
||||
MeFloorMode::Static => "static",
|
||||
MeFloorMode::Adaptive => "adaptive",
|
||||
}
|
||||
}
|
||||
|
||||
fn me_writer_pick_mode_label(mode: MeWriterPickMode) -> &'static str {
|
||||
match mode {
|
||||
MeWriterPickMode::SortedRr => "sorted_rr",
|
||||
MeWriterPickMode::P2c => "p2c",
|
||||
}
|
||||
}
|
||||
231
src/api/users.rs
231
src/api/users.rs
@@ -1,4 +1,3 @@
|
||||
use std::collections::HashMap;
|
||||
use std::net::IpAddr;
|
||||
|
||||
use hyper::StatusCode;
|
||||
@@ -9,7 +8,8 @@ use crate::stats::Stats;
|
||||
|
||||
use super::ApiShared;
|
||||
use super::config_store::{
|
||||
ensure_expected_revision, load_config_from_disk, save_config_to_disk,
|
||||
AccessSection, ensure_expected_revision, load_config_from_disk, save_access_sections_to_disk,
|
||||
save_config_to_disk,
|
||||
};
|
||||
use super::model::{
|
||||
ApiFailure, CreateUserRequest, CreateUserResponse, PatchUserRequest, RotateSecretRequest,
|
||||
@@ -22,6 +22,12 @@ pub(super) async fn create_user(
|
||||
expected_revision: Option<String>,
|
||||
shared: &ApiShared,
|
||||
) -> Result<(CreateUserResponse, String), ApiFailure> {
|
||||
let touches_user_ad_tags = body.user_ad_tag.is_some();
|
||||
let touches_user_max_tcp_conns = body.max_tcp_conns.is_some();
|
||||
let touches_user_expirations = body.expiration_rfc3339.is_some();
|
||||
let touches_user_data_quota = body.data_quota_bytes.is_some();
|
||||
let touches_user_max_unique_ips = body.max_unique_ips.is_some();
|
||||
|
||||
if !is_valid_username(&body.username) {
|
||||
return Err(ApiFailure::bad_request(
|
||||
"username must match [A-Za-z0-9_.-] and be 1..64 chars",
|
||||
@@ -40,7 +46,9 @@ pub(super) async fn create_user(
|
||||
None => random_user_secret(),
|
||||
};
|
||||
|
||||
if let Some(ad_tag) = body.user_ad_tag.as_ref() && !is_valid_ad_tag(ad_tag) {
|
||||
if let Some(ad_tag) = body.user_ad_tag.as_ref()
|
||||
&& !is_valid_ad_tag(ad_tag)
|
||||
{
|
||||
return Err(ApiFailure::bad_request(
|
||||
"user_ad_tag must be exactly 32 hex characters",
|
||||
));
|
||||
@@ -59,12 +67,18 @@ pub(super) async fn create_user(
|
||||
));
|
||||
}
|
||||
|
||||
cfg.access.users.insert(body.username.clone(), secret.clone());
|
||||
cfg.access
|
||||
.users
|
||||
.insert(body.username.clone(), secret.clone());
|
||||
if let Some(ad_tag) = body.user_ad_tag {
|
||||
cfg.access.user_ad_tags.insert(body.username.clone(), ad_tag);
|
||||
cfg.access
|
||||
.user_ad_tags
|
||||
.insert(body.username.clone(), ad_tag);
|
||||
}
|
||||
if let Some(limit) = body.max_tcp_conns {
|
||||
cfg.access.user_max_tcp_conns.insert(body.username.clone(), limit);
|
||||
cfg.access
|
||||
.user_max_tcp_conns
|
||||
.insert(body.username.clone(), limit);
|
||||
}
|
||||
if let Some(expiration) = expiration {
|
||||
cfg.access
|
||||
@@ -72,7 +86,9 @@ pub(super) async fn create_user(
|
||||
.insert(body.username.clone(), expiration);
|
||||
}
|
||||
if let Some(quota) = body.data_quota_bytes {
|
||||
cfg.access.user_data_quota.insert(body.username.clone(), quota);
|
||||
cfg.access
|
||||
.user_data_quota
|
||||
.insert(body.username.clone(), quota);
|
||||
}
|
||||
|
||||
let updated_limit = body.max_unique_ips;
|
||||
@@ -85,14 +101,43 @@ pub(super) async fn create_user(
|
||||
cfg.validate()
|
||||
.map_err(|e| ApiFailure::bad_request(format!("config validation failed: {}", e)))?;
|
||||
|
||||
let revision = save_config_to_disk(&shared.config_path, &cfg).await?;
|
||||
let mut touched_sections = vec![AccessSection::Users];
|
||||
if touches_user_ad_tags {
|
||||
touched_sections.push(AccessSection::UserAdTags);
|
||||
}
|
||||
if touches_user_max_tcp_conns {
|
||||
touched_sections.push(AccessSection::UserMaxTcpConns);
|
||||
}
|
||||
if touches_user_expirations {
|
||||
touched_sections.push(AccessSection::UserExpirations);
|
||||
}
|
||||
if touches_user_data_quota {
|
||||
touched_sections.push(AccessSection::UserDataQuota);
|
||||
}
|
||||
if touches_user_max_unique_ips {
|
||||
touched_sections.push(AccessSection::UserMaxUniqueIps);
|
||||
}
|
||||
|
||||
let revision =
|
||||
save_access_sections_to_disk(&shared.config_path, &cfg, &touched_sections).await?;
|
||||
drop(_guard);
|
||||
|
||||
if let Some(limit) = updated_limit {
|
||||
shared.ip_tracker.set_user_limit(&body.username, limit).await;
|
||||
shared
|
||||
.ip_tracker
|
||||
.set_user_limit(&body.username, limit)
|
||||
.await;
|
||||
}
|
||||
let (detected_ip_v4, detected_ip_v6) = shared.detected_link_ips();
|
||||
|
||||
let users = users_from_config(&cfg, &shared.stats, &shared.ip_tracker).await;
|
||||
let users = users_from_config(
|
||||
&cfg,
|
||||
&shared.stats,
|
||||
&shared.ip_tracker,
|
||||
detected_ip_v4,
|
||||
detected_ip_v6,
|
||||
)
|
||||
.await;
|
||||
let user = users
|
||||
.into_iter()
|
||||
.find(|entry| entry.username == body.username)
|
||||
@@ -105,8 +150,11 @@ pub(super) async fn create_user(
|
||||
max_unique_ips: updated_limit,
|
||||
current_connections: 0,
|
||||
active_unique_ips: 0,
|
||||
active_unique_ips_list: Vec::new(),
|
||||
recent_unique_ips: 0,
|
||||
recent_unique_ips_list: Vec::new(),
|
||||
total_octets: 0,
|
||||
links: build_user_links(&cfg, &secret),
|
||||
links: build_user_links(&cfg, &secret, detected_ip_v4, detected_ip_v6),
|
||||
});
|
||||
|
||||
Ok((CreateUserResponse { user, secret }, revision))
|
||||
@@ -118,12 +166,16 @@ pub(super) async fn patch_user(
|
||||
expected_revision: Option<String>,
|
||||
shared: &ApiShared,
|
||||
) -> Result<(UserInfo, String), ApiFailure> {
|
||||
if let Some(secret) = body.secret.as_ref() && !is_valid_user_secret(secret) {
|
||||
if let Some(secret) = body.secret.as_ref()
|
||||
&& !is_valid_user_secret(secret)
|
||||
{
|
||||
return Err(ApiFailure::bad_request(
|
||||
"secret must be exactly 32 hex characters",
|
||||
));
|
||||
}
|
||||
if let Some(ad_tag) = body.user_ad_tag.as_ref() && !is_valid_ad_tag(ad_tag) {
|
||||
if let Some(ad_tag) = body.user_ad_tag.as_ref()
|
||||
&& !is_valid_ad_tag(ad_tag)
|
||||
{
|
||||
return Err(ApiFailure::bad_request(
|
||||
"user_ad_tag must be exactly 32 hex characters",
|
||||
));
|
||||
@@ -148,10 +200,14 @@ pub(super) async fn patch_user(
|
||||
cfg.access.user_ad_tags.insert(user.to_string(), ad_tag);
|
||||
}
|
||||
if let Some(limit) = body.max_tcp_conns {
|
||||
cfg.access.user_max_tcp_conns.insert(user.to_string(), limit);
|
||||
cfg.access
|
||||
.user_max_tcp_conns
|
||||
.insert(user.to_string(), limit);
|
||||
}
|
||||
if let Some(expiration) = expiration {
|
||||
cfg.access.user_expirations.insert(user.to_string(), expiration);
|
||||
cfg.access
|
||||
.user_expirations
|
||||
.insert(user.to_string(), expiration);
|
||||
}
|
||||
if let Some(quota) = body.data_quota_bytes {
|
||||
cfg.access.user_data_quota.insert(user.to_string(), quota);
|
||||
@@ -159,7 +215,9 @@ pub(super) async fn patch_user(
|
||||
|
||||
let mut updated_limit = None;
|
||||
if let Some(limit) = body.max_unique_ips {
|
||||
cfg.access.user_max_unique_ips.insert(user.to_string(), limit);
|
||||
cfg.access
|
||||
.user_max_unique_ips
|
||||
.insert(user.to_string(), limit);
|
||||
updated_limit = Some(limit);
|
||||
}
|
||||
|
||||
@@ -171,7 +229,15 @@ pub(super) async fn patch_user(
|
||||
if let Some(limit) = updated_limit {
|
||||
shared.ip_tracker.set_user_limit(user, limit).await;
|
||||
}
|
||||
let users = users_from_config(&cfg, &shared.stats, &shared.ip_tracker).await;
|
||||
let (detected_ip_v4, detected_ip_v6) = shared.detected_link_ips();
|
||||
let users = users_from_config(
|
||||
&cfg,
|
||||
&shared.stats,
|
||||
&shared.ip_tracker,
|
||||
detected_ip_v4,
|
||||
detected_ip_v6,
|
||||
)
|
||||
.await;
|
||||
let user_info = users
|
||||
.into_iter()
|
||||
.find(|entry| entry.username == user)
|
||||
@@ -208,10 +274,27 @@ pub(super) async fn rotate_secret(
|
||||
cfg.access.users.insert(user.to_string(), secret.clone());
|
||||
cfg.validate()
|
||||
.map_err(|e| ApiFailure::bad_request(format!("config validation failed: {}", e)))?;
|
||||
let revision = save_config_to_disk(&shared.config_path, &cfg).await?;
|
||||
let touched_sections = [
|
||||
AccessSection::Users,
|
||||
AccessSection::UserAdTags,
|
||||
AccessSection::UserMaxTcpConns,
|
||||
AccessSection::UserExpirations,
|
||||
AccessSection::UserDataQuota,
|
||||
AccessSection::UserMaxUniqueIps,
|
||||
];
|
||||
let revision =
|
||||
save_access_sections_to_disk(&shared.config_path, &cfg, &touched_sections).await?;
|
||||
drop(_guard);
|
||||
|
||||
let users = users_from_config(&cfg, &shared.stats, &shared.ip_tracker).await;
|
||||
let (detected_ip_v4, detected_ip_v6) = shared.detected_link_ips();
|
||||
let users = users_from_config(
|
||||
&cfg,
|
||||
&shared.stats,
|
||||
&shared.ip_tracker,
|
||||
detected_ip_v4,
|
||||
detected_ip_v6,
|
||||
)
|
||||
.await;
|
||||
let user_info = users
|
||||
.into_iter()
|
||||
.find(|entry| entry.username == user)
|
||||
@@ -259,8 +342,18 @@ pub(super) async fn delete_user(
|
||||
|
||||
cfg.validate()
|
||||
.map_err(|e| ApiFailure::bad_request(format!("config validation failed: {}", e)))?;
|
||||
let revision = save_config_to_disk(&shared.config_path, &cfg).await?;
|
||||
let touched_sections = [
|
||||
AccessSection::Users,
|
||||
AccessSection::UserAdTags,
|
||||
AccessSection::UserMaxTcpConns,
|
||||
AccessSection::UserExpirations,
|
||||
AccessSection::UserDataQuota,
|
||||
AccessSection::UserMaxUniqueIps,
|
||||
];
|
||||
let revision =
|
||||
save_access_sections_to_disk(&shared.config_path, &cfg, &touched_sections).await?;
|
||||
drop(_guard);
|
||||
shared.ip_tracker.remove_user_limit(user).await;
|
||||
shared.ip_tracker.clear_user_ips(user).await;
|
||||
|
||||
Ok((user.to_string(), revision))
|
||||
@@ -270,24 +363,31 @@ pub(super) async fn users_from_config(
|
||||
cfg: &ProxyConfig,
|
||||
stats: &Stats,
|
||||
ip_tracker: &UserIpTracker,
|
||||
startup_detected_ip_v4: Option<IpAddr>,
|
||||
startup_detected_ip_v6: Option<IpAddr>,
|
||||
) -> Vec<UserInfo> {
|
||||
let ip_counts = ip_tracker
|
||||
.get_stats()
|
||||
.await
|
||||
.into_iter()
|
||||
.map(|(user, count, _)| (user, count))
|
||||
.collect::<HashMap<_, _>>();
|
||||
|
||||
let mut names = cfg.access.users.keys().cloned().collect::<Vec<_>>();
|
||||
names.sort();
|
||||
let active_ip_lists = ip_tracker.get_active_ips_for_users(&names).await;
|
||||
let recent_ip_lists = ip_tracker.get_recent_ips_for_users(&names).await;
|
||||
|
||||
let mut users = Vec::with_capacity(names.len());
|
||||
for username in names {
|
||||
let active_ip_list = active_ip_lists
|
||||
.get(&username)
|
||||
.cloned()
|
||||
.unwrap_or_else(Vec::new);
|
||||
let recent_ip_list = recent_ip_lists
|
||||
.get(&username)
|
||||
.cloned()
|
||||
.unwrap_or_else(Vec::new);
|
||||
let links = cfg
|
||||
.access
|
||||
.users
|
||||
.get(&username)
|
||||
.map(|secret| build_user_links(cfg, secret))
|
||||
.map(|secret| {
|
||||
build_user_links(cfg, secret, startup_detected_ip_v4, startup_detected_ip_v6)
|
||||
})
|
||||
.unwrap_or(UserLinks {
|
||||
classic: Vec::new(),
|
||||
secure: Vec::new(),
|
||||
@@ -302,9 +402,19 @@ pub(super) async fn users_from_config(
|
||||
.get(&username)
|
||||
.map(chrono::DateTime::<chrono::Utc>::to_rfc3339),
|
||||
data_quota_bytes: cfg.access.user_data_quota.get(&username).copied(),
|
||||
max_unique_ips: cfg.access.user_max_unique_ips.get(&username).copied(),
|
||||
max_unique_ips: cfg
|
||||
.access
|
||||
.user_max_unique_ips
|
||||
.get(&username)
|
||||
.copied()
|
||||
.filter(|limit| *limit > 0)
|
||||
.or((cfg.access.user_max_unique_ips_global_each > 0)
|
||||
.then_some(cfg.access.user_max_unique_ips_global_each)),
|
||||
current_connections: stats.get_user_curr_connects(&username),
|
||||
active_unique_ips: ip_counts.get(&username).copied().unwrap_or(0),
|
||||
active_unique_ips: active_ip_list.len(),
|
||||
active_unique_ips_list: active_ip_list,
|
||||
recent_unique_ips: recent_ip_list.len(),
|
||||
recent_unique_ips_list: recent_ip_list,
|
||||
total_octets: stats.get_user_total_octets(&username),
|
||||
links,
|
||||
username,
|
||||
@@ -313,8 +423,13 @@ pub(super) async fn users_from_config(
|
||||
users
|
||||
}
|
||||
|
||||
fn build_user_links(cfg: &ProxyConfig, secret: &str) -> UserLinks {
|
||||
let hosts = resolve_link_hosts(cfg);
|
||||
fn build_user_links(
|
||||
cfg: &ProxyConfig,
|
||||
secret: &str,
|
||||
startup_detected_ip_v4: Option<IpAddr>,
|
||||
startup_detected_ip_v6: Option<IpAddr>,
|
||||
) -> UserLinks {
|
||||
let hosts = resolve_link_hosts(cfg, startup_detected_ip_v4, startup_detected_ip_v6);
|
||||
let port = cfg.general.links.public_port.unwrap_or(cfg.server.port);
|
||||
let tls_domains = resolve_tls_domains(cfg);
|
||||
|
||||
@@ -353,7 +468,11 @@ fn build_user_links(cfg: &ProxyConfig, secret: &str) -> UserLinks {
|
||||
}
|
||||
}
|
||||
|
||||
fn resolve_link_hosts(cfg: &ProxyConfig) -> Vec<String> {
|
||||
fn resolve_link_hosts(
|
||||
cfg: &ProxyConfig,
|
||||
startup_detected_ip_v4: Option<IpAddr>,
|
||||
startup_detected_ip_v6: Option<IpAddr>,
|
||||
) -> Vec<String> {
|
||||
if let Some(host) = cfg
|
||||
.general
|
||||
.links
|
||||
@@ -376,27 +495,47 @@ fn resolve_link_hosts(cfg: &ProxyConfig) -> Vec<String> {
|
||||
push_unique_host(&mut hosts, host);
|
||||
continue;
|
||||
}
|
||||
if let Some(ip) = listener.announce_ip {
|
||||
if !ip.is_unspecified() {
|
||||
if let Some(ip) = listener.announce_ip
|
||||
&& !ip.is_unspecified()
|
||||
{
|
||||
push_unique_host(&mut hosts, &ip.to_string());
|
||||
continue;
|
||||
}
|
||||
if listener.ip.is_unspecified() {
|
||||
let detected_ip = if listener.ip.is_ipv4() {
|
||||
startup_detected_ip_v4
|
||||
} else {
|
||||
startup_detected_ip_v6
|
||||
};
|
||||
if let Some(ip) = detected_ip {
|
||||
push_unique_host(&mut hosts, &ip.to_string());
|
||||
} else {
|
||||
push_unique_host(&mut hosts, &listener.ip.to_string());
|
||||
}
|
||||
continue;
|
||||
}
|
||||
if !listener.ip.is_unspecified() {
|
||||
push_unique_host(&mut hosts, &listener.ip.to_string());
|
||||
}
|
||||
push_unique_host(&mut hosts, &listener.ip.to_string());
|
||||
}
|
||||
|
||||
if hosts.is_empty() {
|
||||
if let Some(host) = cfg.server.listen_addr_ipv4.as_deref() {
|
||||
push_host_from_legacy_listen(&mut hosts, host);
|
||||
}
|
||||
if let Some(host) = cfg.server.listen_addr_ipv6.as_deref() {
|
||||
push_host_from_legacy_listen(&mut hosts, host);
|
||||
}
|
||||
if !hosts.is_empty() {
|
||||
return hosts;
|
||||
}
|
||||
|
||||
hosts
|
||||
if let Some(ip) = startup_detected_ip_v4.or(startup_detected_ip_v6) {
|
||||
return vec![ip.to_string()];
|
||||
}
|
||||
|
||||
if let Some(host) = cfg.server.listen_addr_ipv4.as_deref() {
|
||||
push_host_from_legacy_listen(&mut hosts, host);
|
||||
}
|
||||
if let Some(host) = cfg.server.listen_addr_ipv6.as_deref() {
|
||||
push_host_from_legacy_listen(&mut hosts, host);
|
||||
}
|
||||
if !hosts.is_empty() {
|
||||
return hosts;
|
||||
}
|
||||
|
||||
vec!["UNKNOWN".to_string()]
|
||||
}
|
||||
|
||||
fn push_host_from_legacy_listen(hosts: &mut Vec<String>, raw: &str) {
|
||||
|
||||
69
src/cli.rs
69
src/cli.rs
@@ -1,9 +1,9 @@
|
||||
//! CLI commands: --init (fire-and-forget setup)
|
||||
|
||||
use rand::RngExt;
|
||||
use std::fs;
|
||||
use std::path::{Path, PathBuf};
|
||||
use std::process::Command;
|
||||
use rand::Rng;
|
||||
|
||||
/// Options for the init command
|
||||
pub struct InitOptions {
|
||||
@@ -35,10 +35,10 @@ pub fn parse_init_args(args: &[String]) -> Option<InitOptions> {
|
||||
if !args.iter().any(|a| a == "--init") {
|
||||
return None;
|
||||
}
|
||||
|
||||
|
||||
let mut opts = InitOptions::default();
|
||||
let mut i = 0;
|
||||
|
||||
|
||||
while i < args.len() {
|
||||
match args[i].as_str() {
|
||||
"--port" => {
|
||||
@@ -78,7 +78,7 @@ pub fn parse_init_args(args: &[String]) -> Option<InitOptions> {
|
||||
}
|
||||
i += 1;
|
||||
}
|
||||
|
||||
|
||||
Some(opts)
|
||||
}
|
||||
|
||||
@@ -86,7 +86,7 @@ pub fn parse_init_args(args: &[String]) -> Option<InitOptions> {
|
||||
pub fn run_init(opts: InitOptions) -> Result<(), Box<dyn std::error::Error>> {
|
||||
eprintln!("[telemt] Fire-and-forget setup");
|
||||
eprintln!();
|
||||
|
||||
|
||||
// 1. Generate or validate secret
|
||||
let secret = match opts.secret {
|
||||
Some(s) => {
|
||||
@@ -98,28 +98,28 @@ pub fn run_init(opts: InitOptions) -> Result<(), Box<dyn std::error::Error>> {
|
||||
}
|
||||
None => generate_secret(),
|
||||
};
|
||||
|
||||
|
||||
eprintln!("[+] Secret: {}", secret);
|
||||
eprintln!("[+] User: {}", opts.username);
|
||||
eprintln!("[+] Port: {}", opts.port);
|
||||
eprintln!("[+] Domain: {}", opts.domain);
|
||||
|
||||
|
||||
// 2. Create config directory
|
||||
fs::create_dir_all(&opts.config_dir)?;
|
||||
let config_path = opts.config_dir.join("config.toml");
|
||||
|
||||
|
||||
// 3. Write config
|
||||
let config_content = generate_config(&opts.username, &secret, opts.port, &opts.domain);
|
||||
fs::write(&config_path, &config_content)?;
|
||||
eprintln!("[+] Config written to {}", config_path.display());
|
||||
|
||||
|
||||
// 4. Write systemd unit
|
||||
let exe_path = std::env::current_exe()
|
||||
.unwrap_or_else(|_| PathBuf::from("/usr/local/bin/telemt"));
|
||||
|
||||
let exe_path =
|
||||
std::env::current_exe().unwrap_or_else(|_| PathBuf::from("/usr/local/bin/telemt"));
|
||||
|
||||
let unit_path = Path::new("/etc/systemd/system/telemt.service");
|
||||
let unit_content = generate_systemd_unit(&exe_path, &config_path);
|
||||
|
||||
|
||||
match fs::write(unit_path, &unit_content) {
|
||||
Ok(()) => {
|
||||
eprintln!("[+] Systemd unit written to {}", unit_path.display());
|
||||
@@ -128,31 +128,31 @@ pub fn run_init(opts: InitOptions) -> Result<(), Box<dyn std::error::Error>> {
|
||||
eprintln!("[!] Cannot write systemd unit (run as root?): {}", e);
|
||||
eprintln!("[!] Manual unit file content:");
|
||||
eprintln!("{}", unit_content);
|
||||
|
||||
|
||||
// Still print links and config
|
||||
print_links(&opts.username, &secret, opts.port, &opts.domain);
|
||||
return Ok(());
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
// 5. Reload systemd
|
||||
run_cmd("systemctl", &["daemon-reload"]);
|
||||
|
||||
|
||||
// 6. Enable service
|
||||
run_cmd("systemctl", &["enable", "telemt.service"]);
|
||||
eprintln!("[+] Service enabled");
|
||||
|
||||
|
||||
// 7. Start service (unless --no-start)
|
||||
if !opts.no_start {
|
||||
run_cmd("systemctl", &["start", "telemt.service"]);
|
||||
eprintln!("[+] Service started");
|
||||
|
||||
|
||||
// Brief delay then check status
|
||||
std::thread::sleep(std::time::Duration::from_secs(1));
|
||||
let status = Command::new("systemctl")
|
||||
.args(["is-active", "telemt.service"])
|
||||
.output();
|
||||
|
||||
|
||||
match status {
|
||||
Ok(out) if out.status.success() => {
|
||||
eprintln!("[+] Service is running");
|
||||
@@ -166,12 +166,12 @@ pub fn run_init(opts: InitOptions) -> Result<(), Box<dyn std::error::Error>> {
|
||||
eprintln!("[+] Service not started (--no-start)");
|
||||
eprintln!("[+] Start manually: systemctl start telemt.service");
|
||||
}
|
||||
|
||||
|
||||
eprintln!();
|
||||
|
||||
|
||||
// 8. Print links
|
||||
print_links(&opts.username, &secret, opts.port, &opts.domain);
|
||||
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
@@ -183,7 +183,7 @@ fn generate_secret() -> String {
|
||||
|
||||
fn generate_config(username: &str, secret: &str, port: u16, domain: &str) -> String {
|
||||
format!(
|
||||
r#"# Telemt MTProxy — auto-generated config
|
||||
r#"# Telemt MTProxy — auto-generated config
|
||||
# Re-run `telemt --init` to regenerate
|
||||
|
||||
show_link = ["{username}"]
|
||||
@@ -198,8 +198,15 @@ desync_all_full = false
|
||||
update_every = 43200
|
||||
hardswap = false
|
||||
me_pool_drain_ttl_secs = 90
|
||||
me_instadrain = false
|
||||
me_pool_drain_threshold = 32
|
||||
me_pool_drain_soft_evict_grace_secs = 10
|
||||
me_pool_drain_soft_evict_per_writer = 2
|
||||
me_pool_drain_soft_evict_budget_per_core = 16
|
||||
me_pool_drain_soft_evict_cooldown_ms = 1000
|
||||
me_bind_stale_mode = "never"
|
||||
me_pool_min_fresh_ratio = 0.8
|
||||
me_reinit_drain_timeout_secs = 120
|
||||
me_reinit_drain_timeout_secs = 90
|
||||
|
||||
[network]
|
||||
ipv4 = true
|
||||
@@ -239,7 +246,7 @@ tls_full_cert_ttl_secs = 90
|
||||
|
||||
[access]
|
||||
replay_check_len = 65536
|
||||
replay_window_secs = 1800
|
||||
replay_window_secs = 120
|
||||
ignore_time_skew = false
|
||||
|
||||
[access.users]
|
||||
@@ -259,9 +266,9 @@ weight = 10
|
||||
|
||||
fn generate_systemd_unit(exe_path: &Path, config_path: &Path) -> String {
|
||||
format!(
|
||||
r#"[Unit]
|
||||
r#"[Unit]
|
||||
Description=Telemt MTProxy
|
||||
Documentation=https://github.com/nicepkg/telemt
|
||||
Documentation=https://github.com/telemt/telemt
|
||||
After=network-online.target
|
||||
Wants=network-online.target
|
||||
|
||||
@@ -302,11 +309,13 @@ fn run_cmd(cmd: &str, args: &[&str]) {
|
||||
|
||||
fn print_links(username: &str, secret: &str, port: u16, domain: &str) {
|
||||
let domain_hex = hex::encode(domain);
|
||||
|
||||
|
||||
println!("=== Proxy Links ===");
|
||||
println!("[{}]", username);
|
||||
println!(" EE-TLS: tg://proxy?server=YOUR_SERVER_IP&port={}&secret=ee{}{}",
|
||||
port, secret, domain_hex);
|
||||
println!(
|
||||
" EE-TLS: tg://proxy?server=YOUR_SERVER_IP&port={}&secret=ee{}{}",
|
||||
port, secret, domain_hex
|
||||
);
|
||||
println!();
|
||||
println!("Replace YOUR_SERVER_IP with your server's public IP.");
|
||||
println!("The proxy will auto-detect and display the correct link on startup.");
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
use std::collections::HashMap;
|
||||
use ipnetwork::IpNetwork;
|
||||
use serde::Deserialize;
|
||||
use std::collections::HashMap;
|
||||
|
||||
// Helper defaults kept private to the config module.
|
||||
const DEFAULT_NETWORK_IPV6: Option<bool> = Some(false);
|
||||
@@ -11,9 +11,46 @@ const DEFAULT_ME_RECONNECT_FAST_RETRY_COUNT: u32 = 16;
|
||||
const DEFAULT_ME_SINGLE_ENDPOINT_SHADOW_WRITERS: u8 = 2;
|
||||
const DEFAULT_ME_ADAPTIVE_FLOOR_IDLE_SECS: u64 = 90;
|
||||
const DEFAULT_ME_ADAPTIVE_FLOOR_MIN_WRITERS_SINGLE_ENDPOINT: u8 = 1;
|
||||
const DEFAULT_ME_ADAPTIVE_FLOOR_MIN_WRITERS_MULTI_ENDPOINT: u8 = 1;
|
||||
const DEFAULT_ME_ADAPTIVE_FLOOR_RECOVER_GRACE_SECS: u64 = 180;
|
||||
const DEFAULT_ME_ADAPTIVE_FLOOR_WRITERS_PER_CORE_TOTAL: u16 = 48;
|
||||
const DEFAULT_ME_ADAPTIVE_FLOOR_CPU_CORES_OVERRIDE: u16 = 0;
|
||||
const DEFAULT_ME_ADAPTIVE_FLOOR_MAX_EXTRA_WRITERS_SINGLE_PER_CORE: u16 = 1;
|
||||
const DEFAULT_ME_ADAPTIVE_FLOOR_MAX_EXTRA_WRITERS_MULTI_PER_CORE: u16 = 2;
|
||||
const DEFAULT_ME_ADAPTIVE_FLOOR_MAX_ACTIVE_WRITERS_PER_CORE: u16 = 64;
|
||||
const DEFAULT_ME_ADAPTIVE_FLOOR_MAX_WARM_WRITERS_PER_CORE: u16 = 64;
|
||||
const DEFAULT_ME_ADAPTIVE_FLOOR_MAX_ACTIVE_WRITERS_GLOBAL: u32 = 256;
|
||||
const DEFAULT_ME_ADAPTIVE_FLOOR_MAX_WARM_WRITERS_GLOBAL: u32 = 256;
|
||||
const DEFAULT_ME_WRITER_CMD_CHANNEL_CAPACITY: usize = 4096;
|
||||
const DEFAULT_ME_ROUTE_CHANNEL_CAPACITY: usize = 768;
|
||||
const DEFAULT_ME_C2ME_CHANNEL_CAPACITY: usize = 1024;
|
||||
const DEFAULT_ME_READER_ROUTE_DATA_WAIT_MS: u64 = 2;
|
||||
const DEFAULT_ME_D2C_FLUSH_BATCH_MAX_FRAMES: usize = 32;
|
||||
const DEFAULT_ME_D2C_FLUSH_BATCH_MAX_BYTES: usize = 128 * 1024;
|
||||
const DEFAULT_ME_D2C_FLUSH_BATCH_MAX_DELAY_US: u64 = 500;
|
||||
const DEFAULT_ME_D2C_ACK_FLUSH_IMMEDIATE: bool = true;
|
||||
const DEFAULT_ME_QUOTA_SOFT_OVERSHOOT_BYTES: u64 = 64 * 1024;
|
||||
const DEFAULT_ME_D2C_FRAME_BUF_SHRINK_THRESHOLD_BYTES: usize = 256 * 1024;
|
||||
const DEFAULT_DIRECT_RELAY_COPY_BUF_C2S_BYTES: usize = 64 * 1024;
|
||||
const DEFAULT_DIRECT_RELAY_COPY_BUF_S2C_BYTES: usize = 256 * 1024;
|
||||
const DEFAULT_ME_WRITER_PICK_SAMPLE_SIZE: u8 = 3;
|
||||
const DEFAULT_ME_HEALTH_INTERVAL_MS_UNHEALTHY: u64 = 1000;
|
||||
const DEFAULT_ME_HEALTH_INTERVAL_MS_HEALTHY: u64 = 3000;
|
||||
const DEFAULT_ME_ADMISSION_POLL_MS: u64 = 1000;
|
||||
const DEFAULT_ME_WARN_RATE_LIMIT_MS: u64 = 5000;
|
||||
const DEFAULT_ME_ROUTE_HYBRID_MAX_WAIT_MS: u64 = 3000;
|
||||
const DEFAULT_ME_ROUTE_BLOCKING_SEND_TIMEOUT_MS: u64 = 250;
|
||||
const DEFAULT_ME_C2ME_SEND_TIMEOUT_MS: u64 = 4000;
|
||||
const DEFAULT_ME_POOL_DRAIN_SOFT_EVICT_ENABLED: bool = true;
|
||||
const DEFAULT_ME_POOL_DRAIN_SOFT_EVICT_GRACE_SECS: u64 = 10;
|
||||
const DEFAULT_ME_POOL_DRAIN_SOFT_EVICT_PER_WRITER: u8 = 2;
|
||||
const DEFAULT_ME_POOL_DRAIN_SOFT_EVICT_BUDGET_PER_CORE: u16 = 16;
|
||||
const DEFAULT_ME_POOL_DRAIN_SOFT_EVICT_COOLDOWN_MS: u64 = 1000;
|
||||
const DEFAULT_USER_MAX_UNIQUE_IPS_WINDOW_SECS: u64 = 30;
|
||||
const DEFAULT_ACCEPT_PERMIT_TIMEOUT_MS: u64 = 250;
|
||||
const DEFAULT_UPSTREAM_CONNECT_RETRY_ATTEMPTS: u32 = 2;
|
||||
const DEFAULT_UPSTREAM_UNHEALTHY_FAIL_THRESHOLD: u32 = 5;
|
||||
const DEFAULT_UPSTREAM_CONNECT_BUDGET_MS: u64 = 3000;
|
||||
const DEFAULT_LISTEN_ADDR_IPV6: &str = "::";
|
||||
const DEFAULT_ACCESS_USER: &str = "default";
|
||||
const DEFAULT_ACCESS_SECRET: &str = "00000000000000000000000000000000";
|
||||
@@ -30,6 +67,26 @@ pub(crate) fn default_tls_domain() -> String {
|
||||
"petrovich.ru".to_string()
|
||||
}
|
||||
|
||||
pub(crate) fn default_tls_fetch_scope() -> String {
|
||||
String::new()
|
||||
}
|
||||
|
||||
pub(crate) fn default_tls_fetch_attempt_timeout_ms() -> u64 {
|
||||
5_000
|
||||
}
|
||||
|
||||
pub(crate) fn default_tls_fetch_total_budget_ms() -> u64 {
|
||||
15_000
|
||||
}
|
||||
|
||||
pub(crate) fn default_tls_fetch_strict_route() -> bool {
|
||||
true
|
||||
}
|
||||
|
||||
pub(crate) fn default_tls_fetch_profile_cache_ttl_secs() -> u64 {
|
||||
600
|
||||
}
|
||||
|
||||
pub(crate) fn default_mask_port() -> u16 {
|
||||
443
|
||||
}
|
||||
@@ -47,23 +104,41 @@ pub(crate) fn default_replay_check_len() -> usize {
|
||||
}
|
||||
|
||||
pub(crate) fn default_replay_window_secs() -> u64 {
|
||||
1800
|
||||
// Keep replay cache TTL tight by default to reduce replay surface.
|
||||
// Deployments with higher RTT or longer reconnect jitter can override this in config.
|
||||
120
|
||||
}
|
||||
|
||||
pub(crate) fn default_handshake_timeout() -> u64 {
|
||||
30
|
||||
}
|
||||
|
||||
pub(crate) fn default_relay_idle_policy_v2_enabled() -> bool {
|
||||
true
|
||||
}
|
||||
|
||||
pub(crate) fn default_relay_client_idle_soft_secs() -> u64 {
|
||||
120
|
||||
}
|
||||
|
||||
pub(crate) fn default_relay_client_idle_hard_secs() -> u64 {
|
||||
360
|
||||
}
|
||||
|
||||
pub(crate) fn default_relay_idle_grace_after_downstream_activity_secs() -> u64 {
|
||||
30
|
||||
}
|
||||
|
||||
pub(crate) fn default_connect_timeout() -> u64 {
|
||||
10
|
||||
}
|
||||
|
||||
pub(crate) fn default_keepalive() -> u64 {
|
||||
60
|
||||
15
|
||||
}
|
||||
|
||||
pub(crate) fn default_ack_timeout() -> u64 {
|
||||
300
|
||||
90
|
||||
}
|
||||
pub(crate) fn default_me_one_retry() -> u8 {
|
||||
12
|
||||
@@ -86,18 +161,15 @@ pub(crate) fn default_weight() -> u16 {
|
||||
}
|
||||
|
||||
pub(crate) fn default_metrics_whitelist() -> Vec<IpNetwork> {
|
||||
vec![
|
||||
"127.0.0.1/32".parse().unwrap(),
|
||||
"::1/128".parse().unwrap(),
|
||||
]
|
||||
vec!["127.0.0.1/32".parse().unwrap(), "::1/128".parse().unwrap()]
|
||||
}
|
||||
|
||||
pub(crate) fn default_api_listen() -> String {
|
||||
"127.0.0.1:9091".to_string()
|
||||
"0.0.0.0:9091".to_string()
|
||||
}
|
||||
|
||||
pub(crate) fn default_api_whitelist() -> Vec<IpNetwork> {
|
||||
default_metrics_whitelist()
|
||||
vec!["127.0.0.0/8".parse().unwrap()]
|
||||
}
|
||||
|
||||
pub(crate) fn default_api_request_body_limit_bytes() -> usize {
|
||||
@@ -105,13 +177,42 @@ pub(crate) fn default_api_request_body_limit_bytes() -> usize {
|
||||
}
|
||||
|
||||
pub(crate) fn default_api_minimal_runtime_enabled() -> bool {
|
||||
false
|
||||
true
|
||||
}
|
||||
|
||||
pub(crate) fn default_api_minimal_runtime_cache_ttl_ms() -> u64 {
|
||||
1000
|
||||
}
|
||||
|
||||
pub(crate) fn default_api_runtime_edge_enabled() -> bool {
|
||||
false
|
||||
}
|
||||
pub(crate) fn default_api_runtime_edge_cache_ttl_ms() -> u64 {
|
||||
1000
|
||||
}
|
||||
pub(crate) fn default_api_runtime_edge_top_n() -> usize {
|
||||
10
|
||||
}
|
||||
pub(crate) fn default_api_runtime_edge_events_capacity() -> usize {
|
||||
256
|
||||
}
|
||||
|
||||
pub(crate) fn default_proxy_protocol_header_timeout_ms() -> u64 {
|
||||
500
|
||||
}
|
||||
|
||||
pub(crate) fn default_proxy_protocol_trusted_cidrs() -> Vec<IpNetwork> {
|
||||
vec!["0.0.0.0/0".parse().unwrap(), "::/0".parse().unwrap()]
|
||||
}
|
||||
|
||||
pub(crate) fn default_server_max_connections() -> u32 {
|
||||
10_000
|
||||
}
|
||||
|
||||
pub(crate) fn default_accept_permit_timeout_ms() -> u64 {
|
||||
DEFAULT_ACCEPT_PERMIT_TIMEOUT_MS
|
||||
}
|
||||
|
||||
pub(crate) fn default_prefer_4() -> u8 {
|
||||
4
|
||||
}
|
||||
@@ -128,6 +229,10 @@ pub(crate) fn default_unknown_dc_log_path() -> Option<String> {
|
||||
Some("unknown-dc.txt".to_string())
|
||||
}
|
||||
|
||||
pub(crate) fn default_unknown_dc_file_log_enabled() -> bool {
|
||||
false
|
||||
}
|
||||
|
||||
pub(crate) fn default_pool_size() -> usize {
|
||||
8
|
||||
}
|
||||
@@ -136,6 +241,14 @@ pub(crate) fn default_proxy_secret_path() -> Option<String> {
|
||||
Some("proxy-secret".to_string())
|
||||
}
|
||||
|
||||
pub(crate) fn default_proxy_config_v4_cache_path() -> Option<String> {
|
||||
Some("cache/proxy-config-v4.txt".to_string())
|
||||
}
|
||||
|
||||
pub(crate) fn default_proxy_config_v6_cache_path() -> Option<String> {
|
||||
Some("cache/proxy-config-v6.txt".to_string())
|
||||
}
|
||||
|
||||
pub(crate) fn default_middle_proxy_nat_stun() -> Option<String> {
|
||||
None
|
||||
}
|
||||
@@ -152,6 +265,14 @@ pub(crate) fn default_middle_proxy_warm_standby() -> usize {
|
||||
DEFAULT_MIDDLE_PROXY_WARM_STANDBY
|
||||
}
|
||||
|
||||
pub(crate) fn default_me_init_retry_attempts() -> u32 {
|
||||
0
|
||||
}
|
||||
|
||||
pub(crate) fn default_me2dc_fallback() -> bool {
|
||||
true
|
||||
}
|
||||
|
||||
pub(crate) fn default_keepalive_interval() -> u64 {
|
||||
8
|
||||
}
|
||||
@@ -216,10 +337,126 @@ pub(crate) fn default_me_adaptive_floor_min_writers_single_endpoint() -> u8 {
|
||||
DEFAULT_ME_ADAPTIVE_FLOOR_MIN_WRITERS_SINGLE_ENDPOINT
|
||||
}
|
||||
|
||||
pub(crate) fn default_me_adaptive_floor_min_writers_multi_endpoint() -> u8 {
|
||||
DEFAULT_ME_ADAPTIVE_FLOOR_MIN_WRITERS_MULTI_ENDPOINT
|
||||
}
|
||||
|
||||
pub(crate) fn default_me_adaptive_floor_recover_grace_secs() -> u64 {
|
||||
DEFAULT_ME_ADAPTIVE_FLOOR_RECOVER_GRACE_SECS
|
||||
}
|
||||
|
||||
pub(crate) fn default_me_adaptive_floor_writers_per_core_total() -> u16 {
|
||||
DEFAULT_ME_ADAPTIVE_FLOOR_WRITERS_PER_CORE_TOTAL
|
||||
}
|
||||
|
||||
pub(crate) fn default_me_adaptive_floor_cpu_cores_override() -> u16 {
|
||||
DEFAULT_ME_ADAPTIVE_FLOOR_CPU_CORES_OVERRIDE
|
||||
}
|
||||
|
||||
pub(crate) fn default_me_adaptive_floor_max_extra_writers_single_per_core() -> u16 {
|
||||
DEFAULT_ME_ADAPTIVE_FLOOR_MAX_EXTRA_WRITERS_SINGLE_PER_CORE
|
||||
}
|
||||
|
||||
pub(crate) fn default_me_adaptive_floor_max_extra_writers_multi_per_core() -> u16 {
|
||||
DEFAULT_ME_ADAPTIVE_FLOOR_MAX_EXTRA_WRITERS_MULTI_PER_CORE
|
||||
}
|
||||
|
||||
pub(crate) fn default_me_adaptive_floor_max_active_writers_per_core() -> u16 {
|
||||
DEFAULT_ME_ADAPTIVE_FLOOR_MAX_ACTIVE_WRITERS_PER_CORE
|
||||
}
|
||||
|
||||
pub(crate) fn default_me_adaptive_floor_max_warm_writers_per_core() -> u16 {
|
||||
DEFAULT_ME_ADAPTIVE_FLOOR_MAX_WARM_WRITERS_PER_CORE
|
||||
}
|
||||
|
||||
pub(crate) fn default_me_adaptive_floor_max_active_writers_global() -> u32 {
|
||||
DEFAULT_ME_ADAPTIVE_FLOOR_MAX_ACTIVE_WRITERS_GLOBAL
|
||||
}
|
||||
|
||||
pub(crate) fn default_me_adaptive_floor_max_warm_writers_global() -> u32 {
|
||||
DEFAULT_ME_ADAPTIVE_FLOOR_MAX_WARM_WRITERS_GLOBAL
|
||||
}
|
||||
|
||||
pub(crate) fn default_me_writer_cmd_channel_capacity() -> usize {
|
||||
DEFAULT_ME_WRITER_CMD_CHANNEL_CAPACITY
|
||||
}
|
||||
|
||||
pub(crate) fn default_me_route_channel_capacity() -> usize {
|
||||
DEFAULT_ME_ROUTE_CHANNEL_CAPACITY
|
||||
}
|
||||
|
||||
pub(crate) fn default_me_c2me_channel_capacity() -> usize {
|
||||
DEFAULT_ME_C2ME_CHANNEL_CAPACITY
|
||||
}
|
||||
|
||||
pub(crate) fn default_me_reader_route_data_wait_ms() -> u64 {
|
||||
DEFAULT_ME_READER_ROUTE_DATA_WAIT_MS
|
||||
}
|
||||
|
||||
pub(crate) fn default_me_d2c_flush_batch_max_frames() -> usize {
|
||||
DEFAULT_ME_D2C_FLUSH_BATCH_MAX_FRAMES
|
||||
}
|
||||
|
||||
pub(crate) fn default_me_d2c_flush_batch_max_bytes() -> usize {
|
||||
DEFAULT_ME_D2C_FLUSH_BATCH_MAX_BYTES
|
||||
}
|
||||
|
||||
pub(crate) fn default_me_d2c_flush_batch_max_delay_us() -> u64 {
|
||||
DEFAULT_ME_D2C_FLUSH_BATCH_MAX_DELAY_US
|
||||
}
|
||||
|
||||
pub(crate) fn default_me_d2c_ack_flush_immediate() -> bool {
|
||||
DEFAULT_ME_D2C_ACK_FLUSH_IMMEDIATE
|
||||
}
|
||||
|
||||
pub(crate) fn default_me_quota_soft_overshoot_bytes() -> u64 {
|
||||
DEFAULT_ME_QUOTA_SOFT_OVERSHOOT_BYTES
|
||||
}
|
||||
|
||||
pub(crate) fn default_me_d2c_frame_buf_shrink_threshold_bytes() -> usize {
|
||||
DEFAULT_ME_D2C_FRAME_BUF_SHRINK_THRESHOLD_BYTES
|
||||
}
|
||||
|
||||
pub(crate) fn default_direct_relay_copy_buf_c2s_bytes() -> usize {
|
||||
DEFAULT_DIRECT_RELAY_COPY_BUF_C2S_BYTES
|
||||
}
|
||||
|
||||
pub(crate) fn default_direct_relay_copy_buf_s2c_bytes() -> usize {
|
||||
DEFAULT_DIRECT_RELAY_COPY_BUF_S2C_BYTES
|
||||
}
|
||||
|
||||
pub(crate) fn default_me_writer_pick_sample_size() -> u8 {
|
||||
DEFAULT_ME_WRITER_PICK_SAMPLE_SIZE
|
||||
}
|
||||
|
||||
pub(crate) fn default_me_health_interval_ms_unhealthy() -> u64 {
|
||||
DEFAULT_ME_HEALTH_INTERVAL_MS_UNHEALTHY
|
||||
}
|
||||
|
||||
pub(crate) fn default_me_health_interval_ms_healthy() -> u64 {
|
||||
DEFAULT_ME_HEALTH_INTERVAL_MS_HEALTHY
|
||||
}
|
||||
|
||||
pub(crate) fn default_me_admission_poll_ms() -> u64 {
|
||||
DEFAULT_ME_ADMISSION_POLL_MS
|
||||
}
|
||||
|
||||
pub(crate) fn default_me_warn_rate_limit_ms() -> u64 {
|
||||
DEFAULT_ME_WARN_RATE_LIMIT_MS
|
||||
}
|
||||
|
||||
pub(crate) fn default_me_route_hybrid_max_wait_ms() -> u64 {
|
||||
DEFAULT_ME_ROUTE_HYBRID_MAX_WAIT_MS
|
||||
}
|
||||
|
||||
pub(crate) fn default_me_route_blocking_send_timeout_ms() -> u64 {
|
||||
DEFAULT_ME_ROUTE_BLOCKING_SEND_TIMEOUT_MS
|
||||
}
|
||||
|
||||
pub(crate) fn default_me_c2me_send_timeout_ms() -> u64 {
|
||||
DEFAULT_ME_C2ME_SEND_TIMEOUT_MS
|
||||
}
|
||||
|
||||
pub(crate) fn default_upstream_connect_retry_attempts() -> u32 {
|
||||
DEFAULT_UPSTREAM_CONNECT_RETRY_ATTEMPTS
|
||||
}
|
||||
@@ -232,6 +469,10 @@ pub(crate) fn default_upstream_unhealthy_fail_threshold() -> u32 {
|
||||
DEFAULT_UPSTREAM_UNHEALTHY_FAIL_THRESHOLD
|
||||
}
|
||||
|
||||
pub(crate) fn default_upstream_connect_budget_ms() -> u64 {
|
||||
DEFAULT_UPSTREAM_CONNECT_BUDGET_MS
|
||||
}
|
||||
|
||||
pub(crate) fn default_upstream_connect_failfast_hard_errors() -> bool {
|
||||
false
|
||||
}
|
||||
@@ -264,6 +505,18 @@ pub(crate) fn default_me_route_backpressure_high_watermark_pct() -> u8 {
|
||||
80
|
||||
}
|
||||
|
||||
pub(crate) fn default_me_route_no_writer_wait_ms() -> u64 {
|
||||
250
|
||||
}
|
||||
|
||||
pub(crate) fn default_me_route_inline_recovery_attempts() -> u32 {
|
||||
3
|
||||
}
|
||||
|
||||
pub(crate) fn default_me_route_inline_recovery_wait_ms() -> u64 {
|
||||
3000
|
||||
}
|
||||
|
||||
pub(crate) fn default_beobachten_minutes() -> u64 {
|
||||
10
|
||||
}
|
||||
@@ -285,17 +538,67 @@ pub(crate) fn default_tls_full_cert_ttl_secs() -> u64 {
|
||||
}
|
||||
|
||||
pub(crate) fn default_server_hello_delay_min_ms() -> u64 {
|
||||
0
|
||||
8
|
||||
}
|
||||
|
||||
pub(crate) fn default_server_hello_delay_max_ms() -> u64 {
|
||||
0
|
||||
24
|
||||
}
|
||||
|
||||
pub(crate) fn default_alpn_enforce() -> bool {
|
||||
true
|
||||
}
|
||||
|
||||
pub(crate) fn default_mask_shape_hardening() -> bool {
|
||||
true
|
||||
}
|
||||
|
||||
pub(crate) fn default_mask_shape_hardening_aggressive_mode() -> bool {
|
||||
false
|
||||
}
|
||||
|
||||
pub(crate) fn default_mask_shape_bucket_floor_bytes() -> usize {
|
||||
512
|
||||
}
|
||||
|
||||
pub(crate) fn default_mask_shape_bucket_cap_bytes() -> usize {
|
||||
4096
|
||||
}
|
||||
|
||||
pub(crate) fn default_mask_shape_above_cap_blur() -> bool {
|
||||
false
|
||||
}
|
||||
|
||||
pub(crate) fn default_mask_shape_above_cap_blur_max_bytes() -> usize {
|
||||
512
|
||||
}
|
||||
|
||||
#[cfg(not(test))]
|
||||
pub(crate) fn default_mask_relay_max_bytes() -> usize {
|
||||
5 * 1024 * 1024
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
pub(crate) fn default_mask_relay_max_bytes() -> usize {
|
||||
32 * 1024
|
||||
}
|
||||
|
||||
pub(crate) fn default_mask_classifier_prefetch_timeout_ms() -> u64 {
|
||||
5
|
||||
}
|
||||
|
||||
pub(crate) fn default_mask_timing_normalization_enabled() -> bool {
|
||||
false
|
||||
}
|
||||
|
||||
pub(crate) fn default_mask_timing_normalization_floor_ms() -> u64 {
|
||||
0
|
||||
}
|
||||
|
||||
pub(crate) fn default_mask_timing_normalization_ceiling_ms() -> u64 {
|
||||
0
|
||||
}
|
||||
|
||||
pub(crate) fn default_stun_servers() -> Vec<String> {
|
||||
vec![
|
||||
"stun.l.google.com:5349".to_string(),
|
||||
@@ -410,13 +713,41 @@ pub(crate) fn default_proxy_secret_len_max() -> usize {
|
||||
}
|
||||
|
||||
pub(crate) fn default_me_reinit_drain_timeout_secs() -> u64 {
|
||||
120
|
||||
90
|
||||
}
|
||||
|
||||
pub(crate) fn default_me_pool_drain_ttl_secs() -> u64 {
|
||||
90
|
||||
}
|
||||
|
||||
pub(crate) fn default_me_instadrain() -> bool {
|
||||
false
|
||||
}
|
||||
|
||||
pub(crate) fn default_me_pool_drain_threshold() -> u64 {
|
||||
32
|
||||
}
|
||||
|
||||
pub(crate) fn default_me_pool_drain_soft_evict_enabled() -> bool {
|
||||
DEFAULT_ME_POOL_DRAIN_SOFT_EVICT_ENABLED
|
||||
}
|
||||
|
||||
pub(crate) fn default_me_pool_drain_soft_evict_grace_secs() -> u64 {
|
||||
DEFAULT_ME_POOL_DRAIN_SOFT_EVICT_GRACE_SECS
|
||||
}
|
||||
|
||||
pub(crate) fn default_me_pool_drain_soft_evict_per_writer() -> u8 {
|
||||
DEFAULT_ME_POOL_DRAIN_SOFT_EVICT_PER_WRITER
|
||||
}
|
||||
|
||||
pub(crate) fn default_me_pool_drain_soft_evict_budget_per_core() -> u16 {
|
||||
DEFAULT_ME_POOL_DRAIN_SOFT_EVICT_BUDGET_PER_CORE
|
||||
}
|
||||
|
||||
pub(crate) fn default_me_pool_drain_soft_evict_cooldown_ms() -> u64 {
|
||||
DEFAULT_ME_POOL_DRAIN_SOFT_EVICT_COOLDOWN_MS
|
||||
}
|
||||
|
||||
pub(crate) fn default_me_bind_stale_ttl_secs() -> u64 {
|
||||
default_me_pool_drain_ttl_secs()
|
||||
}
|
||||
@@ -464,6 +795,14 @@ pub(crate) fn default_access_users() -> HashMap<String, String> {
|
||||
)])
|
||||
}
|
||||
|
||||
pub(crate) fn default_user_max_unique_ips_window_secs() -> u64 {
|
||||
DEFAULT_USER_MAX_UNIQUE_IPS_WINDOW_SECS
|
||||
}
|
||||
|
||||
pub(crate) fn default_user_max_unique_ips_global_each() -> usize {
|
||||
0
|
||||
}
|
||||
|
||||
// Custom deserializer helpers
|
||||
|
||||
#[derive(Deserialize)]
|
||||
|
||||
File diff suppressed because it is too large
Load Diff
1356
src/config/load.rs
1356
src/config/load.rs
File diff suppressed because it is too large
Load Diff
@@ -1,9 +1,9 @@
|
||||
//! Configuration.
|
||||
|
||||
pub(crate) mod defaults;
|
||||
mod types;
|
||||
mod load;
|
||||
pub mod hot_reload;
|
||||
mod load;
|
||||
mod types;
|
||||
|
||||
pub use load::ProxyConfig;
|
||||
pub use types::*;
|
||||
|
||||
80
src/config/tests/load_idle_policy_tests.rs
Normal file
80
src/config/tests/load_idle_policy_tests.rs
Normal file
@@ -0,0 +1,80 @@
|
||||
use super::*;
|
||||
use std::fs;
|
||||
use std::path::PathBuf;
|
||||
use std::time::{SystemTime, UNIX_EPOCH};
|
||||
|
||||
fn write_temp_config(contents: &str) -> PathBuf {
|
||||
let nonce = SystemTime::now()
|
||||
.duration_since(UNIX_EPOCH)
|
||||
.expect("system time must be after unix epoch")
|
||||
.as_nanos();
|
||||
let path = std::env::temp_dir().join(format!("telemt-idle-policy-{nonce}.toml"));
|
||||
fs::write(&path, contents).expect("temp config write must succeed");
|
||||
path
|
||||
}
|
||||
|
||||
fn remove_temp_config(path: &PathBuf) {
|
||||
let _ = fs::remove_file(path);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn load_rejects_relay_hard_idle_smaller_than_soft_idle_with_clear_error() {
|
||||
let path = write_temp_config(
|
||||
r#"
|
||||
[timeouts]
|
||||
relay_client_idle_soft_secs = 120
|
||||
relay_client_idle_hard_secs = 60
|
||||
"#,
|
||||
);
|
||||
|
||||
let err = ProxyConfig::load(&path).expect_err("config with hard<soft must fail");
|
||||
let msg = err.to_string();
|
||||
assert!(
|
||||
msg.contains(
|
||||
"timeouts.relay_client_idle_hard_secs must be >= timeouts.relay_client_idle_soft_secs"
|
||||
),
|
||||
"error must explain the violated hard>=soft invariant, got: {msg}"
|
||||
);
|
||||
|
||||
remove_temp_config(&path);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn load_rejects_relay_grace_larger_than_hard_idle_with_clear_error() {
|
||||
let path = write_temp_config(
|
||||
r#"
|
||||
[timeouts]
|
||||
relay_client_idle_soft_secs = 60
|
||||
relay_client_idle_hard_secs = 120
|
||||
relay_idle_grace_after_downstream_activity_secs = 121
|
||||
"#,
|
||||
);
|
||||
|
||||
let err = ProxyConfig::load(&path).expect_err("config with grace>hard must fail");
|
||||
let msg = err.to_string();
|
||||
assert!(
|
||||
msg.contains("timeouts.relay_idle_grace_after_downstream_activity_secs must be <= timeouts.relay_client_idle_hard_secs"),
|
||||
"error must explain the violated grace<=hard invariant, got: {msg}"
|
||||
);
|
||||
|
||||
remove_temp_config(&path);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn load_rejects_zero_handshake_timeout_with_clear_error() {
|
||||
let path = write_temp_config(
|
||||
r#"
|
||||
[timeouts]
|
||||
client_handshake = 0
|
||||
"#,
|
||||
);
|
||||
|
||||
let err = ProxyConfig::load(&path).expect_err("config with zero handshake timeout must fail");
|
||||
let msg = err.to_string();
|
||||
assert!(
|
||||
msg.contains("timeouts.client_handshake must be > 0"),
|
||||
"error must explain that handshake timeout must be positive, got: {msg}"
|
||||
);
|
||||
|
||||
remove_temp_config(&path);
|
||||
}
|
||||
@@ -0,0 +1,76 @@
|
||||
use super::*;
|
||||
use std::fs;
|
||||
use std::path::PathBuf;
|
||||
use std::time::{SystemTime, UNIX_EPOCH};
|
||||
|
||||
fn write_temp_config(contents: &str) -> PathBuf {
|
||||
let nonce = SystemTime::now()
|
||||
.duration_since(UNIX_EPOCH)
|
||||
.expect("system time must be after unix epoch")
|
||||
.as_nanos();
|
||||
let path = std::env::temp_dir().join(format!(
|
||||
"telemt-load-mask-prefetch-timeout-security-{nonce}.toml"
|
||||
));
|
||||
fs::write(&path, contents).expect("temp config write must succeed");
|
||||
path
|
||||
}
|
||||
|
||||
fn remove_temp_config(path: &PathBuf) {
|
||||
let _ = fs::remove_file(path);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn load_rejects_mask_classifier_prefetch_timeout_below_min_bound() {
|
||||
let path = write_temp_config(
|
||||
r#"
|
||||
[censorship]
|
||||
mask_classifier_prefetch_timeout_ms = 4
|
||||
"#,
|
||||
);
|
||||
|
||||
let err = ProxyConfig::load(&path)
|
||||
.expect_err("prefetch timeout below minimum security bound must be rejected");
|
||||
let msg = err.to_string();
|
||||
assert!(
|
||||
msg.contains("censorship.mask_classifier_prefetch_timeout_ms must be within [5, 50]"),
|
||||
"error must explain timeout bound invariant, got: {msg}"
|
||||
);
|
||||
|
||||
remove_temp_config(&path);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn load_rejects_mask_classifier_prefetch_timeout_above_max_bound() {
|
||||
let path = write_temp_config(
|
||||
r#"
|
||||
[censorship]
|
||||
mask_classifier_prefetch_timeout_ms = 51
|
||||
"#,
|
||||
);
|
||||
|
||||
let err = ProxyConfig::load(&path)
|
||||
.expect_err("prefetch timeout above max security bound must be rejected");
|
||||
let msg = err.to_string();
|
||||
assert!(
|
||||
msg.contains("censorship.mask_classifier_prefetch_timeout_ms must be within [5, 50]"),
|
||||
"error must explain timeout bound invariant, got: {msg}"
|
||||
);
|
||||
|
||||
remove_temp_config(&path);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn load_accepts_mask_classifier_prefetch_timeout_within_bounds() {
|
||||
let path = write_temp_config(
|
||||
r#"
|
||||
[censorship]
|
||||
mask_classifier_prefetch_timeout_ms = 20
|
||||
"#,
|
||||
);
|
||||
|
||||
let cfg =
|
||||
ProxyConfig::load(&path).expect("prefetch timeout within security bounds must be accepted");
|
||||
assert_eq!(cfg.censorship.mask_classifier_prefetch_timeout_ms, 20);
|
||||
|
||||
remove_temp_config(&path);
|
||||
}
|
||||
292
src/config/tests/load_mask_shape_security_tests.rs
Normal file
292
src/config/tests/load_mask_shape_security_tests.rs
Normal file
@@ -0,0 +1,292 @@
|
||||
use super::*;
|
||||
use std::fs;
|
||||
use std::path::PathBuf;
|
||||
use std::time::{SystemTime, UNIX_EPOCH};
|
||||
|
||||
fn write_temp_config(contents: &str) -> PathBuf {
|
||||
let nonce = SystemTime::now()
|
||||
.duration_since(UNIX_EPOCH)
|
||||
.expect("system time must be after unix epoch")
|
||||
.as_nanos();
|
||||
let path = std::env::temp_dir().join(format!("telemt-load-mask-shape-security-{nonce}.toml"));
|
||||
fs::write(&path, contents).expect("temp config write must succeed");
|
||||
path
|
||||
}
|
||||
|
||||
fn remove_temp_config(path: &PathBuf) {
|
||||
let _ = fs::remove_file(path);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn load_rejects_zero_mask_shape_bucket_floor_bytes() {
|
||||
let path = write_temp_config(
|
||||
r#"
|
||||
[censorship]
|
||||
mask_shape_bucket_floor_bytes = 0
|
||||
mask_shape_bucket_cap_bytes = 4096
|
||||
"#,
|
||||
);
|
||||
|
||||
let err =
|
||||
ProxyConfig::load(&path).expect_err("zero mask_shape_bucket_floor_bytes must be rejected");
|
||||
let msg = err.to_string();
|
||||
assert!(
|
||||
msg.contains("censorship.mask_shape_bucket_floor_bytes must be > 0"),
|
||||
"error must explain floor>0 invariant, got: {msg}"
|
||||
);
|
||||
|
||||
remove_temp_config(&path);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn load_rejects_mask_shape_bucket_cap_less_than_floor() {
|
||||
let path = write_temp_config(
|
||||
r#"
|
||||
[censorship]
|
||||
mask_shape_bucket_floor_bytes = 1024
|
||||
mask_shape_bucket_cap_bytes = 512
|
||||
"#,
|
||||
);
|
||||
|
||||
let err =
|
||||
ProxyConfig::load(&path).expect_err("mask_shape_bucket_cap_bytes < floor must be rejected");
|
||||
let msg = err.to_string();
|
||||
assert!(
|
||||
msg.contains(
|
||||
"censorship.mask_shape_bucket_cap_bytes must be >= censorship.mask_shape_bucket_floor_bytes"
|
||||
),
|
||||
"error must explain cap>=floor invariant, got: {msg}"
|
||||
);
|
||||
|
||||
remove_temp_config(&path);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn load_accepts_mask_shape_bucket_cap_equal_to_floor() {
|
||||
let path = write_temp_config(
|
||||
r#"
|
||||
[censorship]
|
||||
mask_shape_hardening = true
|
||||
mask_shape_bucket_floor_bytes = 1024
|
||||
mask_shape_bucket_cap_bytes = 1024
|
||||
"#,
|
||||
);
|
||||
|
||||
let cfg = ProxyConfig::load(&path).expect("equal cap and floor must be accepted");
|
||||
assert!(cfg.censorship.mask_shape_hardening);
|
||||
assert_eq!(cfg.censorship.mask_shape_bucket_floor_bytes, 1024);
|
||||
assert_eq!(cfg.censorship.mask_shape_bucket_cap_bytes, 1024);
|
||||
|
||||
remove_temp_config(&path);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn load_rejects_above_cap_blur_when_shape_hardening_disabled() {
|
||||
let path = write_temp_config(
|
||||
r#"
|
||||
[censorship]
|
||||
mask_shape_hardening = false
|
||||
mask_shape_above_cap_blur = true
|
||||
mask_shape_above_cap_blur_max_bytes = 64
|
||||
"#,
|
||||
);
|
||||
|
||||
let err =
|
||||
ProxyConfig::load(&path).expect_err("above-cap blur must require shape hardening enabled");
|
||||
let msg = err.to_string();
|
||||
assert!(
|
||||
msg.contains(
|
||||
"censorship.mask_shape_above_cap_blur requires censorship.mask_shape_hardening = true"
|
||||
),
|
||||
"error must explain blur prerequisite, got: {msg}"
|
||||
);
|
||||
|
||||
remove_temp_config(&path);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn load_rejects_above_cap_blur_with_zero_max_bytes() {
|
||||
let path = write_temp_config(
|
||||
r#"
|
||||
[censorship]
|
||||
mask_shape_hardening = true
|
||||
mask_shape_above_cap_blur = true
|
||||
mask_shape_above_cap_blur_max_bytes = 0
|
||||
"#,
|
||||
);
|
||||
|
||||
let err =
|
||||
ProxyConfig::load(&path).expect_err("above-cap blur max bytes must be > 0 when enabled");
|
||||
let msg = err.to_string();
|
||||
assert!(
|
||||
msg.contains("censorship.mask_shape_above_cap_blur_max_bytes must be > 0 when censorship.mask_shape_above_cap_blur is enabled"),
|
||||
"error must explain blur max bytes invariant, got: {msg}"
|
||||
);
|
||||
|
||||
remove_temp_config(&path);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn load_rejects_timing_normalization_floor_zero_when_enabled() {
|
||||
let path = write_temp_config(
|
||||
r#"
|
||||
[censorship]
|
||||
mask_timing_normalization_enabled = true
|
||||
mask_timing_normalization_floor_ms = 0
|
||||
mask_timing_normalization_ceiling_ms = 200
|
||||
"#,
|
||||
);
|
||||
|
||||
let err =
|
||||
ProxyConfig::load(&path).expect_err("timing normalization floor must be > 0 when enabled");
|
||||
let msg = err.to_string();
|
||||
assert!(
|
||||
msg.contains("censorship.mask_timing_normalization_floor_ms must be > 0 when censorship.mask_timing_normalization_enabled is true"),
|
||||
"error must explain timing floor invariant, got: {msg}"
|
||||
);
|
||||
|
||||
remove_temp_config(&path);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn load_rejects_timing_normalization_ceiling_below_floor() {
|
||||
let path = write_temp_config(
|
||||
r#"
|
||||
[censorship]
|
||||
mask_timing_normalization_enabled = true
|
||||
mask_timing_normalization_floor_ms = 220
|
||||
mask_timing_normalization_ceiling_ms = 200
|
||||
"#,
|
||||
);
|
||||
|
||||
let err = ProxyConfig::load(&path).expect_err("timing normalization ceiling must be >= floor");
|
||||
let msg = err.to_string();
|
||||
assert!(
|
||||
msg.contains("censorship.mask_timing_normalization_ceiling_ms must be >= censorship.mask_timing_normalization_floor_ms"),
|
||||
"error must explain timing ceiling/floor invariant, got: {msg}"
|
||||
);
|
||||
|
||||
remove_temp_config(&path);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn load_accepts_valid_timing_normalization_and_above_cap_blur_config() {
|
||||
let path = write_temp_config(
|
||||
r#"
|
||||
[censorship]
|
||||
mask_shape_hardening = true
|
||||
mask_shape_above_cap_blur = true
|
||||
mask_shape_above_cap_blur_max_bytes = 128
|
||||
mask_timing_normalization_enabled = true
|
||||
mask_timing_normalization_floor_ms = 150
|
||||
mask_timing_normalization_ceiling_ms = 240
|
||||
"#,
|
||||
);
|
||||
|
||||
let cfg = ProxyConfig::load(&path)
|
||||
.expect("valid blur and timing normalization settings must be accepted");
|
||||
assert!(cfg.censorship.mask_shape_hardening);
|
||||
assert!(cfg.censorship.mask_shape_above_cap_blur);
|
||||
assert_eq!(cfg.censorship.mask_shape_above_cap_blur_max_bytes, 128);
|
||||
assert!(cfg.censorship.mask_timing_normalization_enabled);
|
||||
assert_eq!(cfg.censorship.mask_timing_normalization_floor_ms, 150);
|
||||
assert_eq!(cfg.censorship.mask_timing_normalization_ceiling_ms, 240);
|
||||
|
||||
remove_temp_config(&path);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn load_rejects_aggressive_shape_mode_when_shape_hardening_disabled() {
|
||||
let path = write_temp_config(
|
||||
r#"
|
||||
[censorship]
|
||||
mask_shape_hardening = false
|
||||
mask_shape_hardening_aggressive_mode = true
|
||||
"#,
|
||||
);
|
||||
|
||||
let err = ProxyConfig::load(&path)
|
||||
.expect_err("aggressive shape hardening mode must require shape hardening enabled");
|
||||
let msg = err.to_string();
|
||||
assert!(
|
||||
msg.contains("censorship.mask_shape_hardening_aggressive_mode requires censorship.mask_shape_hardening = true"),
|
||||
"error must explain aggressive-mode prerequisite, got: {msg}"
|
||||
);
|
||||
|
||||
remove_temp_config(&path);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn load_accepts_aggressive_shape_mode_when_shape_hardening_enabled() {
|
||||
let path = write_temp_config(
|
||||
r#"
|
||||
[censorship]
|
||||
mask_shape_hardening = true
|
||||
mask_shape_hardening_aggressive_mode = true
|
||||
mask_shape_above_cap_blur = true
|
||||
mask_shape_above_cap_blur_max_bytes = 8
|
||||
"#,
|
||||
);
|
||||
|
||||
let cfg = ProxyConfig::load(&path)
|
||||
.expect("aggressive shape hardening mode should be accepted when prerequisites are met");
|
||||
assert!(cfg.censorship.mask_shape_hardening);
|
||||
assert!(cfg.censorship.mask_shape_hardening_aggressive_mode);
|
||||
assert!(cfg.censorship.mask_shape_above_cap_blur);
|
||||
|
||||
remove_temp_config(&path);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn load_rejects_zero_mask_relay_max_bytes() {
|
||||
let path = write_temp_config(
|
||||
r#"
|
||||
[censorship]
|
||||
mask_relay_max_bytes = 0
|
||||
"#,
|
||||
);
|
||||
|
||||
let err = ProxyConfig::load(&path).expect_err("mask_relay_max_bytes must be > 0");
|
||||
let msg = err.to_string();
|
||||
assert!(
|
||||
msg.contains("censorship.mask_relay_max_bytes must be > 0"),
|
||||
"error must explain non-zero relay cap invariant, got: {msg}"
|
||||
);
|
||||
|
||||
remove_temp_config(&path);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn load_rejects_mask_relay_max_bytes_above_upper_bound() {
|
||||
let path = write_temp_config(
|
||||
r#"
|
||||
[censorship]
|
||||
mask_relay_max_bytes = 67108865
|
||||
"#,
|
||||
);
|
||||
|
||||
let err =
|
||||
ProxyConfig::load(&path).expect_err("mask_relay_max_bytes above hard cap must be rejected");
|
||||
let msg = err.to_string();
|
||||
assert!(
|
||||
msg.contains("censorship.mask_relay_max_bytes must be <= 67108864"),
|
||||
"error must explain relay cap upper bound invariant, got: {msg}"
|
||||
);
|
||||
|
||||
remove_temp_config(&path);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn load_accepts_valid_mask_relay_max_bytes() {
|
||||
let path = write_temp_config(
|
||||
r#"
|
||||
[censorship]
|
||||
mask_relay_max_bytes = 8388608
|
||||
"#,
|
||||
);
|
||||
|
||||
let cfg = ProxyConfig::load(&path).expect("valid mask_relay_max_bytes must be accepted");
|
||||
assert_eq!(cfg.censorship.mask_relay_max_bytes, 8_388_608);
|
||||
|
||||
remove_temp_config(&path);
|
||||
}
|
||||
88
src/config/tests/load_security_tests.rs
Normal file
88
src/config/tests/load_security_tests.rs
Normal file
@@ -0,0 +1,88 @@
|
||||
use super::*;
|
||||
use std::fs;
|
||||
use std::path::PathBuf;
|
||||
use std::time::{SystemTime, UNIX_EPOCH};
|
||||
|
||||
fn write_temp_config(contents: &str) -> PathBuf {
|
||||
let nonce = SystemTime::now()
|
||||
.duration_since(UNIX_EPOCH)
|
||||
.expect("system time must be after unix epoch")
|
||||
.as_nanos();
|
||||
let path = std::env::temp_dir().join(format!("telemt-load-security-{nonce}.toml"));
|
||||
fs::write(&path, contents).expect("temp config write must succeed");
|
||||
path
|
||||
}
|
||||
|
||||
fn remove_temp_config(path: &PathBuf) {
|
||||
let _ = fs::remove_file(path);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn load_rejects_server_hello_delay_equal_to_handshake_timeout_budget() {
|
||||
let path = write_temp_config(
|
||||
r#"
|
||||
[timeouts]
|
||||
client_handshake = 1
|
||||
|
||||
[censorship]
|
||||
server_hello_delay_max_ms = 1000
|
||||
"#,
|
||||
);
|
||||
|
||||
let err =
|
||||
ProxyConfig::load(&path).expect_err("delay equal to handshake timeout must be rejected");
|
||||
let msg = err.to_string();
|
||||
assert!(
|
||||
msg.contains(
|
||||
"censorship.server_hello_delay_max_ms must be < timeouts.client_handshake * 1000"
|
||||
),
|
||||
"error must explain delay<timeout invariant, got: {msg}"
|
||||
);
|
||||
|
||||
remove_temp_config(&path);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn load_rejects_server_hello_delay_larger_than_handshake_timeout_budget() {
|
||||
let path = write_temp_config(
|
||||
r#"
|
||||
[timeouts]
|
||||
client_handshake = 1
|
||||
|
||||
[censorship]
|
||||
server_hello_delay_max_ms = 1500
|
||||
"#,
|
||||
);
|
||||
|
||||
let err =
|
||||
ProxyConfig::load(&path).expect_err("delay larger than handshake timeout must be rejected");
|
||||
let msg = err.to_string();
|
||||
assert!(
|
||||
msg.contains(
|
||||
"censorship.server_hello_delay_max_ms must be < timeouts.client_handshake * 1000"
|
||||
),
|
||||
"error must explain delay<timeout invariant, got: {msg}"
|
||||
);
|
||||
|
||||
remove_temp_config(&path);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn load_accepts_server_hello_delay_strictly_below_handshake_timeout_budget() {
|
||||
let path = write_temp_config(
|
||||
r#"
|
||||
[timeouts]
|
||||
client_handshake = 1
|
||||
|
||||
[censorship]
|
||||
server_hello_delay_max_ms = 999
|
||||
"#,
|
||||
);
|
||||
|
||||
let cfg =
|
||||
ProxyConfig::load(&path).expect("delay below handshake timeout budget must be accepted");
|
||||
assert_eq!(cfg.timeouts.client_handshake, 1);
|
||||
assert_eq!(cfg.censorship.server_hello_delay_max_ms, 999);
|
||||
|
||||
remove_temp_config(&path);
|
||||
}
|
||||
@@ -3,6 +3,7 @@ use ipnetwork::IpNetwork;
|
||||
use serde::{Deserialize, Serialize};
|
||||
use std::collections::HashMap;
|
||||
use std::net::IpAddr;
|
||||
use std::path::PathBuf;
|
||||
|
||||
use super::defaults::*;
|
||||
|
||||
@@ -134,8 +135,8 @@ impl MeSocksKdfPolicy {
|
||||
#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize, Default)]
|
||||
#[serde(rename_all = "lowercase")]
|
||||
pub enum MeBindStaleMode {
|
||||
Never,
|
||||
#[default]
|
||||
Never,
|
||||
Ttl,
|
||||
Always,
|
||||
}
|
||||
@@ -183,6 +184,74 @@ impl MeFloorMode {
|
||||
}
|
||||
}
|
||||
|
||||
/// Middle-End route behavior when no writer is immediately available.
|
||||
#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize, Default)]
|
||||
#[serde(rename_all = "snake_case")]
|
||||
pub enum MeRouteNoWriterMode {
|
||||
AsyncRecoveryFailfast,
|
||||
InlineRecoveryLegacy,
|
||||
#[default]
|
||||
HybridAsyncPersistent,
|
||||
}
|
||||
|
||||
impl MeRouteNoWriterMode {
|
||||
pub fn as_u8(self) -> u8 {
|
||||
match self {
|
||||
MeRouteNoWriterMode::AsyncRecoveryFailfast => 0,
|
||||
MeRouteNoWriterMode::InlineRecoveryLegacy => 1,
|
||||
MeRouteNoWriterMode::HybridAsyncPersistent => 2,
|
||||
}
|
||||
}
|
||||
|
||||
pub fn from_u8(raw: u8) -> Self {
|
||||
match raw {
|
||||
0 => MeRouteNoWriterMode::AsyncRecoveryFailfast,
|
||||
1 => MeRouteNoWriterMode::InlineRecoveryLegacy,
|
||||
2 => MeRouteNoWriterMode::HybridAsyncPersistent,
|
||||
_ => MeRouteNoWriterMode::HybridAsyncPersistent,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Middle-End writer selection mode for new client bindings.
|
||||
#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize, Default)]
|
||||
#[serde(rename_all = "snake_case")]
|
||||
pub enum MeWriterPickMode {
|
||||
SortedRr,
|
||||
#[default]
|
||||
P2c,
|
||||
}
|
||||
|
||||
impl MeWriterPickMode {
|
||||
pub fn as_u8(self) -> u8 {
|
||||
match self {
|
||||
MeWriterPickMode::SortedRr => 0,
|
||||
MeWriterPickMode::P2c => 1,
|
||||
}
|
||||
}
|
||||
|
||||
pub fn from_u8(raw: u8) -> Self {
|
||||
match raw {
|
||||
0 => MeWriterPickMode::SortedRr,
|
||||
1 => MeWriterPickMode::P2c,
|
||||
_ => MeWriterPickMode::P2c,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Per-user unique source IP limit mode.
|
||||
#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize, Default)]
|
||||
#[serde(rename_all = "snake_case")]
|
||||
pub enum UserMaxUniqueIpsMode {
|
||||
/// Count only currently active source IPs.
|
||||
#[default]
|
||||
ActiveWindow,
|
||||
/// Count source IPs seen within the recent time window.
|
||||
TimeWindow,
|
||||
/// Enforce both active and recent-window limits at the same time.
|
||||
Combined,
|
||||
}
|
||||
|
||||
/// Telemetry controls for hot-path counters and ME diagnostics.
|
||||
#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
|
||||
pub struct TelemetryConfig {
|
||||
@@ -288,6 +357,9 @@ impl Default for NetworkConfig {
|
||||
|
||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||
pub struct GeneralConfig {
|
||||
#[serde(default)]
|
||||
pub data_path: Option<PathBuf>,
|
||||
|
||||
#[serde(default)]
|
||||
pub modes: ProxyModes,
|
||||
|
||||
@@ -305,6 +377,14 @@ pub struct GeneralConfig {
|
||||
#[serde(default = "default_proxy_secret_path")]
|
||||
pub proxy_secret_path: Option<String>,
|
||||
|
||||
/// Optional path to cache raw getProxyConfig (IPv4) snapshot for startup fallback.
|
||||
#[serde(default = "default_proxy_config_v4_cache_path")]
|
||||
pub proxy_config_v4_cache_path: Option<String>,
|
||||
|
||||
/// Optional path to cache raw getProxyConfigV6 snapshot for startup fallback.
|
||||
#[serde(default = "default_proxy_config_v6_cache_path")]
|
||||
pub proxy_config_v6_cache_path: Option<String>,
|
||||
|
||||
/// Global ad_tag (32 hex chars from @MTProxybot). Fallback when user has no per-user tag in access.user_ad_tags.
|
||||
#[serde(default)]
|
||||
pub ad_tag: Option<String>,
|
||||
@@ -340,6 +420,15 @@ pub struct GeneralConfig {
|
||||
#[serde(default = "default_middle_proxy_warm_standby")]
|
||||
pub middle_proxy_warm_standby: usize,
|
||||
|
||||
/// Startup retries for Middle-End pool initialization before ME→Direct fallback.
|
||||
/// 0 means unlimited retries.
|
||||
#[serde(default = "default_me_init_retry_attempts")]
|
||||
pub me_init_retry_attempts: u32,
|
||||
|
||||
/// Allow fallback from Middle-End mode to direct DC when ME startup cannot be initialized.
|
||||
#[serde(default = "default_me2dc_fallback")]
|
||||
pub me2dc_fallback: bool,
|
||||
|
||||
/// Enable ME keepalive padding frames.
|
||||
#[serde(default = "default_true")]
|
||||
pub me_keepalive_enabled: bool,
|
||||
@@ -361,6 +450,61 @@ pub struct GeneralConfig {
|
||||
#[serde(default = "default_rpc_proxy_req_every")]
|
||||
pub rpc_proxy_req_every: u64,
|
||||
|
||||
/// Capacity of per-ME writer command channel.
|
||||
#[serde(default = "default_me_writer_cmd_channel_capacity")]
|
||||
pub me_writer_cmd_channel_capacity: usize,
|
||||
|
||||
/// Capacity of per-connection ME response route channel.
|
||||
#[serde(default = "default_me_route_channel_capacity")]
|
||||
pub me_route_channel_capacity: usize,
|
||||
|
||||
/// Capacity of per-client command queue from client reader to ME sender task.
|
||||
#[serde(default = "default_me_c2me_channel_capacity")]
|
||||
pub me_c2me_channel_capacity: usize,
|
||||
|
||||
/// Maximum wait in milliseconds for enqueueing C2ME commands when the queue is full.
|
||||
/// `0` keeps legacy unbounded wait behavior.
|
||||
#[serde(default = "default_me_c2me_send_timeout_ms")]
|
||||
pub me_c2me_send_timeout_ms: u64,
|
||||
|
||||
/// Bounded wait in milliseconds for routing ME DATA to per-connection queue.
|
||||
/// `0` keeps non-blocking routing; values >0 enable bounded wait for compatibility.
|
||||
#[serde(default = "default_me_reader_route_data_wait_ms")]
|
||||
pub me_reader_route_data_wait_ms: u64,
|
||||
|
||||
/// Maximum number of ME->Client responses coalesced before flush.
|
||||
#[serde(default = "default_me_d2c_flush_batch_max_frames")]
|
||||
pub me_d2c_flush_batch_max_frames: usize,
|
||||
|
||||
/// Maximum total payload bytes coalesced before flush.
|
||||
#[serde(default = "default_me_d2c_flush_batch_max_bytes")]
|
||||
pub me_d2c_flush_batch_max_bytes: usize,
|
||||
|
||||
/// Maximum wait in microseconds to coalesce additional ME->Client responses.
|
||||
/// `0` disables timed coalescing.
|
||||
#[serde(default = "default_me_d2c_flush_batch_max_delay_us")]
|
||||
pub me_d2c_flush_batch_max_delay_us: u64,
|
||||
|
||||
/// Flush client writer immediately after quick-ack write.
|
||||
#[serde(default = "default_me_d2c_ack_flush_immediate")]
|
||||
pub me_d2c_ack_flush_immediate: bool,
|
||||
|
||||
/// Additional bytes above strict per-user quota allowed in hot-path soft mode.
|
||||
#[serde(default = "default_me_quota_soft_overshoot_bytes")]
|
||||
pub me_quota_soft_overshoot_bytes: u64,
|
||||
|
||||
/// Shrink threshold for reusable ME->Client frame assembly buffer.
|
||||
#[serde(default = "default_me_d2c_frame_buf_shrink_threshold_bytes")]
|
||||
pub me_d2c_frame_buf_shrink_threshold_bytes: usize,
|
||||
|
||||
/// Copy buffer size for client->DC direction in direct relay.
|
||||
#[serde(default = "default_direct_relay_copy_buf_c2s_bytes")]
|
||||
pub direct_relay_copy_buf_c2s_bytes: usize,
|
||||
|
||||
/// Copy buffer size for DC->client direction in direct relay.
|
||||
#[serde(default = "default_direct_relay_copy_buf_s2c_bytes")]
|
||||
pub direct_relay_copy_buf_s2c_bytes: usize,
|
||||
|
||||
/// Max pending ciphertext buffer per client writer (bytes).
|
||||
/// Controls FakeTLS backpressure vs throughput.
|
||||
#[serde(default = "default_crypto_pending_buffer")]
|
||||
@@ -461,10 +605,47 @@ pub struct GeneralConfig {
|
||||
#[serde(default = "default_me_adaptive_floor_min_writers_single_endpoint")]
|
||||
pub me_adaptive_floor_min_writers_single_endpoint: u8,
|
||||
|
||||
/// Minimum writer target for multi-endpoint DC groups in adaptive floor mode.
|
||||
#[serde(default = "default_me_adaptive_floor_min_writers_multi_endpoint")]
|
||||
pub me_adaptive_floor_min_writers_multi_endpoint: u8,
|
||||
|
||||
/// Grace period in seconds to hold static floor after activity in adaptive mode.
|
||||
#[serde(default = "default_me_adaptive_floor_recover_grace_secs")]
|
||||
pub me_adaptive_floor_recover_grace_secs: u64,
|
||||
|
||||
/// Global ME writer budget per logical CPU core in adaptive mode.
|
||||
#[serde(default = "default_me_adaptive_floor_writers_per_core_total")]
|
||||
pub me_adaptive_floor_writers_per_core_total: u16,
|
||||
|
||||
/// Override logical CPU core count for adaptive floor calculations.
|
||||
/// Set to 0 to use runtime auto-detection.
|
||||
#[serde(default = "default_me_adaptive_floor_cpu_cores_override")]
|
||||
pub me_adaptive_floor_cpu_cores_override: u16,
|
||||
|
||||
/// Per-core max extra writers above base required floor for single-endpoint DC groups.
|
||||
#[serde(default = "default_me_adaptive_floor_max_extra_writers_single_per_core")]
|
||||
pub me_adaptive_floor_max_extra_writers_single_per_core: u16,
|
||||
|
||||
/// Per-core max extra writers above base required floor for multi-endpoint DC groups.
|
||||
#[serde(default = "default_me_adaptive_floor_max_extra_writers_multi_per_core")]
|
||||
pub me_adaptive_floor_max_extra_writers_multi_per_core: u16,
|
||||
|
||||
/// Hard cap for active ME writers per logical CPU core.
|
||||
#[serde(default = "default_me_adaptive_floor_max_active_writers_per_core")]
|
||||
pub me_adaptive_floor_max_active_writers_per_core: u16,
|
||||
|
||||
/// Hard cap for warm ME writers per logical CPU core.
|
||||
#[serde(default = "default_me_adaptive_floor_max_warm_writers_per_core")]
|
||||
pub me_adaptive_floor_max_warm_writers_per_core: u16,
|
||||
|
||||
/// Hard global cap for active ME writers.
|
||||
#[serde(default = "default_me_adaptive_floor_max_active_writers_global")]
|
||||
pub me_adaptive_floor_max_active_writers_global: u32,
|
||||
|
||||
/// Hard global cap for warm ME writers.
|
||||
#[serde(default = "default_me_adaptive_floor_max_warm_writers_global")]
|
||||
pub me_adaptive_floor_max_warm_writers_global: u32,
|
||||
|
||||
/// Connect attempts for the selected upstream before returning error/fallback.
|
||||
#[serde(default = "default_upstream_connect_retry_attempts")]
|
||||
pub upstream_connect_retry_attempts: u32,
|
||||
@@ -473,6 +654,10 @@ pub struct GeneralConfig {
|
||||
#[serde(default = "default_upstream_connect_retry_backoff_ms")]
|
||||
pub upstream_connect_retry_backoff_ms: u64,
|
||||
|
||||
/// Total wall-clock budget in milliseconds for one upstream connect request across retries.
|
||||
#[serde(default = "default_upstream_connect_budget_ms")]
|
||||
pub upstream_connect_budget_ms: u64,
|
||||
|
||||
/// Consecutive failed requests before upstream is marked unhealthy.
|
||||
#[serde(default = "default_upstream_unhealthy_fail_threshold")]
|
||||
pub upstream_unhealthy_fail_threshold: u32,
|
||||
@@ -489,6 +674,10 @@ pub struct GeneralConfig {
|
||||
#[serde(default = "default_unknown_dc_log_path")]
|
||||
pub unknown_dc_log_path: Option<String>,
|
||||
|
||||
/// Enable unknown-DC file logging.
|
||||
#[serde(default = "default_unknown_dc_file_log_enabled")]
|
||||
pub unknown_dc_file_log_enabled: bool,
|
||||
|
||||
#[serde(default)]
|
||||
pub log_level: LogLevel,
|
||||
|
||||
@@ -516,6 +705,47 @@ pub struct GeneralConfig {
|
||||
#[serde(default = "default_me_route_backpressure_high_watermark_pct")]
|
||||
pub me_route_backpressure_high_watermark_pct: u8,
|
||||
|
||||
/// Health monitor interval in milliseconds while writer coverage is degraded.
|
||||
#[serde(default = "default_me_health_interval_ms_unhealthy")]
|
||||
pub me_health_interval_ms_unhealthy: u64,
|
||||
|
||||
/// Health monitor interval in milliseconds while writer coverage is stable.
|
||||
#[serde(default = "default_me_health_interval_ms_healthy")]
|
||||
pub me_health_interval_ms_healthy: u64,
|
||||
|
||||
/// Poll interval in milliseconds for conditional-admission state checks.
|
||||
#[serde(default = "default_me_admission_poll_ms")]
|
||||
pub me_admission_poll_ms: u64,
|
||||
|
||||
/// Cooldown for repetitive ME warning logs in milliseconds.
|
||||
#[serde(default = "default_me_warn_rate_limit_ms")]
|
||||
pub me_warn_rate_limit_ms: u64,
|
||||
|
||||
/// ME route behavior when no writer is immediately available.
|
||||
#[serde(default)]
|
||||
pub me_route_no_writer_mode: MeRouteNoWriterMode,
|
||||
|
||||
/// Maximum wait time in milliseconds for async-recovery failfast mode.
|
||||
#[serde(default = "default_me_route_no_writer_wait_ms")]
|
||||
pub me_route_no_writer_wait_ms: u64,
|
||||
|
||||
/// Maximum cumulative wait in milliseconds for hybrid no-writer mode before failfast.
|
||||
#[serde(default = "default_me_route_hybrid_max_wait_ms")]
|
||||
pub me_route_hybrid_max_wait_ms: u64,
|
||||
|
||||
/// Maximum wait in milliseconds for blocking ME writer channel send fallback.
|
||||
/// `0` keeps legacy unbounded wait behavior.
|
||||
#[serde(default = "default_me_route_blocking_send_timeout_ms")]
|
||||
pub me_route_blocking_send_timeout_ms: u64,
|
||||
|
||||
/// Number of inline recovery attempts in legacy mode.
|
||||
#[serde(default = "default_me_route_inline_recovery_attempts")]
|
||||
pub me_route_inline_recovery_attempts: u32,
|
||||
|
||||
/// Maximum wait time in milliseconds for inline recovery in legacy mode.
|
||||
#[serde(default = "default_me_route_inline_recovery_wait_ms")]
|
||||
pub me_route_inline_recovery_wait_ms: u64,
|
||||
|
||||
/// [general.links] — proxy link generation overrides.
|
||||
#[serde(default)]
|
||||
pub links: LinksConfig,
|
||||
@@ -590,6 +820,35 @@ pub struct GeneralConfig {
|
||||
#[serde(default = "default_me_pool_drain_ttl_secs")]
|
||||
pub me_pool_drain_ttl_secs: u64,
|
||||
|
||||
/// Force-remove any draining writer on the next cleanup tick, regardless of age/deadline.
|
||||
#[serde(default = "default_me_instadrain")]
|
||||
pub me_instadrain: bool,
|
||||
|
||||
/// Maximum allowed number of draining ME writers before oldest ones are force-closed in batches.
|
||||
/// Set to 0 to disable threshold-based draining cleanup and keep timeout-only behavior.
|
||||
#[serde(default = "default_me_pool_drain_threshold")]
|
||||
pub me_pool_drain_threshold: u64,
|
||||
|
||||
/// Enable staged client eviction for draining ME writers that remain non-empty past TTL.
|
||||
#[serde(default = "default_me_pool_drain_soft_evict_enabled")]
|
||||
pub me_pool_drain_soft_evict_enabled: bool,
|
||||
|
||||
/// Extra grace in seconds after drain TTL before soft-eviction stage starts.
|
||||
#[serde(default = "default_me_pool_drain_soft_evict_grace_secs")]
|
||||
pub me_pool_drain_soft_evict_grace_secs: u64,
|
||||
|
||||
/// Maximum number of client sessions to evict from one draining writer per health tick.
|
||||
#[serde(default = "default_me_pool_drain_soft_evict_per_writer")]
|
||||
pub me_pool_drain_soft_evict_per_writer: u8,
|
||||
|
||||
/// Soft-eviction budget per CPU core for one health tick.
|
||||
#[serde(default = "default_me_pool_drain_soft_evict_budget_per_core")]
|
||||
pub me_pool_drain_soft_evict_budget_per_core: u16,
|
||||
|
||||
/// Cooldown for repetitive soft-eviction on the same writer in milliseconds.
|
||||
#[serde(default = "default_me_pool_drain_soft_evict_cooldown_ms")]
|
||||
pub me_pool_drain_soft_evict_cooldown_ms: u64,
|
||||
|
||||
/// Policy for new binds on stale draining writers.
|
||||
#[serde(default)]
|
||||
pub me_bind_stale_mode: MeBindStaleMode,
|
||||
@@ -604,7 +863,7 @@ pub struct GeneralConfig {
|
||||
pub me_pool_min_fresh_ratio: f32,
|
||||
|
||||
/// Drain timeout in seconds for stale ME writers after endpoint map changes.
|
||||
/// Set to 0 to keep stale writers draining indefinitely (no force-close).
|
||||
/// Set to 0 to use the runtime safety fallback timeout.
|
||||
#[serde(default = "default_me_reinit_drain_timeout_secs")]
|
||||
pub me_reinit_drain_timeout_secs: u64,
|
||||
|
||||
@@ -634,6 +893,14 @@ pub struct GeneralConfig {
|
||||
#[serde(default = "default_me_deterministic_writer_sort")]
|
||||
pub me_deterministic_writer_sort: bool,
|
||||
|
||||
/// Writer selection mode for ME route bind path.
|
||||
#[serde(default)]
|
||||
pub me_writer_pick_mode: MeWriterPickMode,
|
||||
|
||||
/// Number of candidates sampled by writer picker in `p2c` mode.
|
||||
#[serde(default = "default_me_writer_pick_sample_size")]
|
||||
pub me_writer_pick_sample_size: u8,
|
||||
|
||||
/// Enable NTP drift check at startup.
|
||||
#[serde(default = "default_ntp_check")]
|
||||
pub ntp_check: bool,
|
||||
@@ -654,12 +921,15 @@ pub struct GeneralConfig {
|
||||
impl Default for GeneralConfig {
|
||||
fn default() -> Self {
|
||||
Self {
|
||||
data_path: None,
|
||||
modes: ProxyModes::default(),
|
||||
prefer_ipv6: false,
|
||||
fast_mode: default_true(),
|
||||
use_middle_proxy: default_true(),
|
||||
ad_tag: None,
|
||||
proxy_secret_path: default_proxy_secret_path(),
|
||||
proxy_config_v4_cache_path: default_proxy_config_v4_cache_path(),
|
||||
proxy_config_v6_cache_path: default_proxy_config_v6_cache_path(),
|
||||
middle_proxy_nat_ip: None,
|
||||
middle_proxy_nat_probe: default_true(),
|
||||
middle_proxy_nat_stun: default_middle_proxy_nat_stun(),
|
||||
@@ -667,11 +937,27 @@ impl Default for GeneralConfig {
|
||||
stun_nat_probe_concurrency: default_stun_nat_probe_concurrency(),
|
||||
middle_proxy_pool_size: default_pool_size(),
|
||||
middle_proxy_warm_standby: default_middle_proxy_warm_standby(),
|
||||
me_init_retry_attempts: default_me_init_retry_attempts(),
|
||||
me2dc_fallback: default_me2dc_fallback(),
|
||||
me_keepalive_enabled: default_true(),
|
||||
me_keepalive_interval_secs: default_keepalive_interval(),
|
||||
me_keepalive_jitter_secs: default_keepalive_jitter(),
|
||||
me_keepalive_payload_random: default_true(),
|
||||
rpc_proxy_req_every: default_rpc_proxy_req_every(),
|
||||
me_writer_cmd_channel_capacity: default_me_writer_cmd_channel_capacity(),
|
||||
me_route_channel_capacity: default_me_route_channel_capacity(),
|
||||
me_c2me_channel_capacity: default_me_c2me_channel_capacity(),
|
||||
me_c2me_send_timeout_ms: default_me_c2me_send_timeout_ms(),
|
||||
me_reader_route_data_wait_ms: default_me_reader_route_data_wait_ms(),
|
||||
me_d2c_flush_batch_max_frames: default_me_d2c_flush_batch_max_frames(),
|
||||
me_d2c_flush_batch_max_bytes: default_me_d2c_flush_batch_max_bytes(),
|
||||
me_d2c_flush_batch_max_delay_us: default_me_d2c_flush_batch_max_delay_us(),
|
||||
me_d2c_ack_flush_immediate: default_me_d2c_ack_flush_immediate(),
|
||||
me_quota_soft_overshoot_bytes: default_me_quota_soft_overshoot_bytes(),
|
||||
me_d2c_frame_buf_shrink_threshold_bytes:
|
||||
default_me_d2c_frame_buf_shrink_threshold_bytes(),
|
||||
direct_relay_copy_buf_c2s_bytes: default_direct_relay_copy_buf_c2s_bytes(),
|
||||
direct_relay_copy_buf_s2c_bytes: default_direct_relay_copy_buf_s2c_bytes(),
|
||||
me_warmup_stagger_enabled: default_true(),
|
||||
me_warmup_step_delay_ms: default_warmup_step_delay_ms(),
|
||||
me_warmup_step_jitter_ms: default_warmup_step_jitter_ms(),
|
||||
@@ -680,28 +966,64 @@ impl Default for GeneralConfig {
|
||||
me_reconnect_backoff_cap_ms: default_reconnect_backoff_cap_ms(),
|
||||
me_reconnect_fast_retry_count: default_me_reconnect_fast_retry_count(),
|
||||
me_single_endpoint_shadow_writers: default_me_single_endpoint_shadow_writers(),
|
||||
me_single_endpoint_outage_mode_enabled: default_me_single_endpoint_outage_mode_enabled(),
|
||||
me_single_endpoint_outage_disable_quarantine: default_me_single_endpoint_outage_disable_quarantine(),
|
||||
me_single_endpoint_outage_backoff_min_ms: default_me_single_endpoint_outage_backoff_min_ms(),
|
||||
me_single_endpoint_outage_backoff_max_ms: default_me_single_endpoint_outage_backoff_max_ms(),
|
||||
me_single_endpoint_shadow_rotate_every_secs: default_me_single_endpoint_shadow_rotate_every_secs(),
|
||||
me_single_endpoint_outage_mode_enabled: default_me_single_endpoint_outage_mode_enabled(
|
||||
),
|
||||
me_single_endpoint_outage_disable_quarantine:
|
||||
default_me_single_endpoint_outage_disable_quarantine(),
|
||||
me_single_endpoint_outage_backoff_min_ms:
|
||||
default_me_single_endpoint_outage_backoff_min_ms(),
|
||||
me_single_endpoint_outage_backoff_max_ms:
|
||||
default_me_single_endpoint_outage_backoff_max_ms(),
|
||||
me_single_endpoint_shadow_rotate_every_secs:
|
||||
default_me_single_endpoint_shadow_rotate_every_secs(),
|
||||
me_floor_mode: MeFloorMode::default(),
|
||||
me_adaptive_floor_idle_secs: default_me_adaptive_floor_idle_secs(),
|
||||
me_adaptive_floor_min_writers_single_endpoint: default_me_adaptive_floor_min_writers_single_endpoint(),
|
||||
me_adaptive_floor_min_writers_single_endpoint:
|
||||
default_me_adaptive_floor_min_writers_single_endpoint(),
|
||||
me_adaptive_floor_min_writers_multi_endpoint:
|
||||
default_me_adaptive_floor_min_writers_multi_endpoint(),
|
||||
me_adaptive_floor_recover_grace_secs: default_me_adaptive_floor_recover_grace_secs(),
|
||||
me_adaptive_floor_writers_per_core_total:
|
||||
default_me_adaptive_floor_writers_per_core_total(),
|
||||
me_adaptive_floor_cpu_cores_override: default_me_adaptive_floor_cpu_cores_override(),
|
||||
me_adaptive_floor_max_extra_writers_single_per_core:
|
||||
default_me_adaptive_floor_max_extra_writers_single_per_core(),
|
||||
me_adaptive_floor_max_extra_writers_multi_per_core:
|
||||
default_me_adaptive_floor_max_extra_writers_multi_per_core(),
|
||||
me_adaptive_floor_max_active_writers_per_core:
|
||||
default_me_adaptive_floor_max_active_writers_per_core(),
|
||||
me_adaptive_floor_max_warm_writers_per_core:
|
||||
default_me_adaptive_floor_max_warm_writers_per_core(),
|
||||
me_adaptive_floor_max_active_writers_global:
|
||||
default_me_adaptive_floor_max_active_writers_global(),
|
||||
me_adaptive_floor_max_warm_writers_global:
|
||||
default_me_adaptive_floor_max_warm_writers_global(),
|
||||
upstream_connect_retry_attempts: default_upstream_connect_retry_attempts(),
|
||||
upstream_connect_retry_backoff_ms: default_upstream_connect_retry_backoff_ms(),
|
||||
upstream_connect_budget_ms: default_upstream_connect_budget_ms(),
|
||||
upstream_unhealthy_fail_threshold: default_upstream_unhealthy_fail_threshold(),
|
||||
upstream_connect_failfast_hard_errors: default_upstream_connect_failfast_hard_errors(),
|
||||
stun_iface_mismatch_ignore: false,
|
||||
unknown_dc_log_path: default_unknown_dc_log_path(),
|
||||
unknown_dc_file_log_enabled: default_unknown_dc_file_log_enabled(),
|
||||
log_level: LogLevel::Normal,
|
||||
disable_colors: false,
|
||||
telemetry: TelemetryConfig::default(),
|
||||
me_socks_kdf_policy: MeSocksKdfPolicy::Strict,
|
||||
me_route_backpressure_base_timeout_ms: default_me_route_backpressure_base_timeout_ms(),
|
||||
me_route_backpressure_high_timeout_ms: default_me_route_backpressure_high_timeout_ms(),
|
||||
me_route_backpressure_high_watermark_pct: default_me_route_backpressure_high_watermark_pct(),
|
||||
me_route_backpressure_high_watermark_pct:
|
||||
default_me_route_backpressure_high_watermark_pct(),
|
||||
me_health_interval_ms_unhealthy: default_me_health_interval_ms_unhealthy(),
|
||||
me_health_interval_ms_healthy: default_me_health_interval_ms_healthy(),
|
||||
me_admission_poll_ms: default_me_admission_poll_ms(),
|
||||
me_warn_rate_limit_ms: default_me_warn_rate_limit_ms(),
|
||||
me_route_no_writer_mode: MeRouteNoWriterMode::default(),
|
||||
me_route_no_writer_wait_ms: default_me_route_no_writer_wait_ms(),
|
||||
me_route_hybrid_max_wait_ms: default_me_route_hybrid_max_wait_ms(),
|
||||
me_route_blocking_send_timeout_ms: default_me_route_blocking_send_timeout_ms(),
|
||||
me_route_inline_recovery_attempts: default_me_route_inline_recovery_attempts(),
|
||||
me_route_inline_recovery_wait_ms: default_me_route_inline_recovery_wait_ms(),
|
||||
links: LinksConfig::default(),
|
||||
crypto_pending_buffer: default_crypto_pending_buffer(),
|
||||
max_client_frame: default_max_client_frame(),
|
||||
@@ -717,7 +1039,8 @@ impl Default for GeneralConfig {
|
||||
me_hardswap_warmup_delay_min_ms: default_me_hardswap_warmup_delay_min_ms(),
|
||||
me_hardswap_warmup_delay_max_ms: default_me_hardswap_warmup_delay_max_ms(),
|
||||
me_hardswap_warmup_extra_passes: default_me_hardswap_warmup_extra_passes(),
|
||||
me_hardswap_warmup_pass_backoff_base_ms: default_me_hardswap_warmup_pass_backoff_base_ms(),
|
||||
me_hardswap_warmup_pass_backoff_base_ms:
|
||||
default_me_hardswap_warmup_pass_backoff_base_ms(),
|
||||
me_config_stable_snapshots: default_me_config_stable_snapshots(),
|
||||
me_config_apply_cooldown_secs: default_me_config_apply_cooldown_secs(),
|
||||
me_snapshot_require_http_2xx: default_me_snapshot_require_http_2xx(),
|
||||
@@ -728,6 +1051,14 @@ impl Default for GeneralConfig {
|
||||
me_secret_atomic_snapshot: default_me_secret_atomic_snapshot(),
|
||||
proxy_secret_len_max: default_proxy_secret_len_max(),
|
||||
me_pool_drain_ttl_secs: default_me_pool_drain_ttl_secs(),
|
||||
me_instadrain: default_me_instadrain(),
|
||||
me_pool_drain_threshold: default_me_pool_drain_threshold(),
|
||||
me_pool_drain_soft_evict_enabled: default_me_pool_drain_soft_evict_enabled(),
|
||||
me_pool_drain_soft_evict_grace_secs: default_me_pool_drain_soft_evict_grace_secs(),
|
||||
me_pool_drain_soft_evict_per_writer: default_me_pool_drain_soft_evict_per_writer(),
|
||||
me_pool_drain_soft_evict_budget_per_core:
|
||||
default_me_pool_drain_soft_evict_budget_per_core(),
|
||||
me_pool_drain_soft_evict_cooldown_ms: default_me_pool_drain_soft_evict_cooldown_ms(),
|
||||
me_bind_stale_mode: MeBindStaleMode::default(),
|
||||
me_bind_stale_ttl_secs: default_me_bind_stale_ttl_secs(),
|
||||
me_pool_min_fresh_ratio: default_me_pool_min_fresh_ratio(),
|
||||
@@ -738,6 +1069,8 @@ impl Default for GeneralConfig {
|
||||
me_reinit_trigger_channel: default_me_reinit_trigger_channel(),
|
||||
me_reinit_coalesce_window_ms: default_me_reinit_coalesce_window_ms(),
|
||||
me_deterministic_writer_sort: default_me_deterministic_writer_sort(),
|
||||
me_writer_pick_mode: MeWriterPickMode::default(),
|
||||
me_writer_pick_sample_size: default_me_writer_pick_sample_size(),
|
||||
ntp_check: default_ntp_check(),
|
||||
ntp_servers: default_ntp_servers(),
|
||||
auto_degradation_enabled: default_true(),
|
||||
@@ -750,8 +1083,10 @@ impl GeneralConfig {
|
||||
/// Resolve the active updater interval for ME infrastructure refresh tasks.
|
||||
/// `update_every` has priority, otherwise legacy proxy_*_auto_reload_secs are used.
|
||||
pub fn effective_update_every_secs(&self) -> u64 {
|
||||
self.update_every
|
||||
.unwrap_or_else(|| self.proxy_secret_auto_reload_secs.min(self.proxy_config_auto_reload_secs))
|
||||
self.update_every.unwrap_or_else(|| {
|
||||
self.proxy_secret_auto_reload_secs
|
||||
.min(self.proxy_config_auto_reload_secs)
|
||||
})
|
||||
}
|
||||
|
||||
/// Resolve periodic zero-downtime reinit interval for ME writers.
|
||||
@@ -761,8 +1096,13 @@ impl GeneralConfig {
|
||||
|
||||
/// Resolve force-close timeout for stale writers.
|
||||
/// `me_reinit_drain_timeout_secs` remains backward-compatible alias.
|
||||
/// A configured `0` uses the runtime safety fallback (300s).
|
||||
pub fn effective_me_pool_force_close_secs(&self) -> u64 {
|
||||
self.me_reinit_drain_timeout_secs
|
||||
if self.me_reinit_drain_timeout_secs == 0 {
|
||||
300
|
||||
} else {
|
||||
self.me_reinit_drain_timeout_secs
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -797,7 +1137,7 @@ impl Default for LinksConfig {
|
||||
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
|
||||
pub struct ApiConfig {
|
||||
/// Enable or disable REST API.
|
||||
#[serde(default)]
|
||||
#[serde(default = "default_true")]
|
||||
pub enabled: bool,
|
||||
|
||||
/// Listen address for API in `IP:PORT` format.
|
||||
@@ -825,6 +1165,22 @@ pub struct ApiConfig {
|
||||
#[serde(default = "default_api_minimal_runtime_cache_ttl_ms")]
|
||||
pub minimal_runtime_cache_ttl_ms: u64,
|
||||
|
||||
/// Enables runtime edge endpoints with optional cached aggregation.
|
||||
#[serde(default = "default_api_runtime_edge_enabled")]
|
||||
pub runtime_edge_enabled: bool,
|
||||
|
||||
/// Cache TTL for runtime edge aggregation payloads in milliseconds.
|
||||
#[serde(default = "default_api_runtime_edge_cache_ttl_ms")]
|
||||
pub runtime_edge_cache_ttl_ms: u64,
|
||||
|
||||
/// Top-N limit for edge connection leaderboard payloads.
|
||||
#[serde(default = "default_api_runtime_edge_top_n")]
|
||||
pub runtime_edge_top_n: usize,
|
||||
|
||||
/// Ring-buffer capacity for runtime edge control-plane events.
|
||||
#[serde(default = "default_api_runtime_edge_events_capacity")]
|
||||
pub runtime_edge_events_capacity: usize,
|
||||
|
||||
/// Read-only mode: mutating endpoints are rejected.
|
||||
#[serde(default)]
|
||||
pub read_only: bool,
|
||||
@@ -833,13 +1189,17 @@ pub struct ApiConfig {
|
||||
impl Default for ApiConfig {
|
||||
fn default() -> Self {
|
||||
Self {
|
||||
enabled: false,
|
||||
enabled: default_true(),
|
||||
listen: default_api_listen(),
|
||||
whitelist: default_api_whitelist(),
|
||||
auth_header: String::new(),
|
||||
request_body_limit_bytes: default_api_request_body_limit_bytes(),
|
||||
minimal_runtime_enabled: default_api_minimal_runtime_enabled(),
|
||||
minimal_runtime_cache_ttl_ms: default_api_minimal_runtime_cache_ttl_ms(),
|
||||
runtime_edge_enabled: default_api_runtime_edge_enabled(),
|
||||
runtime_edge_cache_ttl_ms: default_api_runtime_edge_cache_ttl_ms(),
|
||||
runtime_edge_top_n: default_api_runtime_edge_top_n(),
|
||||
runtime_edge_events_capacity: default_api_runtime_edge_events_capacity(),
|
||||
read_only: false,
|
||||
}
|
||||
}
|
||||
@@ -874,9 +1234,29 @@ pub struct ServerConfig {
|
||||
#[serde(default)]
|
||||
pub proxy_protocol: bool,
|
||||
|
||||
/// Timeout in milliseconds for reading and parsing PROXY protocol headers.
|
||||
#[serde(default = "default_proxy_protocol_header_timeout_ms")]
|
||||
pub proxy_protocol_header_timeout_ms: u64,
|
||||
|
||||
/// Trusted source CIDRs allowed to send incoming PROXY protocol headers.
|
||||
///
|
||||
/// If this field is omitted in config, it defaults to trust-all CIDRs
|
||||
/// (`0.0.0.0/0` and `::/0`). If it is explicitly set to an empty list,
|
||||
/// all PROXY protocol headers are rejected.
|
||||
#[serde(default = "default_proxy_protocol_trusted_cidrs")]
|
||||
pub proxy_protocol_trusted_cidrs: Vec<IpNetwork>,
|
||||
|
||||
/// Port for the Prometheus-compatible metrics endpoint.
|
||||
/// Enables metrics when set; binds on all interfaces (dual-stack) by default.
|
||||
#[serde(default)]
|
||||
pub metrics_port: Option<u16>,
|
||||
|
||||
/// Listen address for metrics in `IP:PORT` format (e.g. `"127.0.0.1:9090"`).
|
||||
/// When set, takes precedence over `metrics_port` and binds on the specified address only.
|
||||
#[serde(default)]
|
||||
pub metrics_listen: Option<String>,
|
||||
|
||||
/// CIDR whitelist for the metrics endpoint.
|
||||
#[serde(default = "default_metrics_whitelist")]
|
||||
pub metrics_whitelist: Vec<IpNetwork>,
|
||||
|
||||
@@ -885,6 +1265,16 @@ pub struct ServerConfig {
|
||||
|
||||
#[serde(default)]
|
||||
pub listeners: Vec<ListenerConfig>,
|
||||
|
||||
/// Maximum number of concurrent client connections.
|
||||
/// 0 means unlimited.
|
||||
#[serde(default = "default_server_max_connections")]
|
||||
pub max_connections: u32,
|
||||
|
||||
/// Maximum wait in milliseconds while acquiring a connection slot permit.
|
||||
/// `0` keeps legacy unbounded wait behavior.
|
||||
#[serde(default = "default_accept_permit_timeout_ms")]
|
||||
pub accept_permit_timeout_ms: u64,
|
||||
}
|
||||
|
||||
impl Default for ServerConfig {
|
||||
@@ -897,10 +1287,15 @@ impl Default for ServerConfig {
|
||||
listen_unix_sock_perm: None,
|
||||
listen_tcp: None,
|
||||
proxy_protocol: false,
|
||||
proxy_protocol_header_timeout_ms: default_proxy_protocol_header_timeout_ms(),
|
||||
proxy_protocol_trusted_cidrs: default_proxy_protocol_trusted_cidrs(),
|
||||
metrics_port: None,
|
||||
metrics_listen: None,
|
||||
metrics_whitelist: default_metrics_whitelist(),
|
||||
api: ApiConfig::default(),
|
||||
listeners: Vec::new(),
|
||||
max_connections: default_server_max_connections(),
|
||||
accept_permit_timeout_ms: default_accept_permit_timeout_ms(),
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -910,6 +1305,24 @@ pub struct TimeoutsConfig {
|
||||
#[serde(default = "default_handshake_timeout")]
|
||||
pub client_handshake: u64,
|
||||
|
||||
/// Enables soft/hard relay client idle policy for middle-relay sessions.
|
||||
#[serde(default = "default_relay_idle_policy_v2_enabled")]
|
||||
pub relay_idle_policy_v2_enabled: bool,
|
||||
|
||||
/// Soft idle threshold for middle-relay client uplink activity in seconds.
|
||||
/// Hitting this threshold marks the session as idle-candidate, but does not close it.
|
||||
#[serde(default = "default_relay_client_idle_soft_secs")]
|
||||
pub relay_client_idle_soft_secs: u64,
|
||||
|
||||
/// Hard idle threshold for middle-relay client uplink activity in seconds.
|
||||
/// Hitting this threshold closes the session.
|
||||
#[serde(default = "default_relay_client_idle_hard_secs")]
|
||||
pub relay_client_idle_hard_secs: u64,
|
||||
|
||||
/// Additional grace in seconds added to hard idle window after recent downstream activity.
|
||||
#[serde(default = "default_relay_idle_grace_after_downstream_activity_secs")]
|
||||
pub relay_idle_grace_after_downstream_activity_secs: u64,
|
||||
|
||||
#[serde(default = "default_connect_timeout")]
|
||||
pub tg_connect: u64,
|
||||
|
||||
@@ -932,6 +1345,11 @@ impl Default for TimeoutsConfig {
|
||||
fn default() -> Self {
|
||||
Self {
|
||||
client_handshake: default_handshake_timeout(),
|
||||
relay_idle_policy_v2_enabled: default_relay_idle_policy_v2_enabled(),
|
||||
relay_client_idle_soft_secs: default_relay_client_idle_soft_secs(),
|
||||
relay_client_idle_hard_secs: default_relay_client_idle_hard_secs(),
|
||||
relay_idle_grace_after_downstream_activity_secs:
|
||||
default_relay_idle_grace_after_downstream_activity_secs(),
|
||||
tg_connect: default_connect_timeout(),
|
||||
client_keepalive: default_keepalive(),
|
||||
client_ack: default_ack_timeout(),
|
||||
@@ -941,6 +1359,90 @@ impl Default for TimeoutsConfig {
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize, Default)]
|
||||
#[serde(rename_all = "lowercase")]
|
||||
pub enum UnknownSniAction {
|
||||
#[default]
|
||||
Drop,
|
||||
Mask,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, Serialize, Deserialize)]
|
||||
#[serde(rename_all = "snake_case")]
|
||||
pub enum TlsFetchProfile {
|
||||
ModernChromeLike,
|
||||
ModernFirefoxLike,
|
||||
CompatTls12,
|
||||
LegacyMinimal,
|
||||
}
|
||||
|
||||
impl TlsFetchProfile {
|
||||
pub fn as_str(self) -> &'static str {
|
||||
match self {
|
||||
TlsFetchProfile::ModernChromeLike => "modern_chrome_like",
|
||||
TlsFetchProfile::ModernFirefoxLike => "modern_firefox_like",
|
||||
TlsFetchProfile::CompatTls12 => "compat_tls12",
|
||||
TlsFetchProfile::LegacyMinimal => "legacy_minimal",
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
fn default_tls_fetch_profiles() -> Vec<TlsFetchProfile> {
|
||||
vec![
|
||||
TlsFetchProfile::ModernChromeLike,
|
||||
TlsFetchProfile::ModernFirefoxLike,
|
||||
TlsFetchProfile::CompatTls12,
|
||||
TlsFetchProfile::LegacyMinimal,
|
||||
]
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||
pub struct TlsFetchConfig {
|
||||
/// Ordered list of ClientHello profiles used for adaptive fallback.
|
||||
#[serde(default = "default_tls_fetch_profiles")]
|
||||
pub profiles: Vec<TlsFetchProfile>,
|
||||
|
||||
/// When true and upstream route is configured, TLS fetch fails closed on
|
||||
/// upstream connect errors and does not fallback to direct TCP.
|
||||
#[serde(default = "default_tls_fetch_strict_route")]
|
||||
pub strict_route: bool,
|
||||
|
||||
/// Timeout per one profile attempt in milliseconds.
|
||||
#[serde(default = "default_tls_fetch_attempt_timeout_ms")]
|
||||
pub attempt_timeout_ms: u64,
|
||||
|
||||
/// Total wall-clock budget in milliseconds across all profile attempts.
|
||||
#[serde(default = "default_tls_fetch_total_budget_ms")]
|
||||
pub total_budget_ms: u64,
|
||||
|
||||
/// Adds GREASE-style values into selected ClientHello extensions.
|
||||
#[serde(default)]
|
||||
pub grease_enabled: bool,
|
||||
|
||||
/// Produces deterministic ClientHello randomness for debugging/tests.
|
||||
#[serde(default)]
|
||||
pub deterministic: bool,
|
||||
|
||||
/// TTL for winner-profile cache entries in seconds.
|
||||
/// Set to 0 to disable profile cache.
|
||||
#[serde(default = "default_tls_fetch_profile_cache_ttl_secs")]
|
||||
pub profile_cache_ttl_secs: u64,
|
||||
}
|
||||
|
||||
impl Default for TlsFetchConfig {
|
||||
fn default() -> Self {
|
||||
Self {
|
||||
profiles: default_tls_fetch_profiles(),
|
||||
strict_route: default_tls_fetch_strict_route(),
|
||||
attempt_timeout_ms: default_tls_fetch_attempt_timeout_ms(),
|
||||
total_budget_ms: default_tls_fetch_total_budget_ms(),
|
||||
grease_enabled: false,
|
||||
deterministic: false,
|
||||
profile_cache_ttl_secs: default_tls_fetch_profile_cache_ttl_secs(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||
pub struct AntiCensorshipConfig {
|
||||
#[serde(default = "default_tls_domain")]
|
||||
@@ -950,6 +1452,19 @@ pub struct AntiCensorshipConfig {
|
||||
#[serde(default)]
|
||||
pub tls_domains: Vec<String>,
|
||||
|
||||
/// Policy for TLS ClientHello with unknown (non-configured) SNI.
|
||||
#[serde(default)]
|
||||
pub unknown_sni_action: UnknownSniAction,
|
||||
|
||||
/// Upstream scope used for TLS front metadata fetches.
|
||||
/// Empty value keeps default upstream routing behavior.
|
||||
#[serde(default = "default_tls_fetch_scope")]
|
||||
pub tls_fetch_scope: String,
|
||||
|
||||
/// Fetch strategy for TLS front metadata bootstrap and periodic refresh.
|
||||
#[serde(default)]
|
||||
pub tls_fetch: TlsFetchConfig,
|
||||
|
||||
#[serde(default = "default_true")]
|
||||
pub mask: bool,
|
||||
|
||||
@@ -1000,6 +1515,54 @@ pub struct AntiCensorshipConfig {
|
||||
/// Allows the backend to see the real client IP.
|
||||
#[serde(default)]
|
||||
pub mask_proxy_protocol: u8,
|
||||
|
||||
/// Enable shape-channel hardening on mask backend path by padding
|
||||
/// client->mask stream tail to configured buckets on stream end.
|
||||
#[serde(default = "default_mask_shape_hardening")]
|
||||
pub mask_shape_hardening: bool,
|
||||
|
||||
/// Opt-in aggressive shape hardening mode.
|
||||
/// When enabled, masking may shape some backend-silent timeout paths and
|
||||
/// enforces strictly positive above-cap blur when blur is enabled.
|
||||
#[serde(default = "default_mask_shape_hardening_aggressive_mode")]
|
||||
pub mask_shape_hardening_aggressive_mode: bool,
|
||||
|
||||
/// Minimum bucket size for mask shape hardening padding.
|
||||
#[serde(default = "default_mask_shape_bucket_floor_bytes")]
|
||||
pub mask_shape_bucket_floor_bytes: usize,
|
||||
|
||||
/// Maximum bucket size for mask shape hardening padding.
|
||||
#[serde(default = "default_mask_shape_bucket_cap_bytes")]
|
||||
pub mask_shape_bucket_cap_bytes: usize,
|
||||
|
||||
/// Add bounded random tail bytes even when total bytes already exceed
|
||||
/// mask_shape_bucket_cap_bytes.
|
||||
#[serde(default = "default_mask_shape_above_cap_blur")]
|
||||
pub mask_shape_above_cap_blur: bool,
|
||||
|
||||
/// Maximum random bytes appended above cap when above-cap blur is enabled.
|
||||
#[serde(default = "default_mask_shape_above_cap_blur_max_bytes")]
|
||||
pub mask_shape_above_cap_blur_max_bytes: usize,
|
||||
|
||||
/// Maximum bytes relayed per direction on unauthenticated masking fallback paths.
|
||||
#[serde(default = "default_mask_relay_max_bytes")]
|
||||
pub mask_relay_max_bytes: usize,
|
||||
|
||||
/// Prefetch timeout (ms) for extending fragmented masking classifier window.
|
||||
#[serde(default = "default_mask_classifier_prefetch_timeout_ms")]
|
||||
pub mask_classifier_prefetch_timeout_ms: u64,
|
||||
|
||||
/// Enable outcome-time normalization envelope for masking fallback.
|
||||
#[serde(default = "default_mask_timing_normalization_enabled")]
|
||||
pub mask_timing_normalization_enabled: bool,
|
||||
|
||||
/// Lower bound (ms) for masking outcome timing envelope.
|
||||
#[serde(default = "default_mask_timing_normalization_floor_ms")]
|
||||
pub mask_timing_normalization_floor_ms: u64,
|
||||
|
||||
/// Upper bound (ms) for masking outcome timing envelope.
|
||||
#[serde(default = "default_mask_timing_normalization_ceiling_ms")]
|
||||
pub mask_timing_normalization_ceiling_ms: u64,
|
||||
}
|
||||
|
||||
impl Default for AntiCensorshipConfig {
|
||||
@@ -1007,6 +1570,9 @@ impl Default for AntiCensorshipConfig {
|
||||
Self {
|
||||
tls_domain: default_tls_domain(),
|
||||
tls_domains: Vec::new(),
|
||||
unknown_sni_action: UnknownSniAction::Drop,
|
||||
tls_fetch_scope: default_tls_fetch_scope(),
|
||||
tls_fetch: TlsFetchConfig::default(),
|
||||
mask: default_true(),
|
||||
mask_host: None,
|
||||
mask_port: default_mask_port(),
|
||||
@@ -1020,6 +1586,17 @@ impl Default for AntiCensorshipConfig {
|
||||
tls_full_cert_ttl_secs: default_tls_full_cert_ttl_secs(),
|
||||
alpn_enforce: default_alpn_enforce(),
|
||||
mask_proxy_protocol: 0,
|
||||
mask_shape_hardening: default_mask_shape_hardening(),
|
||||
mask_shape_hardening_aggressive_mode: default_mask_shape_hardening_aggressive_mode(),
|
||||
mask_shape_bucket_floor_bytes: default_mask_shape_bucket_floor_bytes(),
|
||||
mask_shape_bucket_cap_bytes: default_mask_shape_bucket_cap_bytes(),
|
||||
mask_shape_above_cap_blur: default_mask_shape_above_cap_blur(),
|
||||
mask_shape_above_cap_blur_max_bytes: default_mask_shape_above_cap_blur_max_bytes(),
|
||||
mask_relay_max_bytes: default_mask_relay_max_bytes(),
|
||||
mask_classifier_prefetch_timeout_ms: default_mask_classifier_prefetch_timeout_ms(),
|
||||
mask_timing_normalization_enabled: default_mask_timing_normalization_enabled(),
|
||||
mask_timing_normalization_floor_ms: default_mask_timing_normalization_floor_ms(),
|
||||
mask_timing_normalization_ceiling_ms: default_mask_timing_normalization_ceiling_ms(),
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -1045,6 +1622,17 @@ pub struct AccessConfig {
|
||||
#[serde(default)]
|
||||
pub user_max_unique_ips: HashMap<String, usize>,
|
||||
|
||||
/// Global per-user unique IP limit applied when a user has no individual override.
|
||||
/// `0` disables the inherited limit.
|
||||
#[serde(default = "default_user_max_unique_ips_global_each")]
|
||||
pub user_max_unique_ips_global_each: usize,
|
||||
|
||||
#[serde(default)]
|
||||
pub user_max_unique_ips_mode: UserMaxUniqueIpsMode,
|
||||
|
||||
#[serde(default = "default_user_max_unique_ips_window_secs")]
|
||||
pub user_max_unique_ips_window_secs: u64,
|
||||
|
||||
#[serde(default = "default_replay_check_len")]
|
||||
pub replay_check_len: usize,
|
||||
|
||||
@@ -1064,6 +1652,9 @@ impl Default for AccessConfig {
|
||||
user_expirations: HashMap::new(),
|
||||
user_data_quota: HashMap::new(),
|
||||
user_max_unique_ips: HashMap::new(),
|
||||
user_max_unique_ips_global_each: default_user_max_unique_ips_global_each(),
|
||||
user_max_unique_ips_mode: UserMaxUniqueIpsMode::default(),
|
||||
user_max_unique_ips_window_secs: default_user_max_unique_ips_window_secs(),
|
||||
replay_check_len: default_replay_check_len(),
|
||||
replay_window_secs: default_replay_window_secs(),
|
||||
ignore_time_skew: false,
|
||||
@@ -1098,6 +1689,11 @@ pub enum UpstreamType {
|
||||
#[serde(default)]
|
||||
password: Option<String>,
|
||||
},
|
||||
Shadowsocks {
|
||||
url: String,
|
||||
#[serde(default)]
|
||||
interface: Option<String>,
|
||||
},
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||
@@ -1178,7 +1774,10 @@ impl ShowLink {
|
||||
}
|
||||
|
||||
impl Serialize for ShowLink {
|
||||
fn serialize<S: serde::Serializer>(&self, serializer: S) -> std::result::Result<S::Ok, S::Error> {
|
||||
fn serialize<S: serde::Serializer>(
|
||||
&self,
|
||||
serializer: S,
|
||||
) -> std::result::Result<S::Ok, S::Error> {
|
||||
match self {
|
||||
ShowLink::None => Vec::<String>::new().serialize(serializer),
|
||||
ShowLink::All => serializer.serialize_str("*"),
|
||||
@@ -1188,7 +1787,9 @@ impl Serialize for ShowLink {
|
||||
}
|
||||
|
||||
impl<'de> Deserialize<'de> for ShowLink {
|
||||
fn deserialize<D: serde::Deserializer<'de>>(deserializer: D) -> std::result::Result<Self, D::Error> {
|
||||
fn deserialize<D: serde::Deserializer<'de>>(
|
||||
deserializer: D,
|
||||
) -> std::result::Result<Self, D::Error> {
|
||||
use serde::de;
|
||||
|
||||
struct ShowLinkVisitor;
|
||||
@@ -1204,14 +1805,14 @@ impl<'de> Deserialize<'de> for ShowLink {
|
||||
if v == "*" {
|
||||
Ok(ShowLink::All)
|
||||
} else {
|
||||
Err(de::Error::invalid_value(
|
||||
de::Unexpected::Str(v),
|
||||
&r#""*""#,
|
||||
))
|
||||
Err(de::Error::invalid_value(de::Unexpected::Str(v), &r#""*""#))
|
||||
}
|
||||
}
|
||||
|
||||
fn visit_seq<A: de::SeqAccess<'de>>(self, mut seq: A) -> std::result::Result<ShowLink, A::Error> {
|
||||
fn visit_seq<A: de::SeqAccess<'de>>(
|
||||
self,
|
||||
mut seq: A,
|
||||
) -> std::result::Result<ShowLink, A::Error> {
|
||||
let mut names = Vec::new();
|
||||
while let Some(name) = seq.next_element::<String>()? {
|
||||
names.push(name);
|
||||
|
||||
@@ -13,10 +13,13 @@
|
||||
|
||||
#![allow(dead_code)]
|
||||
|
||||
use aes::Aes256;
|
||||
use ctr::{Ctr128BE, cipher::{KeyIvInit, StreamCipher}};
|
||||
use zeroize::Zeroize;
|
||||
use crate::error::{ProxyError, Result};
|
||||
use aes::Aes256;
|
||||
use ctr::{
|
||||
Ctr128BE,
|
||||
cipher::{KeyIvInit, StreamCipher},
|
||||
};
|
||||
use zeroize::Zeroize;
|
||||
|
||||
type Aes256Ctr = Ctr128BE<Aes256>;
|
||||
|
||||
@@ -42,33 +45,39 @@ impl AesCtr {
|
||||
cipher: Aes256Ctr::new(key.into(), (&iv_bytes).into()),
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
/// Create from key and IV slices
|
||||
pub fn from_key_iv(key: &[u8], iv: &[u8]) -> Result<Self> {
|
||||
if key.len() != 32 {
|
||||
return Err(ProxyError::InvalidKeyLength { expected: 32, got: key.len() });
|
||||
return Err(ProxyError::InvalidKeyLength {
|
||||
expected: 32,
|
||||
got: key.len(),
|
||||
});
|
||||
}
|
||||
if iv.len() != 16 {
|
||||
return Err(ProxyError::InvalidKeyLength { expected: 16, got: iv.len() });
|
||||
return Err(ProxyError::InvalidKeyLength {
|
||||
expected: 16,
|
||||
got: iv.len(),
|
||||
});
|
||||
}
|
||||
|
||||
|
||||
let key: [u8; 32] = key.try_into().unwrap();
|
||||
let iv = u128::from_be_bytes(iv.try_into().unwrap());
|
||||
Ok(Self::new(&key, iv))
|
||||
}
|
||||
|
||||
|
||||
/// Encrypt/decrypt data in-place (CTR mode is symmetric)
|
||||
pub fn apply(&mut self, data: &mut [u8]) {
|
||||
self.cipher.apply_keystream(data);
|
||||
}
|
||||
|
||||
|
||||
/// Encrypt data, returning new buffer
|
||||
pub fn encrypt(&mut self, data: &[u8]) -> Vec<u8> {
|
||||
let mut output = data.to_vec();
|
||||
self.apply(&mut output);
|
||||
output
|
||||
}
|
||||
|
||||
|
||||
/// Decrypt data (for CTR, identical to encrypt)
|
||||
pub fn decrypt(&mut self, data: &[u8]) -> Vec<u8> {
|
||||
self.encrypt(data)
|
||||
@@ -99,27 +108,33 @@ impl Drop for AesCbc {
|
||||
impl AesCbc {
|
||||
/// AES block size
|
||||
const BLOCK_SIZE: usize = 16;
|
||||
|
||||
|
||||
/// Create new AES-CBC cipher with key and IV
|
||||
pub fn new(key: [u8; 32], iv: [u8; 16]) -> Self {
|
||||
Self { key, iv }
|
||||
}
|
||||
|
||||
|
||||
/// Create from slices
|
||||
pub fn from_slices(key: &[u8], iv: &[u8]) -> Result<Self> {
|
||||
if key.len() != 32 {
|
||||
return Err(ProxyError::InvalidKeyLength { expected: 32, got: key.len() });
|
||||
return Err(ProxyError::InvalidKeyLength {
|
||||
expected: 32,
|
||||
got: key.len(),
|
||||
});
|
||||
}
|
||||
if iv.len() != 16 {
|
||||
return Err(ProxyError::InvalidKeyLength { expected: 16, got: iv.len() });
|
||||
return Err(ProxyError::InvalidKeyLength {
|
||||
expected: 16,
|
||||
got: iv.len(),
|
||||
});
|
||||
}
|
||||
|
||||
|
||||
Ok(Self {
|
||||
key: key.try_into().unwrap(),
|
||||
iv: iv.try_into().unwrap(),
|
||||
})
|
||||
}
|
||||
|
||||
|
||||
/// Encrypt a single block using raw AES (no chaining)
|
||||
fn encrypt_block(&self, block: &[u8; 16], key_schedule: &aes::Aes256) -> [u8; 16] {
|
||||
use aes::cipher::BlockEncrypt;
|
||||
@@ -127,7 +142,7 @@ impl AesCbc {
|
||||
key_schedule.encrypt_block((&mut output).into());
|
||||
output
|
||||
}
|
||||
|
||||
|
||||
/// Decrypt a single block using raw AES (no chaining)
|
||||
fn decrypt_block(&self, block: &[u8; 16], key_schedule: &aes::Aes256) -> [u8; 16] {
|
||||
use aes::cipher::BlockDecrypt;
|
||||
@@ -135,7 +150,7 @@ impl AesCbc {
|
||||
key_schedule.decrypt_block((&mut output).into());
|
||||
output
|
||||
}
|
||||
|
||||
|
||||
/// XOR two 16-byte blocks
|
||||
fn xor_blocks(a: &[u8; 16], b: &[u8; 16]) -> [u8; 16] {
|
||||
let mut result = [0u8; 16];
|
||||
@@ -144,27 +159,28 @@ impl AesCbc {
|
||||
}
|
||||
result
|
||||
}
|
||||
|
||||
|
||||
/// Encrypt data using CBC mode with proper chaining
|
||||
///
|
||||
/// CBC Encryption: C[i] = AES_Encrypt(P[i] XOR C[i-1]), where C[-1] = IV
|
||||
pub fn encrypt(&self, data: &[u8]) -> Result<Vec<u8>> {
|
||||
if !data.len().is_multiple_of(Self::BLOCK_SIZE) {
|
||||
return Err(ProxyError::Crypto(
|
||||
format!("CBC data must be aligned to 16 bytes, got {}", data.len())
|
||||
));
|
||||
return Err(ProxyError::Crypto(format!(
|
||||
"CBC data must be aligned to 16 bytes, got {}",
|
||||
data.len()
|
||||
)));
|
||||
}
|
||||
|
||||
|
||||
if data.is_empty() {
|
||||
return Ok(Vec::new());
|
||||
}
|
||||
|
||||
|
||||
use aes::cipher::KeyInit;
|
||||
let key_schedule = aes::Aes256::new((&self.key).into());
|
||||
|
||||
|
||||
let mut result = Vec::with_capacity(data.len());
|
||||
let mut prev_ciphertext = self.iv;
|
||||
|
||||
|
||||
for chunk in data.chunks(Self::BLOCK_SIZE) {
|
||||
let plaintext: [u8; 16] = chunk.try_into().unwrap();
|
||||
let xored = Self::xor_blocks(&plaintext, &prev_ciphertext);
|
||||
@@ -172,30 +188,31 @@ impl AesCbc {
|
||||
prev_ciphertext = ciphertext;
|
||||
result.extend_from_slice(&ciphertext);
|
||||
}
|
||||
|
||||
|
||||
Ok(result)
|
||||
}
|
||||
|
||||
|
||||
/// Decrypt data using CBC mode with proper chaining
|
||||
///
|
||||
/// CBC Decryption: P[i] = AES_Decrypt(C[i]) XOR C[i-1], where C[-1] = IV
|
||||
pub fn decrypt(&self, data: &[u8]) -> Result<Vec<u8>> {
|
||||
if !data.len().is_multiple_of(Self::BLOCK_SIZE) {
|
||||
return Err(ProxyError::Crypto(
|
||||
format!("CBC data must be aligned to 16 bytes, got {}", data.len())
|
||||
));
|
||||
return Err(ProxyError::Crypto(format!(
|
||||
"CBC data must be aligned to 16 bytes, got {}",
|
||||
data.len()
|
||||
)));
|
||||
}
|
||||
|
||||
|
||||
if data.is_empty() {
|
||||
return Ok(Vec::new());
|
||||
}
|
||||
|
||||
|
||||
use aes::cipher::KeyInit;
|
||||
let key_schedule = aes::Aes256::new((&self.key).into());
|
||||
|
||||
|
||||
let mut result = Vec::with_capacity(data.len());
|
||||
let mut prev_ciphertext = self.iv;
|
||||
|
||||
|
||||
for chunk in data.chunks(Self::BLOCK_SIZE) {
|
||||
let ciphertext: [u8; 16] = chunk.try_into().unwrap();
|
||||
let decrypted = self.decrypt_block(&ciphertext, &key_schedule);
|
||||
@@ -203,75 +220,77 @@ impl AesCbc {
|
||||
prev_ciphertext = ciphertext;
|
||||
result.extend_from_slice(&plaintext);
|
||||
}
|
||||
|
||||
|
||||
Ok(result)
|
||||
}
|
||||
|
||||
|
||||
/// Encrypt data in-place
|
||||
pub fn encrypt_in_place(&self, data: &mut [u8]) -> Result<()> {
|
||||
if !data.len().is_multiple_of(Self::BLOCK_SIZE) {
|
||||
return Err(ProxyError::Crypto(
|
||||
format!("CBC data must be aligned to 16 bytes, got {}", data.len())
|
||||
));
|
||||
return Err(ProxyError::Crypto(format!(
|
||||
"CBC data must be aligned to 16 bytes, got {}",
|
||||
data.len()
|
||||
)));
|
||||
}
|
||||
|
||||
|
||||
if data.is_empty() {
|
||||
return Ok(());
|
||||
}
|
||||
|
||||
|
||||
use aes::cipher::KeyInit;
|
||||
let key_schedule = aes::Aes256::new((&self.key).into());
|
||||
|
||||
|
||||
let mut prev_ciphertext = self.iv;
|
||||
|
||||
|
||||
for i in (0..data.len()).step_by(Self::BLOCK_SIZE) {
|
||||
let block = &mut data[i..i + Self::BLOCK_SIZE];
|
||||
|
||||
|
||||
for j in 0..Self::BLOCK_SIZE {
|
||||
block[j] ^= prev_ciphertext[j];
|
||||
}
|
||||
|
||||
|
||||
let block_array: &mut [u8; 16] = block.try_into().unwrap();
|
||||
*block_array = self.encrypt_block(block_array, &key_schedule);
|
||||
|
||||
|
||||
prev_ciphertext = *block_array;
|
||||
}
|
||||
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
|
||||
/// Decrypt data in-place
|
||||
pub fn decrypt_in_place(&self, data: &mut [u8]) -> Result<()> {
|
||||
if !data.len().is_multiple_of(Self::BLOCK_SIZE) {
|
||||
return Err(ProxyError::Crypto(
|
||||
format!("CBC data must be aligned to 16 bytes, got {}", data.len())
|
||||
));
|
||||
return Err(ProxyError::Crypto(format!(
|
||||
"CBC data must be aligned to 16 bytes, got {}",
|
||||
data.len()
|
||||
)));
|
||||
}
|
||||
|
||||
|
||||
if data.is_empty() {
|
||||
return Ok(());
|
||||
}
|
||||
|
||||
|
||||
use aes::cipher::KeyInit;
|
||||
let key_schedule = aes::Aes256::new((&self.key).into());
|
||||
|
||||
|
||||
let mut prev_ciphertext = self.iv;
|
||||
|
||||
|
||||
for i in (0..data.len()).step_by(Self::BLOCK_SIZE) {
|
||||
let block = &mut data[i..i + Self::BLOCK_SIZE];
|
||||
|
||||
|
||||
let current_ciphertext: [u8; 16] = block.try_into().unwrap();
|
||||
|
||||
|
||||
let block_array: &mut [u8; 16] = block.try_into().unwrap();
|
||||
*block_array = self.decrypt_block(block_array, &key_schedule);
|
||||
|
||||
|
||||
for j in 0..Self::BLOCK_SIZE {
|
||||
block[j] ^= prev_ciphertext[j];
|
||||
}
|
||||
|
||||
|
||||
prev_ciphertext = current_ciphertext;
|
||||
}
|
||||
|
||||
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
@@ -318,227 +337,227 @@ impl Decryptor for PassthroughEncryptor {
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
|
||||
|
||||
// ============= AES-CTR Tests =============
|
||||
|
||||
|
||||
#[test]
|
||||
fn test_aes_ctr_roundtrip() {
|
||||
let key = [0u8; 32];
|
||||
let iv = 12345u128;
|
||||
|
||||
|
||||
let original = b"Hello, MTProto!";
|
||||
|
||||
|
||||
let mut enc = AesCtr::new(&key, iv);
|
||||
let encrypted = enc.encrypt(original);
|
||||
|
||||
|
||||
let mut dec = AesCtr::new(&key, iv);
|
||||
let decrypted = dec.decrypt(&encrypted);
|
||||
|
||||
|
||||
assert_eq!(original.as_slice(), decrypted.as_slice());
|
||||
}
|
||||
|
||||
|
||||
#[test]
|
||||
fn test_aes_ctr_in_place() {
|
||||
let key = [0x42u8; 32];
|
||||
let iv = 999u128;
|
||||
|
||||
|
||||
let original = b"Test data for in-place encryption";
|
||||
let mut data = original.to_vec();
|
||||
|
||||
|
||||
let mut cipher = AesCtr::new(&key, iv);
|
||||
cipher.apply(&mut data);
|
||||
|
||||
|
||||
assert_ne!(&data[..], original);
|
||||
|
||||
|
||||
let mut cipher = AesCtr::new(&key, iv);
|
||||
cipher.apply(&mut data);
|
||||
|
||||
|
||||
assert_eq!(&data[..], original);
|
||||
}
|
||||
|
||||
|
||||
// ============= AES-CBC Tests =============
|
||||
|
||||
|
||||
#[test]
|
||||
fn test_aes_cbc_roundtrip() {
|
||||
let key = [0u8; 32];
|
||||
let iv = [0u8; 16];
|
||||
|
||||
|
||||
let original = [0u8; 32];
|
||||
|
||||
|
||||
let cipher = AesCbc::new(key, iv);
|
||||
let encrypted = cipher.encrypt(&original).unwrap();
|
||||
let decrypted = cipher.decrypt(&encrypted).unwrap();
|
||||
|
||||
|
||||
assert_eq!(original.as_slice(), decrypted.as_slice());
|
||||
}
|
||||
|
||||
|
||||
#[test]
|
||||
fn test_aes_cbc_chaining_works() {
|
||||
let key = [0x42u8; 32];
|
||||
let iv = [0x00u8; 16];
|
||||
|
||||
|
||||
let plaintext = [0xAAu8; 32];
|
||||
|
||||
|
||||
let cipher = AesCbc::new(key, iv);
|
||||
let ciphertext = cipher.encrypt(&plaintext).unwrap();
|
||||
|
||||
|
||||
let block1 = &ciphertext[0..16];
|
||||
let block2 = &ciphertext[16..32];
|
||||
|
||||
|
||||
assert_ne!(
|
||||
block1, block2,
|
||||
"CBC chaining broken: identical plaintext blocks produced identical ciphertext"
|
||||
);
|
||||
}
|
||||
|
||||
|
||||
#[test]
|
||||
fn test_aes_cbc_known_vector() {
|
||||
let key = [0u8; 32];
|
||||
let iv = [0u8; 16];
|
||||
let plaintext = [0u8; 16];
|
||||
|
||||
|
||||
let cipher = AesCbc::new(key, iv);
|
||||
let ciphertext = cipher.encrypt(&plaintext).unwrap();
|
||||
|
||||
|
||||
let decrypted = cipher.decrypt(&ciphertext).unwrap();
|
||||
assert_eq!(plaintext.as_slice(), decrypted.as_slice());
|
||||
|
||||
|
||||
assert_ne!(ciphertext.as_slice(), plaintext.as_slice());
|
||||
}
|
||||
|
||||
|
||||
#[test]
|
||||
fn test_aes_cbc_multi_block() {
|
||||
let key = [0x12u8; 32];
|
||||
let iv = [0x34u8; 16];
|
||||
|
||||
|
||||
let plaintext: Vec<u8> = (0..80).collect();
|
||||
|
||||
|
||||
let cipher = AesCbc::new(key, iv);
|
||||
let ciphertext = cipher.encrypt(&plaintext).unwrap();
|
||||
let decrypted = cipher.decrypt(&ciphertext).unwrap();
|
||||
|
||||
|
||||
assert_eq!(plaintext, decrypted);
|
||||
}
|
||||
|
||||
|
||||
#[test]
|
||||
fn test_aes_cbc_in_place() {
|
||||
let key = [0x12u8; 32];
|
||||
let iv = [0x34u8; 16];
|
||||
|
||||
|
||||
let original = [0x56u8; 48];
|
||||
let mut buffer = original;
|
||||
|
||||
|
||||
let cipher = AesCbc::new(key, iv);
|
||||
|
||||
|
||||
cipher.encrypt_in_place(&mut buffer).unwrap();
|
||||
assert_ne!(&buffer[..], &original[..]);
|
||||
|
||||
|
||||
cipher.decrypt_in_place(&mut buffer).unwrap();
|
||||
assert_eq!(&buffer[..], &original[..]);
|
||||
}
|
||||
|
||||
|
||||
#[test]
|
||||
fn test_aes_cbc_empty_data() {
|
||||
let cipher = AesCbc::new([0u8; 32], [0u8; 16]);
|
||||
|
||||
|
||||
let encrypted = cipher.encrypt(&[]).unwrap();
|
||||
assert!(encrypted.is_empty());
|
||||
|
||||
|
||||
let decrypted = cipher.decrypt(&[]).unwrap();
|
||||
assert!(decrypted.is_empty());
|
||||
}
|
||||
|
||||
|
||||
#[test]
|
||||
fn test_aes_cbc_unaligned_error() {
|
||||
let cipher = AesCbc::new([0u8; 32], [0u8; 16]);
|
||||
|
||||
|
||||
let result = cipher.encrypt(&[0u8; 15]);
|
||||
assert!(result.is_err());
|
||||
|
||||
|
||||
let result = cipher.encrypt(&[0u8; 17]);
|
||||
assert!(result.is_err());
|
||||
}
|
||||
|
||||
|
||||
#[test]
|
||||
fn test_aes_cbc_avalanche_effect() {
|
||||
let key = [0xAB; 32];
|
||||
let iv = [0xCD; 16];
|
||||
|
||||
|
||||
let plaintext1 = [0u8; 32];
|
||||
let mut plaintext2 = [0u8; 32];
|
||||
plaintext2[0] = 0x01;
|
||||
|
||||
|
||||
let cipher = AesCbc::new(key, iv);
|
||||
|
||||
|
||||
let ciphertext1 = cipher.encrypt(&plaintext1).unwrap();
|
||||
let ciphertext2 = cipher.encrypt(&plaintext2).unwrap();
|
||||
|
||||
|
||||
assert_ne!(&ciphertext1[0..16], &ciphertext2[0..16]);
|
||||
assert_ne!(&ciphertext1[16..32], &ciphertext2[16..32]);
|
||||
}
|
||||
|
||||
|
||||
#[test]
|
||||
fn test_aes_cbc_iv_matters() {
|
||||
let key = [0x55; 32];
|
||||
let plaintext = [0x77u8; 16];
|
||||
|
||||
|
||||
let cipher1 = AesCbc::new(key, [0u8; 16]);
|
||||
let cipher2 = AesCbc::new(key, [1u8; 16]);
|
||||
|
||||
|
||||
let ciphertext1 = cipher1.encrypt(&plaintext).unwrap();
|
||||
let ciphertext2 = cipher2.encrypt(&plaintext).unwrap();
|
||||
|
||||
|
||||
assert_ne!(ciphertext1, ciphertext2);
|
||||
}
|
||||
|
||||
|
||||
#[test]
|
||||
fn test_aes_cbc_deterministic() {
|
||||
let key = [0x99; 32];
|
||||
let iv = [0x88; 16];
|
||||
let plaintext = [0x77u8; 32];
|
||||
|
||||
|
||||
let cipher = AesCbc::new(key, iv);
|
||||
|
||||
|
||||
let ciphertext1 = cipher.encrypt(&plaintext).unwrap();
|
||||
let ciphertext2 = cipher.encrypt(&plaintext).unwrap();
|
||||
|
||||
|
||||
assert_eq!(ciphertext1, ciphertext2);
|
||||
}
|
||||
|
||||
|
||||
// ============= Zeroize Tests =============
|
||||
|
||||
|
||||
#[test]
|
||||
fn test_aes_cbc_zeroize_on_drop() {
|
||||
let key = [0xAA; 32];
|
||||
let iv = [0xBB; 16];
|
||||
|
||||
|
||||
let cipher = AesCbc::new(key, iv);
|
||||
// Verify key/iv are set
|
||||
assert_eq!(cipher.key, [0xAA; 32]);
|
||||
assert_eq!(cipher.iv, [0xBB; 16]);
|
||||
|
||||
|
||||
drop(cipher);
|
||||
// After drop, key/iv are zeroized (can't observe directly,
|
||||
// but the Drop impl runs without panic)
|
||||
}
|
||||
|
||||
|
||||
// ============= Error Handling Tests =============
|
||||
|
||||
|
||||
#[test]
|
||||
fn test_invalid_key_length() {
|
||||
let result = AesCtr::from_key_iv(&[0u8; 16], &[0u8; 16]);
|
||||
assert!(result.is_err());
|
||||
|
||||
|
||||
let result = AesCbc::from_slices(&[0u8; 16], &[0u8; 16]);
|
||||
assert!(result.is_err());
|
||||
}
|
||||
|
||||
|
||||
#[test]
|
||||
fn test_invalid_iv_length() {
|
||||
let result = AesCtr::from_key_iv(&[0u8; 32], &[0u8; 8]);
|
||||
assert!(result.is_err());
|
||||
|
||||
|
||||
let result = AesCbc::from_slices(&[0u8; 32], &[0u8; 8]);
|
||||
assert!(result.is_err());
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -12,10 +12,10 @@
|
||||
//! usages are intentional and protocol-mandated.
|
||||
|
||||
use hmac::{Hmac, Mac};
|
||||
use sha2::Sha256;
|
||||
use md5::Md5;
|
||||
use sha1::Sha1;
|
||||
use sha2::Digest;
|
||||
use sha2::Sha256;
|
||||
|
||||
type HmacSha256 = Hmac<Sha256>;
|
||||
|
||||
@@ -28,8 +28,7 @@ pub fn sha256(data: &[u8]) -> [u8; 32] {
|
||||
|
||||
/// SHA-256 HMAC
|
||||
pub fn sha256_hmac(key: &[u8], data: &[u8]) -> [u8; 32] {
|
||||
let mut mac = HmacSha256::new_from_slice(key)
|
||||
.expect("HMAC accepts any key length");
|
||||
let mut mac = HmacSha256::new_from_slice(key).expect("HMAC accepts any key length");
|
||||
mac.update(data);
|
||||
mac.finalize().into_bytes().into()
|
||||
}
|
||||
@@ -124,27 +123,18 @@ pub fn derive_middleproxy_keys(
|
||||
srv_ipv6: Option<&[u8; 16]>,
|
||||
) -> ([u8; 32], [u8; 16]) {
|
||||
let s = build_middleproxy_prekey(
|
||||
nonce_srv,
|
||||
nonce_clt,
|
||||
clt_ts,
|
||||
srv_ip,
|
||||
clt_port,
|
||||
purpose,
|
||||
clt_ip,
|
||||
srv_port,
|
||||
secret,
|
||||
clt_ipv6,
|
||||
srv_ipv6,
|
||||
nonce_srv, nonce_clt, clt_ts, srv_ip, clt_port, purpose, clt_ip, srv_port, secret,
|
||||
clt_ipv6, srv_ipv6,
|
||||
);
|
||||
|
||||
let md5_1 = md5(&s[1..]);
|
||||
let sha1_sum = sha1(&s);
|
||||
let md5_2 = md5(&s[2..]);
|
||||
|
||||
|
||||
let mut key = [0u8; 32];
|
||||
key[..12].copy_from_slice(&md5_1[..12]);
|
||||
key[12..].copy_from_slice(&sha1_sum);
|
||||
|
||||
|
||||
(key, md5_2)
|
||||
}
|
||||
|
||||
@@ -164,17 +154,8 @@ mod tests {
|
||||
let secret = vec![0x55u8; 128];
|
||||
|
||||
let prekey = build_middleproxy_prekey(
|
||||
&nonce_srv,
|
||||
&nonce_clt,
|
||||
&clt_ts,
|
||||
srv_ip,
|
||||
&clt_port,
|
||||
b"CLIENT",
|
||||
clt_ip,
|
||||
&srv_port,
|
||||
&secret,
|
||||
None,
|
||||
None,
|
||||
&nonce_srv, &nonce_clt, &clt_ts, srv_ip, &clt_port, b"CLIENT", clt_ip, &srv_port,
|
||||
&secret, None, None,
|
||||
);
|
||||
let digest = sha256(&prekey);
|
||||
assert_eq!(
|
||||
|
||||
@@ -4,7 +4,7 @@ pub mod aes;
|
||||
pub mod hash;
|
||||
pub mod random;
|
||||
|
||||
pub use aes::{AesCtr, AesCbc};
|
||||
pub use aes::{AesCbc, AesCtr};
|
||||
pub use hash::{
|
||||
build_middleproxy_prekey, crc32, crc32c, derive_middleproxy_keys, sha256, sha256_hmac,
|
||||
};
|
||||
|
||||
@@ -3,11 +3,11 @@
|
||||
#![allow(deprecated)]
|
||||
#![allow(dead_code)]
|
||||
|
||||
use rand::{Rng, RngCore, SeedableRng};
|
||||
use rand::rngs::StdRng;
|
||||
use parking_lot::Mutex;
|
||||
use zeroize::Zeroize;
|
||||
use crate::crypto::AesCtr;
|
||||
use parking_lot::Mutex;
|
||||
use rand::rngs::StdRng;
|
||||
use rand::{Rng, RngExt, SeedableRng};
|
||||
use zeroize::Zeroize;
|
||||
|
||||
/// Cryptographically secure PRNG with AES-CTR
|
||||
pub struct SecureRandom {
|
||||
@@ -21,6 +21,7 @@ struct SecureRandomInner {
|
||||
rng: StdRng,
|
||||
cipher: AesCtr,
|
||||
buffer: Vec<u8>,
|
||||
buffer_start: usize,
|
||||
}
|
||||
|
||||
impl Drop for SecureRandomInner {
|
||||
@@ -33,25 +34,26 @@ impl SecureRandom {
|
||||
pub fn new() -> Self {
|
||||
let mut seed_source = rand::rng();
|
||||
let mut rng = StdRng::from_rng(&mut seed_source);
|
||||
|
||||
|
||||
let mut key = [0u8; 32];
|
||||
rng.fill_bytes(&mut key);
|
||||
let iv: u128 = rng.random();
|
||||
|
||||
|
||||
let cipher = AesCtr::new(&key, iv);
|
||||
|
||||
|
||||
// Zeroize local key copy — cipher already consumed it
|
||||
key.zeroize();
|
||||
|
||||
|
||||
Self {
|
||||
inner: Mutex::new(SecureRandomInner {
|
||||
rng,
|
||||
cipher,
|
||||
buffer: Vec::with_capacity(1024),
|
||||
buffer_start: 0,
|
||||
}),
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
/// Fill a caller-provided buffer with random bytes.
|
||||
pub fn fill(&self, out: &mut [u8]) {
|
||||
let mut inner = self.inner.lock();
|
||||
@@ -59,16 +61,29 @@ impl SecureRandom {
|
||||
|
||||
let mut written = 0usize;
|
||||
while written < out.len() {
|
||||
if inner.buffer_start >= inner.buffer.len() {
|
||||
inner.buffer.clear();
|
||||
inner.buffer_start = 0;
|
||||
}
|
||||
|
||||
if inner.buffer.is_empty() {
|
||||
let mut chunk = vec![0u8; CHUNK_SIZE];
|
||||
inner.rng.fill_bytes(&mut chunk);
|
||||
inner.cipher.apply(&mut chunk);
|
||||
inner.buffer.extend_from_slice(&chunk);
|
||||
inner.buffer_start = 0;
|
||||
}
|
||||
|
||||
let take = (out.len() - written).min(inner.buffer.len());
|
||||
out[written..written + take].copy_from_slice(&inner.buffer[..take]);
|
||||
inner.buffer.drain(..take);
|
||||
let available = inner.buffer.len().saturating_sub(inner.buffer_start);
|
||||
let take = (out.len() - written).min(available);
|
||||
let start = inner.buffer_start;
|
||||
let end = start + take;
|
||||
out[written..written + take].copy_from_slice(&inner.buffer[start..end]);
|
||||
inner.buffer_start = end;
|
||||
if inner.buffer_start >= inner.buffer.len() {
|
||||
inner.buffer.clear();
|
||||
inner.buffer_start = 0;
|
||||
}
|
||||
written += take;
|
||||
}
|
||||
}
|
||||
@@ -79,25 +94,25 @@ impl SecureRandom {
|
||||
self.fill(&mut out);
|
||||
out
|
||||
}
|
||||
|
||||
|
||||
/// Generate random number in range [0, max)
|
||||
pub fn range(&self, max: usize) -> usize {
|
||||
if max == 0 {
|
||||
return 0;
|
||||
}
|
||||
let mut inner = self.inner.lock();
|
||||
inner.rng.gen_range(0..max)
|
||||
inner.rng.random_range(0..max)
|
||||
}
|
||||
|
||||
|
||||
/// Generate random bits
|
||||
pub fn bits(&self, k: usize) -> u64 {
|
||||
if k == 0 {
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
||||
let bytes_needed = k.div_ceil(8);
|
||||
let bytes = self.bytes(bytes_needed.min(8));
|
||||
|
||||
|
||||
let mut result = 0u64;
|
||||
for (i, &b) in bytes.iter().enumerate() {
|
||||
if i >= 8 {
|
||||
@@ -105,14 +120,14 @@ impl SecureRandom {
|
||||
}
|
||||
result |= (b as u64) << (i * 8);
|
||||
}
|
||||
|
||||
|
||||
if k < 64 {
|
||||
result &= (1u64 << k) - 1;
|
||||
}
|
||||
|
||||
|
||||
result
|
||||
}
|
||||
|
||||
|
||||
/// Choose random element from slice
|
||||
pub fn choose<'a, T>(&self, slice: &'a [T]) -> Option<&'a T> {
|
||||
if slice.is_empty() {
|
||||
@@ -121,22 +136,22 @@ impl SecureRandom {
|
||||
Some(&slice[self.range(slice.len())])
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
/// Shuffle slice in place
|
||||
pub fn shuffle<T>(&self, slice: &mut [T]) {
|
||||
let mut inner = self.inner.lock();
|
||||
for i in (1..slice.len()).rev() {
|
||||
let j = inner.rng.gen_range(0..=i);
|
||||
let j = inner.rng.random_range(0..=i);
|
||||
slice.swap(i, j);
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
/// Generate random u32
|
||||
pub fn u32(&self) -> u32 {
|
||||
let mut inner = self.inner.lock();
|
||||
inner.rng.random()
|
||||
}
|
||||
|
||||
|
||||
/// Generate random u64
|
||||
pub fn u64(&self) -> u64 {
|
||||
let mut inner = self.inner.lock();
|
||||
@@ -154,7 +169,7 @@ impl Default for SecureRandom {
|
||||
mod tests {
|
||||
use super::*;
|
||||
use std::collections::HashSet;
|
||||
|
||||
|
||||
#[test]
|
||||
fn test_bytes_uniqueness() {
|
||||
let rng = SecureRandom::new();
|
||||
@@ -162,7 +177,7 @@ mod tests {
|
||||
let b = rng.bytes(32);
|
||||
assert_ne!(a, b);
|
||||
}
|
||||
|
||||
|
||||
#[test]
|
||||
fn test_bytes_length() {
|
||||
let rng = SecureRandom::new();
|
||||
@@ -171,63 +186,63 @@ mod tests {
|
||||
assert_eq!(rng.bytes(100).len(), 100);
|
||||
assert_eq!(rng.bytes(1000).len(), 1000);
|
||||
}
|
||||
|
||||
|
||||
#[test]
|
||||
fn test_range() {
|
||||
let rng = SecureRandom::new();
|
||||
|
||||
|
||||
for _ in 0..1000 {
|
||||
let n = rng.range(10);
|
||||
assert!(n < 10);
|
||||
}
|
||||
|
||||
|
||||
assert_eq!(rng.range(1), 0);
|
||||
assert_eq!(rng.range(0), 0);
|
||||
}
|
||||
|
||||
|
||||
#[test]
|
||||
fn test_bits() {
|
||||
let rng = SecureRandom::new();
|
||||
|
||||
|
||||
for _ in 0..100 {
|
||||
assert!(rng.bits(1) <= 1);
|
||||
}
|
||||
|
||||
|
||||
for _ in 0..100 {
|
||||
assert!(rng.bits(8) <= 255);
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
#[test]
|
||||
fn test_choose() {
|
||||
let rng = SecureRandom::new();
|
||||
let items = vec![1, 2, 3, 4, 5];
|
||||
|
||||
|
||||
let mut seen = HashSet::new();
|
||||
for _ in 0..1000 {
|
||||
if let Some(&item) = rng.choose(&items) {
|
||||
seen.insert(item);
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
assert_eq!(seen.len(), 5);
|
||||
|
||||
|
||||
let empty: Vec<i32> = vec![];
|
||||
assert!(rng.choose(&empty).is_none());
|
||||
}
|
||||
|
||||
|
||||
#[test]
|
||||
fn test_shuffle() {
|
||||
let rng = SecureRandom::new();
|
||||
let original = vec![1, 2, 3, 4, 5, 6, 7, 8, 9, 10];
|
||||
|
||||
|
||||
let mut shuffled = original.clone();
|
||||
rng.shuffle(&mut shuffled);
|
||||
|
||||
|
||||
let mut sorted = shuffled.clone();
|
||||
sorted.sort();
|
||||
assert_eq!(sorted, original);
|
||||
|
||||
|
||||
assert_ne!(shuffled, original);
|
||||
}
|
||||
}
|
||||
|
||||
211
src/error.rs
211
src/error.rs
@@ -12,28 +12,15 @@ use thiserror::Error;
|
||||
#[derive(Debug)]
|
||||
pub enum StreamError {
|
||||
/// Partial read: got fewer bytes than expected
|
||||
PartialRead {
|
||||
expected: usize,
|
||||
got: usize,
|
||||
},
|
||||
PartialRead { expected: usize, got: usize },
|
||||
/// Partial write: wrote fewer bytes than expected
|
||||
PartialWrite {
|
||||
expected: usize,
|
||||
written: usize,
|
||||
},
|
||||
PartialWrite { expected: usize, written: usize },
|
||||
/// Stream is in poisoned state and cannot be used
|
||||
Poisoned {
|
||||
reason: String,
|
||||
},
|
||||
Poisoned { reason: String },
|
||||
/// Buffer overflow: attempted to buffer more than allowed
|
||||
BufferOverflow {
|
||||
limit: usize,
|
||||
attempted: usize,
|
||||
},
|
||||
BufferOverflow { limit: usize, attempted: usize },
|
||||
/// Invalid frame format
|
||||
InvalidFrame {
|
||||
details: String,
|
||||
},
|
||||
InvalidFrame { details: String },
|
||||
/// Unexpected end of stream
|
||||
UnexpectedEof,
|
||||
/// Underlying I/O error
|
||||
@@ -47,13 +34,21 @@ impl fmt::Display for StreamError {
|
||||
write!(f, "partial read: expected {} bytes, got {}", expected, got)
|
||||
}
|
||||
Self::PartialWrite { expected, written } => {
|
||||
write!(f, "partial write: expected {} bytes, wrote {}", expected, written)
|
||||
write!(
|
||||
f,
|
||||
"partial write: expected {} bytes, wrote {}",
|
||||
expected, written
|
||||
)
|
||||
}
|
||||
Self::Poisoned { reason } => {
|
||||
write!(f, "stream poisoned: {}", reason)
|
||||
}
|
||||
Self::BufferOverflow { limit, attempted } => {
|
||||
write!(f, "buffer overflow: limit {}, attempted {}", limit, attempted)
|
||||
write!(
|
||||
f,
|
||||
"buffer overflow: limit {}, attempted {}",
|
||||
limit, attempted
|
||||
)
|
||||
}
|
||||
Self::InvalidFrame { details } => {
|
||||
write!(f, "invalid frame: {}", details)
|
||||
@@ -90,9 +85,7 @@ impl From<StreamError> for std::io::Error {
|
||||
StreamError::UnexpectedEof => {
|
||||
std::io::Error::new(std::io::ErrorKind::UnexpectedEof, err)
|
||||
}
|
||||
StreamError::Poisoned { .. } => {
|
||||
std::io::Error::other(err)
|
||||
}
|
||||
StreamError::Poisoned { .. } => std::io::Error::other(err),
|
||||
StreamError::BufferOverflow { .. } => {
|
||||
std::io::Error::new(std::io::ErrorKind::OutOfMemory, err)
|
||||
}
|
||||
@@ -112,7 +105,7 @@ impl From<StreamError> for std::io::Error {
|
||||
pub trait Recoverable {
|
||||
/// Check if error is recoverable (can retry operation)
|
||||
fn is_recoverable(&self) -> bool;
|
||||
|
||||
|
||||
/// Check if connection can continue after this error
|
||||
fn can_continue(&self) -> bool;
|
||||
}
|
||||
@@ -123,19 +116,22 @@ impl Recoverable for StreamError {
|
||||
Self::PartialRead { .. } | Self::PartialWrite { .. } => true,
|
||||
Self::Io(e) => matches!(
|
||||
e.kind(),
|
||||
std::io::ErrorKind::WouldBlock
|
||||
| std::io::ErrorKind::Interrupted
|
||||
| std::io::ErrorKind::TimedOut
|
||||
std::io::ErrorKind::WouldBlock
|
||||
| std::io::ErrorKind::Interrupted
|
||||
| std::io::ErrorKind::TimedOut
|
||||
),
|
||||
Self::Poisoned { .. }
|
||||
Self::Poisoned { .. }
|
||||
| Self::BufferOverflow { .. }
|
||||
| Self::InvalidFrame { .. }
|
||||
| Self::UnexpectedEof => false,
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
fn can_continue(&self) -> bool {
|
||||
!matches!(self, Self::Poisoned { .. } | Self::UnexpectedEof | Self::BufferOverflow { .. })
|
||||
!matches!(
|
||||
self,
|
||||
Self::Poisoned { .. } | Self::UnexpectedEof | Self::BufferOverflow { .. }
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -143,19 +139,19 @@ impl Recoverable for std::io::Error {
|
||||
fn is_recoverable(&self) -> bool {
|
||||
matches!(
|
||||
self.kind(),
|
||||
std::io::ErrorKind::WouldBlock
|
||||
| std::io::ErrorKind::Interrupted
|
||||
| std::io::ErrorKind::TimedOut
|
||||
std::io::ErrorKind::WouldBlock
|
||||
| std::io::ErrorKind::Interrupted
|
||||
| std::io::ErrorKind::TimedOut
|
||||
)
|
||||
}
|
||||
|
||||
|
||||
fn can_continue(&self) -> bool {
|
||||
!matches!(
|
||||
self.kind(),
|
||||
std::io::ErrorKind::BrokenPipe
|
||||
| std::io::ErrorKind::ConnectionReset
|
||||
| std::io::ErrorKind::ConnectionAborted
|
||||
| std::io::ErrorKind::NotConnected
|
||||
| std::io::ErrorKind::ConnectionReset
|
||||
| std::io::ErrorKind::ConnectionAborted
|
||||
| std::io::ErrorKind::NotConnected
|
||||
)
|
||||
}
|
||||
}
|
||||
@@ -165,96 +161,91 @@ impl Recoverable for std::io::Error {
|
||||
#[derive(Error, Debug)]
|
||||
pub enum ProxyError {
|
||||
// ============= Crypto Errors =============
|
||||
|
||||
#[error("Crypto error: {0}")]
|
||||
Crypto(String),
|
||||
|
||||
|
||||
#[error("Invalid key length: expected {expected}, got {got}")]
|
||||
InvalidKeyLength { expected: usize, got: usize },
|
||||
|
||||
|
||||
// ============= Stream Errors =============
|
||||
|
||||
#[error("Stream error: {0}")]
|
||||
Stream(#[from] StreamError),
|
||||
|
||||
|
||||
// ============= Protocol Errors =============
|
||||
|
||||
#[error("Invalid handshake: {0}")]
|
||||
InvalidHandshake(String),
|
||||
|
||||
|
||||
#[error("Invalid protocol tag: {0:02x?}")]
|
||||
InvalidProtoTag([u8; 4]),
|
||||
|
||||
|
||||
#[error("Invalid TLS record: type={record_type}, version={version:02x?}")]
|
||||
InvalidTlsRecord { record_type: u8, version: [u8; 2] },
|
||||
|
||||
|
||||
#[error("Replay attack detected from {addr}")]
|
||||
ReplayAttack { addr: SocketAddr },
|
||||
|
||||
|
||||
#[error("Time skew detected: client={client_time}, server={server_time}")]
|
||||
TimeSkew { client_time: u32, server_time: u32 },
|
||||
|
||||
|
||||
#[error("Invalid message length: {len} (min={min}, max={max})")]
|
||||
InvalidMessageLength { len: usize, min: usize, max: usize },
|
||||
|
||||
|
||||
#[error("Checksum mismatch: expected={expected:08x}, got={got:08x}")]
|
||||
ChecksumMismatch { expected: u32, got: u32 },
|
||||
|
||||
|
||||
#[error("Sequence number mismatch: expected={expected}, got={got}")]
|
||||
SeqNoMismatch { expected: i32, got: i32 },
|
||||
|
||||
|
||||
#[error("TLS handshake failed: {reason}")]
|
||||
TlsHandshakeFailed { reason: String },
|
||||
|
||||
|
||||
#[error("Telegram handshake timeout")]
|
||||
TgHandshakeTimeout,
|
||||
|
||||
|
||||
// ============= Network Errors =============
|
||||
|
||||
#[error("Connection timeout to {addr}")]
|
||||
ConnectionTimeout { addr: String },
|
||||
|
||||
|
||||
#[error("Connection refused by {addr}")]
|
||||
ConnectionRefused { addr: String },
|
||||
|
||||
|
||||
#[error("IO error: {0}")]
|
||||
Io(#[from] std::io::Error),
|
||||
|
||||
|
||||
// ============= Proxy Protocol Errors =============
|
||||
|
||||
#[error("Invalid proxy protocol header")]
|
||||
InvalidProxyProtocol,
|
||||
|
||||
|
||||
#[error("Unknown TLS SNI")]
|
||||
UnknownTlsSni,
|
||||
|
||||
#[error("Proxy error: {0}")]
|
||||
Proxy(String),
|
||||
|
||||
|
||||
// ============= Config Errors =============
|
||||
|
||||
#[error("Config error: {0}")]
|
||||
Config(String),
|
||||
|
||||
|
||||
#[error("Invalid secret for user {user}: {reason}")]
|
||||
InvalidSecret { user: String, reason: String },
|
||||
|
||||
|
||||
// ============= User Errors =============
|
||||
|
||||
#[error("User {user} expired")]
|
||||
UserExpired { user: String },
|
||||
|
||||
|
||||
#[error("User {user} exceeded connection limit")]
|
||||
ConnectionLimitExceeded { user: String },
|
||||
|
||||
|
||||
#[error("User {user} exceeded data quota")]
|
||||
DataQuotaExceeded { user: String },
|
||||
|
||||
|
||||
#[error("Unknown user")]
|
||||
UnknownUser,
|
||||
|
||||
|
||||
#[error("Rate limited")]
|
||||
RateLimited,
|
||||
|
||||
|
||||
// ============= General Errors =============
|
||||
|
||||
#[error("Internal error: {0}")]
|
||||
Internal(String),
|
||||
}
|
||||
@@ -269,7 +260,7 @@ impl Recoverable for ProxyError {
|
||||
_ => false,
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
fn can_continue(&self) -> bool {
|
||||
match self {
|
||||
Self::Stream(e) => e.can_continue(),
|
||||
@@ -301,17 +292,19 @@ impl<T, R, W> HandshakeResult<T, R, W> {
|
||||
pub fn is_success(&self) -> bool {
|
||||
matches!(self, HandshakeResult::Success(_))
|
||||
}
|
||||
|
||||
|
||||
/// Check if bad client
|
||||
pub fn is_bad_client(&self) -> bool {
|
||||
matches!(self, HandshakeResult::BadClient { .. })
|
||||
}
|
||||
|
||||
|
||||
/// Map the success value
|
||||
pub fn map<U, F: FnOnce(T) -> U>(self, f: F) -> HandshakeResult<U, R, W> {
|
||||
match self {
|
||||
HandshakeResult::Success(v) => HandshakeResult::Success(f(v)),
|
||||
HandshakeResult::BadClient { reader, writer } => HandshakeResult::BadClient { reader, writer },
|
||||
HandshakeResult::BadClient { reader, writer } => {
|
||||
HandshakeResult::BadClient { reader, writer }
|
||||
}
|
||||
HandshakeResult::Error(e) => HandshakeResult::Error(e),
|
||||
}
|
||||
}
|
||||
@@ -338,76 +331,104 @@ impl<T, R, W> From<StreamError> for HandshakeResult<T, R, W> {
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
|
||||
|
||||
#[test]
|
||||
fn test_stream_error_display() {
|
||||
let err = StreamError::PartialRead { expected: 100, got: 50 };
|
||||
let err = StreamError::PartialRead {
|
||||
expected: 100,
|
||||
got: 50,
|
||||
};
|
||||
assert!(err.to_string().contains("100"));
|
||||
assert!(err.to_string().contains("50"));
|
||||
|
||||
let err = StreamError::Poisoned { reason: "test".into() };
|
||||
|
||||
let err = StreamError::Poisoned {
|
||||
reason: "test".into(),
|
||||
};
|
||||
assert!(err.to_string().contains("test"));
|
||||
}
|
||||
|
||||
|
||||
#[test]
|
||||
fn test_stream_error_recoverable() {
|
||||
assert!(StreamError::PartialRead { expected: 10, got: 5 }.is_recoverable());
|
||||
assert!(StreamError::PartialWrite { expected: 10, written: 5 }.is_recoverable());
|
||||
assert!(
|
||||
StreamError::PartialRead {
|
||||
expected: 10,
|
||||
got: 5
|
||||
}
|
||||
.is_recoverable()
|
||||
);
|
||||
assert!(
|
||||
StreamError::PartialWrite {
|
||||
expected: 10,
|
||||
written: 5
|
||||
}
|
||||
.is_recoverable()
|
||||
);
|
||||
assert!(!StreamError::Poisoned { reason: "x".into() }.is_recoverable());
|
||||
assert!(!StreamError::UnexpectedEof.is_recoverable());
|
||||
}
|
||||
|
||||
|
||||
#[test]
|
||||
fn test_stream_error_can_continue() {
|
||||
assert!(!StreamError::Poisoned { reason: "x".into() }.can_continue());
|
||||
assert!(!StreamError::UnexpectedEof.can_continue());
|
||||
assert!(StreamError::PartialRead { expected: 10, got: 5 }.can_continue());
|
||||
assert!(
|
||||
StreamError::PartialRead {
|
||||
expected: 10,
|
||||
got: 5
|
||||
}
|
||||
.can_continue()
|
||||
);
|
||||
}
|
||||
|
||||
|
||||
#[test]
|
||||
fn test_stream_error_to_io_error() {
|
||||
let stream_err = StreamError::UnexpectedEof;
|
||||
let io_err: std::io::Error = stream_err.into();
|
||||
assert_eq!(io_err.kind(), std::io::ErrorKind::UnexpectedEof);
|
||||
}
|
||||
|
||||
|
||||
#[test]
|
||||
fn test_handshake_result() {
|
||||
let success: HandshakeResult<i32, (), ()> = HandshakeResult::Success(42);
|
||||
assert!(success.is_success());
|
||||
assert!(!success.is_bad_client());
|
||||
|
||||
let bad: HandshakeResult<i32, (), ()> = HandshakeResult::BadClient { reader: (), writer: () };
|
||||
|
||||
let bad: HandshakeResult<i32, (), ()> = HandshakeResult::BadClient {
|
||||
reader: (),
|
||||
writer: (),
|
||||
};
|
||||
assert!(!bad.is_success());
|
||||
assert!(bad.is_bad_client());
|
||||
}
|
||||
|
||||
|
||||
#[test]
|
||||
fn test_handshake_result_map() {
|
||||
let success: HandshakeResult<i32, (), ()> = HandshakeResult::Success(42);
|
||||
let mapped = success.map(|x| x * 2);
|
||||
|
||||
|
||||
match mapped {
|
||||
HandshakeResult::Success(v) => assert_eq!(v, 84),
|
||||
_ => panic!("Expected success"),
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
#[test]
|
||||
fn test_proxy_error_recoverable() {
|
||||
let err = ProxyError::RateLimited;
|
||||
assert!(err.is_recoverable());
|
||||
|
||||
|
||||
let err = ProxyError::InvalidHandshake("bad".into());
|
||||
assert!(!err.is_recoverable());
|
||||
}
|
||||
|
||||
|
||||
#[test]
|
||||
fn test_error_display() {
|
||||
let err = ProxyError::ConnectionTimeout { addr: "1.2.3.4:443".into() };
|
||||
let err = ProxyError::ConnectionTimeout {
|
||||
addr: "1.2.3.4:443".into(),
|
||||
};
|
||||
assert!(err.to_string().contains("1.2.3.4:443"));
|
||||
|
||||
|
||||
let err = ProxyError::InvalidProxyProtocol;
|
||||
assert!(err.to_string().contains("proxy protocol"));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,252 +1,442 @@
|
||||
// src/ip_tracker.rs
|
||||
// IP address tracking and limiting for users
|
||||
// IP address tracking and per-user unique IP limiting.
|
||||
|
||||
#![allow(dead_code)]
|
||||
|
||||
use std::collections::{HashMap, HashSet};
|
||||
use std::collections::HashMap;
|
||||
use std::net::IpAddr;
|
||||
use std::sync::Arc;
|
||||
use tokio::sync::RwLock;
|
||||
use std::sync::Mutex;
|
||||
use std::sync::atomic::{AtomicU64, Ordering};
|
||||
use std::time::{Duration, Instant};
|
||||
|
||||
use tokio::sync::{Mutex as AsyncMutex, RwLock};
|
||||
|
||||
use crate::config::UserMaxUniqueIpsMode;
|
||||
|
||||
/// Трекер уникальных IP-адресов для каждого пользователя MTProxy
|
||||
///
|
||||
/// Предоставляет thread-safe механизм для:
|
||||
/// - Отслеживания активных IP-адресов каждого пользователя
|
||||
/// - Ограничения количества уникальных IP на пользователя
|
||||
/// - Автоматической очистки при отключении клиентов
|
||||
#[derive(Debug, Clone)]
|
||||
pub struct UserIpTracker {
|
||||
/// Маппинг: Имя пользователя -> Множество активных IP-адресов
|
||||
active_ips: Arc<RwLock<HashMap<String, HashSet<IpAddr>>>>,
|
||||
|
||||
/// Маппинг: Имя пользователя -> Максимально разрешенное количество уникальных IP
|
||||
active_ips: Arc<RwLock<HashMap<String, HashMap<IpAddr, usize>>>>,
|
||||
recent_ips: Arc<RwLock<HashMap<String, HashMap<IpAddr, Instant>>>>,
|
||||
max_ips: Arc<RwLock<HashMap<String, usize>>>,
|
||||
default_max_ips: Arc<RwLock<usize>>,
|
||||
limit_mode: Arc<RwLock<UserMaxUniqueIpsMode>>,
|
||||
limit_window: Arc<RwLock<Duration>>,
|
||||
last_compact_epoch_secs: Arc<AtomicU64>,
|
||||
cleanup_queue: Arc<Mutex<Vec<(String, IpAddr)>>>,
|
||||
cleanup_drain_lock: Arc<AsyncMutex<()>>,
|
||||
}
|
||||
|
||||
impl UserIpTracker {
|
||||
/// Создать новый пустой трекер
|
||||
pub fn new() -> Self {
|
||||
Self {
|
||||
active_ips: Arc::new(RwLock::new(HashMap::new())),
|
||||
recent_ips: Arc::new(RwLock::new(HashMap::new())),
|
||||
max_ips: Arc::new(RwLock::new(HashMap::new())),
|
||||
default_max_ips: Arc::new(RwLock::new(0)),
|
||||
limit_mode: Arc::new(RwLock::new(UserMaxUniqueIpsMode::ActiveWindow)),
|
||||
limit_window: Arc::new(RwLock::new(Duration::from_secs(30))),
|
||||
last_compact_epoch_secs: Arc::new(AtomicU64::new(0)),
|
||||
cleanup_queue: Arc::new(Mutex::new(Vec::new())),
|
||||
cleanup_drain_lock: Arc::new(AsyncMutex::new(())),
|
||||
}
|
||||
}
|
||||
|
||||
/// Установить лимит уникальных IP для конкретного пользователя
|
||||
///
|
||||
/// # Arguments
|
||||
/// * `username` - Имя пользователя
|
||||
/// * `max_ips` - Максимальное количество одновременно активных IP-адресов
|
||||
pub fn enqueue_cleanup(&self, user: String, ip: IpAddr) {
|
||||
match self.cleanup_queue.lock() {
|
||||
Ok(mut queue) => queue.push((user, ip)),
|
||||
Err(poisoned) => {
|
||||
let mut queue = poisoned.into_inner();
|
||||
queue.push((user.clone(), ip));
|
||||
self.cleanup_queue.clear_poison();
|
||||
tracing::warn!(
|
||||
"UserIpTracker cleanup_queue lock poisoned; recovered and enqueued IP cleanup for {} ({})",
|
||||
user,
|
||||
ip
|
||||
);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
pub(crate) fn cleanup_queue_len_for_tests(&self) -> usize {
|
||||
self.cleanup_queue
|
||||
.lock()
|
||||
.unwrap_or_else(|poisoned| poisoned.into_inner())
|
||||
.len()
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
pub(crate) fn cleanup_queue_mutex_for_tests(&self) -> Arc<Mutex<Vec<(String, IpAddr)>>> {
|
||||
Arc::clone(&self.cleanup_queue)
|
||||
}
|
||||
|
||||
pub(crate) async fn drain_cleanup_queue(&self) {
|
||||
// Serialize queue draining and active-IP mutation so check-and-add cannot
|
||||
// observe stale active entries that are already queued for removal.
|
||||
let _drain_guard = self.cleanup_drain_lock.lock().await;
|
||||
let to_remove = {
|
||||
match self.cleanup_queue.lock() {
|
||||
Ok(mut queue) => {
|
||||
if queue.is_empty() {
|
||||
return;
|
||||
}
|
||||
std::mem::take(&mut *queue)
|
||||
}
|
||||
Err(poisoned) => {
|
||||
let mut queue = poisoned.into_inner();
|
||||
if queue.is_empty() {
|
||||
self.cleanup_queue.clear_poison();
|
||||
return;
|
||||
}
|
||||
let drained = std::mem::take(&mut *queue);
|
||||
self.cleanup_queue.clear_poison();
|
||||
drained
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
let mut active_ips = self.active_ips.write().await;
|
||||
for (user, ip) in to_remove {
|
||||
if let Some(user_ips) = active_ips.get_mut(&user) {
|
||||
if let Some(count) = user_ips.get_mut(&ip) {
|
||||
if *count > 1 {
|
||||
*count -= 1;
|
||||
} else {
|
||||
user_ips.remove(&ip);
|
||||
}
|
||||
}
|
||||
if user_ips.is_empty() {
|
||||
active_ips.remove(&user);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
fn now_epoch_secs() -> u64 {
|
||||
std::time::SystemTime::now()
|
||||
.duration_since(std::time::UNIX_EPOCH)
|
||||
.unwrap_or_default()
|
||||
.as_secs()
|
||||
}
|
||||
|
||||
async fn maybe_compact_empty_users(&self) {
|
||||
const COMPACT_INTERVAL_SECS: u64 = 60;
|
||||
let now_epoch_secs = Self::now_epoch_secs();
|
||||
let last_compact_epoch_secs = self.last_compact_epoch_secs.load(Ordering::Relaxed);
|
||||
if now_epoch_secs.saturating_sub(last_compact_epoch_secs) < COMPACT_INTERVAL_SECS {
|
||||
return;
|
||||
}
|
||||
if self
|
||||
.last_compact_epoch_secs
|
||||
.compare_exchange(
|
||||
last_compact_epoch_secs,
|
||||
now_epoch_secs,
|
||||
Ordering::AcqRel,
|
||||
Ordering::Relaxed,
|
||||
)
|
||||
.is_err()
|
||||
{
|
||||
return;
|
||||
}
|
||||
|
||||
let mut active_ips = self.active_ips.write().await;
|
||||
let mut recent_ips = self.recent_ips.write().await;
|
||||
let mut users =
|
||||
Vec::<String>::with_capacity(active_ips.len().saturating_add(recent_ips.len()));
|
||||
users.extend(active_ips.keys().cloned());
|
||||
for user in recent_ips.keys() {
|
||||
if !active_ips.contains_key(user) {
|
||||
users.push(user.clone());
|
||||
}
|
||||
}
|
||||
|
||||
for user in users {
|
||||
let active_empty = active_ips
|
||||
.get(&user)
|
||||
.map(|ips| ips.is_empty())
|
||||
.unwrap_or(true);
|
||||
let recent_empty = recent_ips
|
||||
.get(&user)
|
||||
.map(|ips| ips.is_empty())
|
||||
.unwrap_or(true);
|
||||
if active_empty && recent_empty {
|
||||
active_ips.remove(&user);
|
||||
recent_ips.remove(&user);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub async fn set_limit_policy(&self, mode: UserMaxUniqueIpsMode, window_secs: u64) {
|
||||
{
|
||||
let mut current_mode = self.limit_mode.write().await;
|
||||
*current_mode = mode;
|
||||
}
|
||||
let mut current_window = self.limit_window.write().await;
|
||||
*current_window = Duration::from_secs(window_secs.max(1));
|
||||
}
|
||||
|
||||
pub async fn set_user_limit(&self, username: &str, max_ips: usize) {
|
||||
let mut limits = self.max_ips.write().await;
|
||||
limits.insert(username.to_string(), max_ips);
|
||||
}
|
||||
|
||||
/// Загрузить лимиты из конфигурации
|
||||
///
|
||||
/// # Arguments
|
||||
/// * `limits` - HashMap с лимитами из config.toml
|
||||
pub async fn load_limits(&self, limits: &HashMap<String, usize>) {
|
||||
let mut max_ips = self.max_ips.write().await;
|
||||
for (user, limit) in limits {
|
||||
max_ips.insert(user.clone(), *limit);
|
||||
}
|
||||
pub async fn remove_user_limit(&self, username: &str) {
|
||||
let mut limits = self.max_ips.write().await;
|
||||
limits.remove(username);
|
||||
}
|
||||
|
||||
pub async fn load_limits(&self, default_limit: usize, limits: &HashMap<String, usize>) {
|
||||
let mut default_max_ips = self.default_max_ips.write().await;
|
||||
*default_max_ips = default_limit;
|
||||
drop(default_max_ips);
|
||||
let mut max_ips = self.max_ips.write().await;
|
||||
max_ips.clone_from(limits);
|
||||
}
|
||||
|
||||
fn prune_recent(user_recent: &mut HashMap<IpAddr, Instant>, now: Instant, window: Duration) {
|
||||
if user_recent.is_empty() {
|
||||
return;
|
||||
}
|
||||
user_recent.retain(|_, seen_at| now.duration_since(*seen_at) <= window);
|
||||
}
|
||||
|
||||
/// Проверить, может ли пользователь подключиться с данного IP-адреса
|
||||
/// и добавить IP в список активных, если проверка успешна
|
||||
///
|
||||
/// # Arguments
|
||||
/// * `username` - Имя пользователя
|
||||
/// * `ip` - IP-адрес клиента
|
||||
///
|
||||
/// # Returns
|
||||
/// * `Ok(())` - Подключение разрешено, IP добавлен в активные
|
||||
/// * `Err(String)` - Подключение отклонено с описанием причины
|
||||
pub async fn check_and_add(&self, username: &str, ip: IpAddr) -> Result<(), String> {
|
||||
// Получаем лимит для пользователя
|
||||
let max_ips = self.max_ips.read().await;
|
||||
let limit = match max_ips.get(username) {
|
||||
Some(limit) => *limit,
|
||||
None => {
|
||||
// Если лимит не задан - разрешаем безлимитный доступ
|
||||
drop(max_ips);
|
||||
let mut active_ips = self.active_ips.write().await;
|
||||
let user_ips = active_ips
|
||||
.entry(username.to_string())
|
||||
.or_insert_with(HashSet::new);
|
||||
user_ips.insert(ip);
|
||||
return Ok(());
|
||||
}
|
||||
self.drain_cleanup_queue().await;
|
||||
self.maybe_compact_empty_users().await;
|
||||
let default_max_ips = *self.default_max_ips.read().await;
|
||||
let limit = {
|
||||
let max_ips = self.max_ips.read().await;
|
||||
max_ips
|
||||
.get(username)
|
||||
.copied()
|
||||
.filter(|limit| *limit > 0)
|
||||
.or((default_max_ips > 0).then_some(default_max_ips))
|
||||
};
|
||||
drop(max_ips);
|
||||
let mode = *self.limit_mode.read().await;
|
||||
let window = *self.limit_window.read().await;
|
||||
let now = Instant::now();
|
||||
|
||||
// Проверяем и обновляем активные IP
|
||||
let mut active_ips = self.active_ips.write().await;
|
||||
let user_ips = active_ips
|
||||
let user_active = active_ips
|
||||
.entry(username.to_string())
|
||||
.or_insert_with(HashSet::new);
|
||||
.or_insert_with(HashMap::new);
|
||||
|
||||
// Если IP уже есть в списке - это повторное подключение, разрешаем
|
||||
if user_ips.contains(&ip) {
|
||||
let mut recent_ips = self.recent_ips.write().await;
|
||||
let user_recent = recent_ips
|
||||
.entry(username.to_string())
|
||||
.or_insert_with(HashMap::new);
|
||||
Self::prune_recent(user_recent, now, window);
|
||||
|
||||
if let Some(count) = user_active.get_mut(&ip) {
|
||||
*count = count.saturating_add(1);
|
||||
user_recent.insert(ip, now);
|
||||
return Ok(());
|
||||
}
|
||||
|
||||
// Проверяем, не превышен ли лимит
|
||||
if user_ips.len() >= limit {
|
||||
return Err(format!(
|
||||
"IP limit reached for user '{}': {}/{} unique IPs already connected",
|
||||
username,
|
||||
user_ips.len(),
|
||||
limit
|
||||
));
|
||||
if let Some(limit) = limit {
|
||||
let active_limit_reached = user_active.len() >= limit;
|
||||
let recent_limit_reached = user_recent.len() >= limit;
|
||||
let deny = match mode {
|
||||
UserMaxUniqueIpsMode::ActiveWindow => active_limit_reached,
|
||||
UserMaxUniqueIpsMode::TimeWindow => recent_limit_reached,
|
||||
UserMaxUniqueIpsMode::Combined => active_limit_reached || recent_limit_reached,
|
||||
};
|
||||
|
||||
if deny {
|
||||
return Err(format!(
|
||||
"IP limit reached for user '{}': active={}/{} recent={}/{} mode={:?}",
|
||||
username,
|
||||
user_active.len(),
|
||||
limit,
|
||||
user_recent.len(),
|
||||
limit,
|
||||
mode
|
||||
));
|
||||
}
|
||||
}
|
||||
|
||||
// Лимит не превышен - добавляем новый IP
|
||||
user_ips.insert(ip);
|
||||
user_active.insert(ip, 1);
|
||||
user_recent.insert(ip, now);
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Удалить IP-адрес из списка активных при отключении клиента
|
||||
///
|
||||
/// # Arguments
|
||||
/// * `username` - Имя пользователя
|
||||
/// * `ip` - IP-адрес отключившегося клиента
|
||||
pub async fn remove_ip(&self, username: &str, ip: IpAddr) {
|
||||
self.maybe_compact_empty_users().await;
|
||||
let mut active_ips = self.active_ips.write().await;
|
||||
|
||||
if let Some(user_ips) = active_ips.get_mut(username) {
|
||||
user_ips.remove(&ip);
|
||||
|
||||
// Если у пользователя не осталось активных IP - удаляем запись
|
||||
// для экономии памяти
|
||||
if let Some(count) = user_ips.get_mut(&ip) {
|
||||
if *count > 1 {
|
||||
*count -= 1;
|
||||
} else {
|
||||
user_ips.remove(&ip);
|
||||
}
|
||||
}
|
||||
if user_ips.is_empty() {
|
||||
active_ips.remove(username);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Получить текущее количество активных IP-адресов для пользователя
|
||||
///
|
||||
/// # Arguments
|
||||
/// * `username` - Имя пользователя
|
||||
///
|
||||
/// # Returns
|
||||
/// Количество уникальных активных IP-адресов
|
||||
pub async fn get_active_ip_count(&self, username: &str) -> usize {
|
||||
let active_ips = self.active_ips.read().await;
|
||||
active_ips
|
||||
.get(username)
|
||||
.map(|ips| ips.len())
|
||||
.unwrap_or(0)
|
||||
pub async fn get_recent_counts_for_users(&self, users: &[String]) -> HashMap<String, usize> {
|
||||
self.drain_cleanup_queue().await;
|
||||
let window = *self.limit_window.read().await;
|
||||
let now = Instant::now();
|
||||
let recent_ips = self.recent_ips.read().await;
|
||||
|
||||
let mut counts = HashMap::with_capacity(users.len());
|
||||
for user in users {
|
||||
let count = if let Some(user_recent) = recent_ips.get(user) {
|
||||
user_recent
|
||||
.values()
|
||||
.filter(|seen_at| now.duration_since(**seen_at) <= window)
|
||||
.count()
|
||||
} else {
|
||||
0
|
||||
};
|
||||
counts.insert(user.clone(), count);
|
||||
}
|
||||
counts
|
||||
}
|
||||
|
||||
pub async fn get_active_ips_for_users(&self, users: &[String]) -> HashMap<String, Vec<IpAddr>> {
|
||||
self.drain_cleanup_queue().await;
|
||||
let active_ips = self.active_ips.read().await;
|
||||
let mut out = HashMap::with_capacity(users.len());
|
||||
for user in users {
|
||||
let mut ips = active_ips
|
||||
.get(user)
|
||||
.map(|per_ip| per_ip.keys().copied().collect::<Vec<_>>())
|
||||
.unwrap_or_else(Vec::new);
|
||||
ips.sort();
|
||||
out.insert(user.clone(), ips);
|
||||
}
|
||||
out
|
||||
}
|
||||
|
||||
pub async fn get_recent_ips_for_users(&self, users: &[String]) -> HashMap<String, Vec<IpAddr>> {
|
||||
self.drain_cleanup_queue().await;
|
||||
let window = *self.limit_window.read().await;
|
||||
let now = Instant::now();
|
||||
let recent_ips = self.recent_ips.read().await;
|
||||
|
||||
let mut out = HashMap::with_capacity(users.len());
|
||||
for user in users {
|
||||
let mut ips = if let Some(user_recent) = recent_ips.get(user) {
|
||||
user_recent
|
||||
.iter()
|
||||
.filter(|(_, seen_at)| now.duration_since(**seen_at) <= window)
|
||||
.map(|(ip, _)| *ip)
|
||||
.collect::<Vec<_>>()
|
||||
} else {
|
||||
Vec::new()
|
||||
};
|
||||
ips.sort();
|
||||
out.insert(user.clone(), ips);
|
||||
}
|
||||
out
|
||||
}
|
||||
|
||||
pub async fn get_active_ip_count(&self, username: &str) -> usize {
|
||||
self.drain_cleanup_queue().await;
|
||||
let active_ips = self.active_ips.read().await;
|
||||
active_ips.get(username).map(|ips| ips.len()).unwrap_or(0)
|
||||
}
|
||||
|
||||
/// Получить список всех активных IP-адресов для пользователя
|
||||
///
|
||||
/// # Arguments
|
||||
/// * `username` - Имя пользователя
|
||||
///
|
||||
/// # Returns
|
||||
/// Вектор с активными IP-адресами
|
||||
pub async fn get_active_ips(&self, username: &str) -> Vec<IpAddr> {
|
||||
self.drain_cleanup_queue().await;
|
||||
let active_ips = self.active_ips.read().await;
|
||||
active_ips
|
||||
.get(username)
|
||||
.map(|ips| ips.iter().copied().collect())
|
||||
.map(|ips| ips.keys().copied().collect())
|
||||
.unwrap_or_else(Vec::new)
|
||||
}
|
||||
|
||||
/// Получить статистику по всем пользователям
|
||||
///
|
||||
/// # Returns
|
||||
/// Вектор кортежей: (имя_пользователя, количество_активных_IP, лимит)
|
||||
pub async fn get_stats(&self) -> Vec<(String, usize, usize)> {
|
||||
self.drain_cleanup_queue().await;
|
||||
let active_ips = self.active_ips.read().await;
|
||||
let max_ips = self.max_ips.read().await;
|
||||
let default_max_ips = *self.default_max_ips.read().await;
|
||||
|
||||
let mut stats = Vec::new();
|
||||
|
||||
// Собираем статистику по пользователям с активными подключениями
|
||||
for (username, user_ips) in active_ips.iter() {
|
||||
let limit = max_ips.get(username).copied().unwrap_or(0);
|
||||
let limit = max_ips
|
||||
.get(username)
|
||||
.copied()
|
||||
.filter(|limit| *limit > 0)
|
||||
.or((default_max_ips > 0).then_some(default_max_ips))
|
||||
.unwrap_or(0);
|
||||
stats.push((username.clone(), user_ips.len(), limit));
|
||||
}
|
||||
|
||||
stats.sort_by(|a, b| a.0.cmp(&b.0)); // Сортируем по имени пользователя
|
||||
|
||||
stats.sort_by(|a, b| a.0.cmp(&b.0));
|
||||
stats
|
||||
}
|
||||
|
||||
/// Очистить все активные IP для пользователя (при необходимости)
|
||||
///
|
||||
/// # Arguments
|
||||
/// * `username` - Имя пользователя
|
||||
pub async fn clear_user_ips(&self, username: &str) {
|
||||
let mut active_ips = self.active_ips.write().await;
|
||||
active_ips.remove(username);
|
||||
drop(active_ips);
|
||||
|
||||
let mut recent_ips = self.recent_ips.write().await;
|
||||
recent_ips.remove(username);
|
||||
}
|
||||
|
||||
/// Очистить всю статистику (использовать с осторожностью!)
|
||||
pub async fn clear_all(&self) {
|
||||
let mut active_ips = self.active_ips.write().await;
|
||||
active_ips.clear();
|
||||
drop(active_ips);
|
||||
|
||||
let mut recent_ips = self.recent_ips.write().await;
|
||||
recent_ips.clear();
|
||||
}
|
||||
|
||||
/// Проверить, подключен ли пользователь с данного IP
|
||||
///
|
||||
/// # Arguments
|
||||
/// * `username` - Имя пользователя
|
||||
/// * `ip` - IP-адрес для проверки
|
||||
///
|
||||
/// # Returns
|
||||
/// `true` если IP активен, `false` если нет
|
||||
pub async fn is_ip_active(&self, username: &str, ip: IpAddr) -> bool {
|
||||
self.drain_cleanup_queue().await;
|
||||
let active_ips = self.active_ips.read().await;
|
||||
active_ips
|
||||
.get(username)
|
||||
.map(|ips| ips.contains(&ip))
|
||||
.map(|ips| ips.contains_key(&ip))
|
||||
.unwrap_or(false)
|
||||
}
|
||||
|
||||
/// Получить лимит для пользователя
|
||||
///
|
||||
/// # Arguments
|
||||
/// * `username` - Имя пользователя
|
||||
///
|
||||
/// # Returns
|
||||
/// Лимит IP-адресов или None, если лимит не установлен
|
||||
pub async fn get_user_limit(&self, username: &str) -> Option<usize> {
|
||||
let default_max_ips = *self.default_max_ips.read().await;
|
||||
let max_ips = self.max_ips.read().await;
|
||||
max_ips.get(username).copied()
|
||||
max_ips
|
||||
.get(username)
|
||||
.copied()
|
||||
.filter(|limit| *limit > 0)
|
||||
.or((default_max_ips > 0).then_some(default_max_ips))
|
||||
}
|
||||
|
||||
/// Форматировать статистику в читаемый текст
|
||||
///
|
||||
/// # Returns
|
||||
/// Строка со статистикой для логов или мониторинга
|
||||
pub async fn format_stats(&self) -> String {
|
||||
let stats = self.get_stats().await;
|
||||
|
||||
|
||||
if stats.is_empty() {
|
||||
return String::from("No active users");
|
||||
}
|
||||
|
||||
|
||||
let mut output = String::from("User IP Statistics:\n");
|
||||
output.push_str("==================\n");
|
||||
|
||||
|
||||
for (username, active_count, limit) in stats {
|
||||
output.push_str(&format!(
|
||||
"User: {:<20} Active IPs: {}/{}\n",
|
||||
username,
|
||||
active_count,
|
||||
if limit > 0 { limit.to_string() } else { "unlimited".to_string() }
|
||||
if limit > 0 {
|
||||
limit.to_string()
|
||||
} else {
|
||||
"unlimited".to_string()
|
||||
}
|
||||
));
|
||||
|
||||
|
||||
let ips = self.get_active_ips(&username).await;
|
||||
for ip in ips {
|
||||
output.push_str(&format!(" └─ {}\n", ip));
|
||||
output.push_str(&format!(" - {}\n", ip));
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
output
|
||||
}
|
||||
}
|
||||
@@ -257,10 +447,6 @@ impl Default for UserIpTracker {
|
||||
}
|
||||
}
|
||||
|
||||
// ============================================================================
|
||||
// ТЕСТЫ
|
||||
// ============================================================================
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
@@ -283,17 +469,33 @@ mod tests {
|
||||
let ip2 = test_ipv4(192, 168, 1, 2);
|
||||
let ip3 = test_ipv4(192, 168, 1, 3);
|
||||
|
||||
// Первые два IP должны быть приняты
|
||||
assert!(tracker.check_and_add("test_user", ip1).await.is_ok());
|
||||
assert!(tracker.check_and_add("test_user", ip2).await.is_ok());
|
||||
|
||||
// Третий IP должен быть отклонен
|
||||
assert!(tracker.check_and_add("test_user", ip3).await.is_err());
|
||||
|
||||
// Проверяем счетчик
|
||||
assert_eq!(tracker.get_active_ip_count("test_user").await, 2);
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_active_window_rejects_new_ip_and_keeps_existing_session() {
|
||||
let tracker = UserIpTracker::new();
|
||||
tracker.set_user_limit("test_user", 1).await;
|
||||
tracker
|
||||
.set_limit_policy(UserMaxUniqueIpsMode::ActiveWindow, 30)
|
||||
.await;
|
||||
|
||||
let ip1 = test_ipv4(10, 10, 10, 1);
|
||||
let ip2 = test_ipv4(10, 10, 10, 2);
|
||||
|
||||
assert!(tracker.check_and_add("test_user", ip1).await.is_ok());
|
||||
assert!(tracker.is_ip_active("test_user", ip1).await);
|
||||
assert!(tracker.check_and_add("test_user", ip2).await.is_err());
|
||||
|
||||
// Existing session remains active; only new unique IP is denied.
|
||||
assert!(tracker.is_ip_active("test_user", ip1).await);
|
||||
assert_eq!(tracker.get_active_ip_count("test_user").await, 1);
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_reconnection_from_same_ip() {
|
||||
let tracker = UserIpTracker::new();
|
||||
@@ -301,16 +503,29 @@ mod tests {
|
||||
|
||||
let ip1 = test_ipv4(192, 168, 1, 1);
|
||||
|
||||
// Первое подключение
|
||||
assert!(tracker.check_and_add("test_user", ip1).await.is_ok());
|
||||
|
||||
// Повторное подключение с того же IP должно пройти
|
||||
assert!(tracker.check_and_add("test_user", ip1).await.is_ok());
|
||||
|
||||
// Счетчик не должен увеличиться
|
||||
assert_eq!(tracker.get_active_ip_count("test_user").await, 1);
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_same_ip_disconnect_keeps_active_while_other_session_alive() {
|
||||
let tracker = UserIpTracker::new();
|
||||
tracker.set_user_limit("test_user", 2).await;
|
||||
|
||||
let ip1 = test_ipv4(192, 168, 1, 1);
|
||||
|
||||
assert!(tracker.check_and_add("test_user", ip1).await.is_ok());
|
||||
assert!(tracker.check_and_add("test_user", ip1).await.is_ok());
|
||||
assert_eq!(tracker.get_active_ip_count("test_user").await, 1);
|
||||
|
||||
tracker.remove_ip("test_user", ip1).await;
|
||||
assert_eq!(tracker.get_active_ip_count("test_user").await, 1);
|
||||
|
||||
tracker.remove_ip("test_user", ip1).await;
|
||||
assert_eq!(tracker.get_active_ip_count("test_user").await, 0);
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_ip_removal() {
|
||||
let tracker = UserIpTracker::new();
|
||||
@@ -320,36 +535,28 @@ mod tests {
|
||||
let ip2 = test_ipv4(192, 168, 1, 2);
|
||||
let ip3 = test_ipv4(192, 168, 1, 3);
|
||||
|
||||
// Добавляем два IP
|
||||
assert!(tracker.check_and_add("test_user", ip1).await.is_ok());
|
||||
assert!(tracker.check_and_add("test_user", ip2).await.is_ok());
|
||||
|
||||
// Третий не должен пройти
|
||||
assert!(tracker.check_and_add("test_user", ip3).await.is_err());
|
||||
|
||||
// Удаляем первый IP
|
||||
tracker.remove_ip("test_user", ip1).await;
|
||||
|
||||
// Теперь третий должен пройти
|
||||
|
||||
assert!(tracker.check_and_add("test_user", ip3).await.is_ok());
|
||||
|
||||
assert_eq!(tracker.get_active_ip_count("test_user").await, 2);
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_no_limit() {
|
||||
let tracker = UserIpTracker::new();
|
||||
// Не устанавливаем лимит для test_user
|
||||
|
||||
let ip1 = test_ipv4(192, 168, 1, 1);
|
||||
let ip2 = test_ipv4(192, 168, 1, 2);
|
||||
let ip3 = test_ipv4(192, 168, 1, 3);
|
||||
|
||||
// Без лимита все IP должны проходить
|
||||
assert!(tracker.check_and_add("test_user", ip1).await.is_ok());
|
||||
assert!(tracker.check_and_add("test_user", ip2).await.is_ok());
|
||||
assert!(tracker.check_and_add("test_user", ip3).await.is_ok());
|
||||
|
||||
|
||||
assert_eq!(tracker.get_active_ip_count("test_user").await, 3);
|
||||
}
|
||||
|
||||
@@ -362,11 +569,9 @@ mod tests {
|
||||
let ip1 = test_ipv4(192, 168, 1, 1);
|
||||
let ip2 = test_ipv4(192, 168, 1, 2);
|
||||
|
||||
// user1 может использовать 2 IP
|
||||
assert!(tracker.check_and_add("user1", ip1).await.is_ok());
|
||||
assert!(tracker.check_and_add("user1", ip2).await.is_ok());
|
||||
|
||||
// user2 может использовать только 1 IP
|
||||
assert!(tracker.check_and_add("user2", ip1).await.is_ok());
|
||||
assert!(tracker.check_and_add("user2", ip2).await.is_err());
|
||||
}
|
||||
@@ -379,10 +584,9 @@ mod tests {
|
||||
let ipv4 = test_ipv4(192, 168, 1, 1);
|
||||
let ipv6 = test_ipv6();
|
||||
|
||||
// Должны работать оба типа адресов
|
||||
assert!(tracker.check_and_add("test_user", ipv4).await.is_ok());
|
||||
assert!(tracker.check_and_add("test_user", ipv6).await.is_ok());
|
||||
|
||||
|
||||
assert_eq!(tracker.get_active_ip_count("test_user").await, 2);
|
||||
}
|
||||
|
||||
@@ -417,8 +621,7 @@ mod tests {
|
||||
|
||||
let stats = tracker.get_stats().await;
|
||||
assert_eq!(stats.len(), 2);
|
||||
|
||||
// Проверяем наличие обоих пользователей в статистике
|
||||
|
||||
assert!(stats.iter().any(|(name, _, _)| name == "user1"));
|
||||
assert!(stats.iter().any(|(name, _, _)| name == "user2"));
|
||||
}
|
||||
@@ -427,10 +630,10 @@ mod tests {
|
||||
async fn test_clear_user_ips() {
|
||||
let tracker = UserIpTracker::new();
|
||||
let ip1 = test_ipv4(192, 168, 1, 1);
|
||||
|
||||
|
||||
tracker.check_and_add("test_user", ip1).await.unwrap();
|
||||
assert_eq!(tracker.get_active_ip_count("test_user").await, 1);
|
||||
|
||||
|
||||
tracker.clear_user_ips("test_user").await;
|
||||
assert_eq!(tracker.get_active_ip_count("test_user").await, 0);
|
||||
}
|
||||
@@ -440,9 +643,9 @@ mod tests {
|
||||
let tracker = UserIpTracker::new();
|
||||
let ip1 = test_ipv4(192, 168, 1, 1);
|
||||
let ip2 = test_ipv4(192, 168, 1, 2);
|
||||
|
||||
|
||||
tracker.check_and_add("test_user", ip1).await.unwrap();
|
||||
|
||||
|
||||
assert!(tracker.is_ip_active("test_user", ip1).await);
|
||||
assert!(!tracker.is_ip_active("test_user", ip2).await);
|
||||
}
|
||||
@@ -450,15 +653,115 @@ mod tests {
|
||||
#[tokio::test]
|
||||
async fn test_load_limits_from_config() {
|
||||
let tracker = UserIpTracker::new();
|
||||
|
||||
|
||||
let mut config_limits = HashMap::new();
|
||||
config_limits.insert("user1".to_string(), 5);
|
||||
config_limits.insert("user2".to_string(), 3);
|
||||
|
||||
tracker.load_limits(&config_limits).await;
|
||||
|
||||
|
||||
tracker.load_limits(0, &config_limits).await;
|
||||
|
||||
assert_eq!(tracker.get_user_limit("user1").await, Some(5));
|
||||
assert_eq!(tracker.get_user_limit("user2").await, Some(3));
|
||||
assert_eq!(tracker.get_user_limit("user3").await, None);
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_load_limits_replaces_previous_map() {
|
||||
let tracker = UserIpTracker::new();
|
||||
|
||||
let mut first = HashMap::new();
|
||||
first.insert("user1".to_string(), 2);
|
||||
first.insert("user2".to_string(), 3);
|
||||
tracker.load_limits(0, &first).await;
|
||||
|
||||
let mut second = HashMap::new();
|
||||
second.insert("user2".to_string(), 5);
|
||||
tracker.load_limits(0, &second).await;
|
||||
|
||||
assert_eq!(tracker.get_user_limit("user1").await, None);
|
||||
assert_eq!(tracker.get_user_limit("user2").await, Some(5));
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_global_each_limit_applies_without_user_override() {
|
||||
let tracker = UserIpTracker::new();
|
||||
tracker.load_limits(2, &HashMap::new()).await;
|
||||
|
||||
let ip1 = test_ipv4(172, 16, 0, 1);
|
||||
let ip2 = test_ipv4(172, 16, 0, 2);
|
||||
let ip3 = test_ipv4(172, 16, 0, 3);
|
||||
|
||||
assert!(tracker.check_and_add("test_user", ip1).await.is_ok());
|
||||
assert!(tracker.check_and_add("test_user", ip2).await.is_ok());
|
||||
assert!(tracker.check_and_add("test_user", ip3).await.is_err());
|
||||
assert_eq!(tracker.get_user_limit("test_user").await, Some(2));
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_user_override_wins_over_global_each_limit() {
|
||||
let tracker = UserIpTracker::new();
|
||||
let mut limits = HashMap::new();
|
||||
limits.insert("test_user".to_string(), 1);
|
||||
tracker.load_limits(3, &limits).await;
|
||||
|
||||
let ip1 = test_ipv4(172, 17, 0, 1);
|
||||
let ip2 = test_ipv4(172, 17, 0, 2);
|
||||
|
||||
assert!(tracker.check_and_add("test_user", ip1).await.is_ok());
|
||||
assert!(tracker.check_and_add("test_user", ip2).await.is_err());
|
||||
assert_eq!(tracker.get_user_limit("test_user").await, Some(1));
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_time_window_mode_blocks_recent_ip_churn() {
|
||||
let tracker = UserIpTracker::new();
|
||||
tracker.set_user_limit("test_user", 1).await;
|
||||
tracker
|
||||
.set_limit_policy(UserMaxUniqueIpsMode::TimeWindow, 30)
|
||||
.await;
|
||||
|
||||
let ip1 = test_ipv4(10, 0, 0, 1);
|
||||
let ip2 = test_ipv4(10, 0, 0, 2);
|
||||
|
||||
assert!(tracker.check_and_add("test_user", ip1).await.is_ok());
|
||||
tracker.remove_ip("test_user", ip1).await;
|
||||
assert!(tracker.check_and_add("test_user", ip2).await.is_err());
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_combined_mode_enforces_active_and_recent_limits() {
|
||||
let tracker = UserIpTracker::new();
|
||||
tracker.set_user_limit("test_user", 1).await;
|
||||
tracker
|
||||
.set_limit_policy(UserMaxUniqueIpsMode::Combined, 30)
|
||||
.await;
|
||||
|
||||
let ip1 = test_ipv4(10, 0, 1, 1);
|
||||
let ip2 = test_ipv4(10, 0, 1, 2);
|
||||
|
||||
assert!(tracker.check_and_add("test_user", ip1).await.is_ok());
|
||||
assert!(tracker.check_and_add("test_user", ip2).await.is_err());
|
||||
|
||||
tracker.remove_ip("test_user", ip1).await;
|
||||
assert!(tracker.check_and_add("test_user", ip2).await.is_err());
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_time_window_expires() {
|
||||
let tracker = UserIpTracker::new();
|
||||
tracker.set_user_limit("test_user", 1).await;
|
||||
tracker
|
||||
.set_limit_policy(UserMaxUniqueIpsMode::TimeWindow, 1)
|
||||
.await;
|
||||
|
||||
let ip1 = test_ipv4(10, 1, 0, 1);
|
||||
let ip2 = test_ipv4(10, 1, 0, 2);
|
||||
|
||||
assert!(tracker.check_and_add("test_user", ip1).await.is_ok());
|
||||
tracker.remove_ip("test_user", ip1).await;
|
||||
assert!(tracker.check_and_add("test_user", ip2).await.is_err());
|
||||
|
||||
tokio::time::sleep(Duration::from_millis(1100)).await;
|
||||
assert!(tracker.check_and_add("test_user", ip2).await.is_ok());
|
||||
}
|
||||
}
|
||||
|
||||
130
src/maestro/admission.rs
Normal file
130
src/maestro/admission.rs
Normal file
@@ -0,0 +1,130 @@
|
||||
use std::sync::Arc;
|
||||
use std::time::{Duration, Instant};
|
||||
|
||||
use tokio::sync::watch;
|
||||
use tracing::{info, warn};
|
||||
|
||||
use crate::config::ProxyConfig;
|
||||
use crate::proxy::route_mode::{RelayRouteMode, RouteRuntimeController};
|
||||
use crate::transport::middle_proxy::MePool;
|
||||
|
||||
const STARTUP_FALLBACK_AFTER: Duration = Duration::from_secs(80);
|
||||
const RUNTIME_FALLBACK_AFTER: Duration = Duration::from_secs(6);
|
||||
|
||||
pub(crate) async fn configure_admission_gate(
|
||||
config: &Arc<ProxyConfig>,
|
||||
me_pool: Option<Arc<MePool>>,
|
||||
route_runtime: Arc<RouteRuntimeController>,
|
||||
admission_tx: &watch::Sender<bool>,
|
||||
config_rx: watch::Receiver<Arc<ProxyConfig>>,
|
||||
) {
|
||||
if config.general.use_middle_proxy {
|
||||
if let Some(pool) = me_pool.as_ref() {
|
||||
let initial_ready = pool.admission_ready_conditional_cast().await;
|
||||
admission_tx.send_replace(initial_ready);
|
||||
let _ = route_runtime.set_mode(RelayRouteMode::Middle);
|
||||
if initial_ready {
|
||||
info!("Conditional-admission gate: open / ME pool READY");
|
||||
} else {
|
||||
warn!("Conditional-admission gate: closed / ME pool is NOT ready)");
|
||||
}
|
||||
|
||||
let pool_for_gate = pool.clone();
|
||||
let admission_tx_gate = admission_tx.clone();
|
||||
let route_runtime_gate = route_runtime.clone();
|
||||
let mut config_rx_gate = config_rx.clone();
|
||||
let mut admission_poll_ms = config.general.me_admission_poll_ms.max(1);
|
||||
let mut fallback_enabled = config.general.me2dc_fallback;
|
||||
tokio::spawn(async move {
|
||||
let mut gate_open = initial_ready;
|
||||
let mut route_mode = RelayRouteMode::Middle;
|
||||
let mut ready_observed = initial_ready;
|
||||
let mut not_ready_since = if initial_ready {
|
||||
None
|
||||
} else {
|
||||
Some(Instant::now())
|
||||
};
|
||||
loop {
|
||||
tokio::select! {
|
||||
changed = config_rx_gate.changed() => {
|
||||
if changed.is_err() {
|
||||
break;
|
||||
}
|
||||
let cfg = config_rx_gate.borrow_and_update().clone();
|
||||
admission_poll_ms = cfg.general.me_admission_poll_ms.max(1);
|
||||
fallback_enabled = cfg.general.me2dc_fallback;
|
||||
continue;
|
||||
}
|
||||
_ = tokio::time::sleep(Duration::from_millis(admission_poll_ms)) => {}
|
||||
}
|
||||
let ready = pool_for_gate.admission_ready_conditional_cast().await;
|
||||
let now = Instant::now();
|
||||
let (next_gate_open, next_route_mode, next_fallback_active) = if ready {
|
||||
ready_observed = true;
|
||||
not_ready_since = None;
|
||||
(true, RelayRouteMode::Middle, false)
|
||||
} else {
|
||||
let not_ready_started_at = *not_ready_since.get_or_insert(now);
|
||||
let not_ready_for = now.saturating_duration_since(not_ready_started_at);
|
||||
let fallback_after = if ready_observed {
|
||||
RUNTIME_FALLBACK_AFTER
|
||||
} else {
|
||||
STARTUP_FALLBACK_AFTER
|
||||
};
|
||||
if fallback_enabled && not_ready_for > fallback_after {
|
||||
(true, RelayRouteMode::Direct, true)
|
||||
} else {
|
||||
(false, RelayRouteMode::Middle, false)
|
||||
}
|
||||
};
|
||||
|
||||
if next_route_mode != route_mode {
|
||||
route_mode = next_route_mode;
|
||||
if let Some(snapshot) = route_runtime_gate.set_mode(route_mode) {
|
||||
if matches!(route_mode, RelayRouteMode::Middle) {
|
||||
info!(
|
||||
target_mode = route_mode.as_str(),
|
||||
cutover_generation = snapshot.generation,
|
||||
"Middle-End routing restored for new sessions"
|
||||
);
|
||||
} else {
|
||||
let fallback_after = if ready_observed {
|
||||
RUNTIME_FALLBACK_AFTER
|
||||
} else {
|
||||
STARTUP_FALLBACK_AFTER
|
||||
};
|
||||
warn!(
|
||||
target_mode = route_mode.as_str(),
|
||||
cutover_generation = snapshot.generation,
|
||||
grace_secs = fallback_after.as_secs(),
|
||||
"ME pool stayed not-ready beyond grace; routing new sessions via Direct-DC"
|
||||
);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if next_gate_open != gate_open {
|
||||
gate_open = next_gate_open;
|
||||
admission_tx_gate.send_replace(gate_open);
|
||||
if gate_open {
|
||||
if next_fallback_active {
|
||||
warn!("Conditional-admission gate opened in ME fallback mode");
|
||||
} else {
|
||||
info!("Conditional-admission gate opened / ME pool READY");
|
||||
}
|
||||
} else {
|
||||
warn!("Conditional-admission gate closed / ME pool is NOT ready");
|
||||
}
|
||||
}
|
||||
}
|
||||
});
|
||||
} else {
|
||||
admission_tx.send_replace(false);
|
||||
let _ = route_runtime.set_mode(RelayRouteMode::Direct);
|
||||
warn!("Conditional-admission gate: closed / ME pool is UNAVAILABLE");
|
||||
}
|
||||
} else {
|
||||
admission_tx.send_replace(true);
|
||||
let _ = route_runtime.set_mode(RelayRouteMode::Direct);
|
||||
}
|
||||
}
|
||||
232
src/maestro/connectivity.rs
Normal file
232
src/maestro/connectivity.rs
Normal file
@@ -0,0 +1,232 @@
|
||||
#![allow(clippy::too_many_arguments)]
|
||||
|
||||
use std::sync::Arc;
|
||||
use std::time::Instant;
|
||||
|
||||
use tokio::sync::RwLock;
|
||||
use tracing::info;
|
||||
|
||||
use crate::config::ProxyConfig;
|
||||
use crate::crypto::SecureRandom;
|
||||
use crate::network::probe::NetworkDecision;
|
||||
use crate::startup::{
|
||||
COMPONENT_DC_CONNECTIVITY_PING, COMPONENT_ME_CONNECTIVITY_PING, COMPONENT_RUNTIME_READY,
|
||||
StartupTracker,
|
||||
};
|
||||
use crate::transport::UpstreamManager;
|
||||
use crate::transport::middle_proxy::{
|
||||
MePingFamily, MePingSample, MePool, format_me_route, format_sample_line, run_me_ping,
|
||||
};
|
||||
|
||||
/// Run the one-shot startup connectivity checks and publish readiness.
///
/// Sequence:
/// 1. ME (middle-proxy) ping over the pool, if one exists — results are
///    logged grouped by DC and address family.
/// 2. DC ping through `upstream_manager` for every configured upstream.
/// 3. Marks the runtime ready: flips `set_runtime_ready(true)` on the ME
///    pool and publishes the pool into `api_me_pool` for the API layer.
///
/// Progress is reported through `startup_tracker` components
/// (`COMPONENT_ME_CONNECTIVITY_PING`, `COMPONENT_DC_CONNECTIVITY_PING`,
/// `COMPONENT_RUNTIME_READY` — the last one is started here; completion
/// presumably happens in the caller, TODO confirm).
pub(crate) async fn run_startup_connectivity(
    config: &Arc<ProxyConfig>,
    me_pool: &Option<Arc<MePool>>,
    rng: Arc<SecureRandom>,
    startup_tracker: &Arc<StartupTracker>,
    upstream_manager: Arc<UpstreamManager>,
    prefer_ipv6: bool,
    decision: &NetworkDecision,
    process_started_at: Instant,
    api_me_pool: Arc<RwLock<Option<Arc<MePool>>>>,
) {
    // Announce the ME check up front so the tracker shows it as either
    // running or explicitly skipped (no pool configured).
    if me_pool.is_some() {
        startup_tracker
            .start_component(
                COMPONENT_ME_CONNECTIVITY_PING,
                Some("run startup ME connectivity check".to_string()),
            )
            .await;
    } else {
        startup_tracker
            .skip_component(
                COMPONENT_ME_CONNECTIVITY_PING,
                Some("ME pool is not available".to_string()),
            )
            .await;
    }
    if let Some(pool) = me_pool {
        let me_results = run_me_ping(pool, &rng).await;

        // A family "works" when at least one sample completed a handshake
        // without error.
        let v4_ok = me_results.iter().any(|r| {
            matches!(r.family, MePingFamily::V4)
                && r.samples
                    .iter()
                    .any(|s| s.error.is_none() && s.handshake_ms.is_some())
        });
        let v6_ok = me_results.iter().any(|r| {
            matches!(r.family, MePingFamily::V6)
                && r.samples
                    .iter()
                    .any(|s| s.error.is_none() && s.handshake_ms.is_some())
        });

        info!("================= Telegram ME Connectivity =================");
        if v4_ok && v6_ok {
            info!(" IPv4 and IPv6 available");
        } else if v4_ok {
            info!(" IPv4 only / IPv6 unavailable");
        } else if v6_ok {
            info!(" IPv6 only / IPv4 unavailable");
        } else {
            info!(" No ME connectivity");
        }
        let me_route =
            format_me_route(&config.upstreams, &me_results, prefer_ipv6, v4_ok, v6_ok).await;
        info!(" via {}", me_route);
        info!("============================================================");

        // Re-group the per-family reports by DC id so the log reads
        // DC-by-DC; BTreeMap keeps DC ids sorted.
        use std::collections::BTreeMap;
        let mut grouped: BTreeMap<i32, Vec<MePingSample>> = BTreeMap::new();
        for report in me_results {
            for s in report.samples {
                grouped.entry(s.dc).or_default().push(s);
            }
        }

        // Print the preferred family first within each DC.
        let family_order = if prefer_ipv6 {
            vec![MePingFamily::V6, MePingFamily::V4]
        } else {
            vec![MePingFamily::V4, MePingFamily::V6]
        };

        for (dc, samples) in grouped {
            for family in &family_order {
                let fam_samples: Vec<&MePingSample> = samples
                    .iter()
                    .filter(|s| matches!(s.family, f if &f == family))
                    .collect();
                if fam_samples.is_empty() {
                    continue;
                }

                let fam_label = match family {
                    MePingFamily::V4 => "IPv4",
                    MePingFamily::V6 => "IPv6",
                };
                info!(" DC{} [{}]", dc, fam_label);
                for sample in fam_samples {
                    let line = format_sample_line(sample);
                    info!("{}", line);
                }
            }
        }
        info!("============================================================");
        startup_tracker
            .complete_component(
                COMPONENT_ME_CONNECTIVITY_PING,
                Some("startup ME connectivity check completed".to_string()),
            )
            .await;
    }

    info!("================= Telegram DC Connectivity =================");
    startup_tracker
        .start_component(
            COMPONENT_DC_CONNECTIVITY_PING,
            Some("run startup DC connectivity check".to_string()),
        )
        .await;

    let ping_results = upstream_manager
        .ping_all_dcs(
            prefer_ipv6,
            &config.dc_overrides,
            decision.ipv4_dc,
            decision.ipv6_dc,
        )
        .await;

    for upstream_result in &ping_results {
        // A family "works" when any DC answered with an RTT.
        let v6_works = upstream_result
            .v6_results
            .iter()
            .any(|r| r.rtt_ms.is_some());
        let v4_works = upstream_result
            .v4_results
            .iter()
            .any(|r| r.rtt_ms.is_some());

        if upstream_result.both_available {
            if prefer_ipv6 {
                info!(" IPv6 in use / IPv4 is fallback");
            } else {
                info!(" IPv4 in use / IPv6 is fallback");
            }
        } else if v6_works && !v4_works {
            info!(" IPv6 only / IPv4 unavailable");
        } else if v4_works && !v6_works {
            info!(" IPv4 only / IPv6 unavailable");
        } else if !v6_works && !v4_works {
            info!(" No DC connectivity");
        }

        info!(" via {}", upstream_result.upstream_name);
        info!("============================================================");

        // Per-DC detail lines, one section per family that responded.
        if v6_works {
            for dc in &upstream_result.v6_results {
                let addr_str = format!("{}:{}", dc.dc_addr.ip(), dc.dc_addr.port());
                match &dc.rtt_ms {
                    Some(rtt) => {
                        info!(" DC{} [IPv6] {} - {:.0} ms", dc.dc_idx, addr_str, rtt);
                    }
                    None => {
                        let err = dc.error.as_deref().unwrap_or("fail");
                        info!(" DC{} [IPv6] {} - FAIL ({})", dc.dc_idx, addr_str, err);
                    }
                }
            }

            info!("============================================================");
        }

        if v4_works {
            for dc in &upstream_result.v4_results {
                let addr_str = format!("{}:{}", dc.dc_addr.ip(), dc.dc_addr.port());
                match &dc.rtt_ms {
                    Some(rtt) => {
                        info!(
                            " DC{} [IPv4] {}\t\t\t\t{:.0} ms",
                            dc.dc_idx, addr_str, rtt
                        );
                    }
                    None => {
                        let err = dc.error.as_deref().unwrap_or("fail");
                        info!(
                            " DC{} [IPv4] {}:\t\t\t\tFAIL ({})",
                            dc.dc_idx, addr_str, err
                        );
                    }
                }
            }

            info!("============================================================");
        }
    }
    startup_tracker
        .complete_component(
            COMPONENT_DC_CONNECTIVITY_PING,
            Some("startup DC connectivity check completed".to_string()),
        )
        .await;

    let initialized_secs = process_started_at.elapsed().as_secs();
    let second_suffix = if initialized_secs == 1 { "" } else { "s" };
    startup_tracker
        .start_component(
            COMPONENT_RUNTIME_READY,
            Some("finalize startup runtime state".to_string()),
        )
        .await;
    info!("===================== Telegram Startup =====================");
    info!(
        " DC/ME Initialized in {} second{}",
        initialized_secs, second_suffix
    );
    info!("============================================================");

    // Open the gates: mark the ME pool ready and expose it to the API.
    if let Some(pool) = me_pool {
        pool.set_runtime_ready(true);
    }
    *api_me_pool.write().await = me_pool.clone();
}
|
||||
402
src/maestro/helpers.rs
Normal file
402
src/maestro/helpers.rs
Normal file
@@ -0,0 +1,402 @@
|
||||
#![allow(clippy::items_after_test_module)]
|
||||
|
||||
use std::path::PathBuf;
|
||||
use std::time::Duration;
|
||||
|
||||
use tokio::sync::watch;
|
||||
use tracing::{debug, error, info, warn};
|
||||
|
||||
use crate::cli;
|
||||
use crate::config::ProxyConfig;
|
||||
use crate::transport::UpstreamManager;
|
||||
use crate::transport::middle_proxy::{
|
||||
ProxyConfigData, fetch_proxy_config_with_raw_via_upstream, load_proxy_config_cache,
|
||||
save_proxy_config_cache,
|
||||
};
|
||||
|
||||
/// Resolve the CLI-provided config path to an absolute path.
///
/// Relative paths are anchored at `startup_cwd` (the directory the process
/// was launched from). The result is canonicalized when the file exists;
/// otherwise the un-canonicalized absolute path is returned unchanged.
pub(crate) fn resolve_runtime_config_path(
    config_path_cli: &str,
    startup_cwd: &std::path::Path,
) -> PathBuf {
    let candidate = PathBuf::from(config_path_cli);
    let anchored = if candidate.is_absolute() {
        candidate
    } else {
        startup_cwd.join(candidate)
    };
    // canonicalize() fails for paths that don't exist yet; fall back to
    // the plain absolute path in that case.
    match anchored.canonicalize() {
        Ok(resolved) => resolved,
        Err(_) => anchored,
    }
}
|
||||
|
||||
pub(crate) fn parse_cli() -> (String, Option<PathBuf>, bool, Option<String>) {
|
||||
let mut config_path = "config.toml".to_string();
|
||||
let mut data_path: Option<PathBuf> = None;
|
||||
let mut silent = false;
|
||||
let mut log_level: Option<String> = None;
|
||||
|
||||
let args: Vec<String> = std::env::args().skip(1).collect();
|
||||
|
||||
// Check for --init first (handled before tokio)
|
||||
if let Some(init_opts) = cli::parse_init_args(&args) {
|
||||
if let Err(e) = cli::run_init(init_opts) {
|
||||
eprintln!("[telemt] Init failed: {}", e);
|
||||
std::process::exit(1);
|
||||
}
|
||||
std::process::exit(0);
|
||||
}
|
||||
|
||||
let mut i = 0;
|
||||
while i < args.len() {
|
||||
match args[i].as_str() {
|
||||
"--data-path" => {
|
||||
i += 1;
|
||||
if i < args.len() {
|
||||
data_path = Some(PathBuf::from(args[i].clone()));
|
||||
} else {
|
||||
eprintln!("Missing value for --data-path");
|
||||
std::process::exit(0);
|
||||
}
|
||||
}
|
||||
s if s.starts_with("--data-path=") => {
|
||||
data_path = Some(PathBuf::from(
|
||||
s.trim_start_matches("--data-path=").to_string(),
|
||||
));
|
||||
}
|
||||
"--silent" | "-s" => {
|
||||
silent = true;
|
||||
}
|
||||
"--log-level" => {
|
||||
i += 1;
|
||||
if i < args.len() {
|
||||
log_level = Some(args[i].clone());
|
||||
}
|
||||
}
|
||||
s if s.starts_with("--log-level=") => {
|
||||
log_level = Some(s.trim_start_matches("--log-level=").to_string());
|
||||
}
|
||||
"--help" | "-h" => {
|
||||
eprintln!("Usage: telemt [config.toml] [OPTIONS]");
|
||||
eprintln!();
|
||||
eprintln!("Options:");
|
||||
eprintln!(
|
||||
" --data-path <DIR> Set data directory (absolute path; overrides config value)"
|
||||
);
|
||||
eprintln!(" --silent, -s Suppress info logs");
|
||||
eprintln!(" --log-level <LEVEL> debug|verbose|normal|silent");
|
||||
eprintln!(" --help, -h Show this help");
|
||||
eprintln!();
|
||||
eprintln!("Setup (fire-and-forget):");
|
||||
eprintln!(
|
||||
" --init Generate config, install systemd service, start"
|
||||
);
|
||||
eprintln!(" --port <PORT> Listen port (default: 443)");
|
||||
eprintln!(
|
||||
" --domain <DOMAIN> TLS domain for masking (default: www.google.com)"
|
||||
);
|
||||
eprintln!(
|
||||
" --secret <HEX> 32-char hex secret (auto-generated if omitted)"
|
||||
);
|
||||
eprintln!(" --user <NAME> Username (default: user)");
|
||||
eprintln!(" --config-dir <DIR> Config directory (default: /etc/telemt)");
|
||||
eprintln!(" --no-start Don't start the service after install");
|
||||
std::process::exit(0);
|
||||
}
|
||||
"--version" | "-V" => {
|
||||
println!("telemt {}", env!("CARGO_PKG_VERSION"));
|
||||
std::process::exit(0);
|
||||
}
|
||||
s if !s.starts_with('-') => {
|
||||
config_path = s.to_string();
|
||||
}
|
||||
other => {
|
||||
eprintln!("Unknown option: {}", other);
|
||||
}
|
||||
}
|
||||
i += 1;
|
||||
}
|
||||
|
||||
(config_path, data_path, silent, log_level)
|
||||
}
|
||||
|
||||
#[cfg(test)]
mod tests {
    use super::resolve_runtime_config_path;

    // A relative CLI path must be anchored at the startup cwd and
    // canonicalized when the file exists.
    #[test]
    fn resolve_runtime_config_path_anchors_relative_to_startup_cwd() {
        // Nanosecond nonce keeps parallel test runs from colliding on the
        // same temp directory.
        let nonce = std::time::SystemTime::now()
            .duration_since(std::time::UNIX_EPOCH)
            .unwrap()
            .as_nanos();
        let startup_cwd = std::env::temp_dir().join(format!("telemt_cfg_path_{nonce}"));
        std::fs::create_dir_all(&startup_cwd).unwrap();
        let target = startup_cwd.join("config.toml");
        std::fs::write(&target, " ").unwrap();

        let resolved = resolve_runtime_config_path("config.toml", &startup_cwd);
        assert_eq!(resolved, target.canonicalize().unwrap());

        // Best-effort cleanup; failures are ignored on purpose.
        let _ = std::fs::remove_file(&target);
        let _ = std::fs::remove_dir(&startup_cwd);
    }

    // When the file does not exist, canonicalization is impossible and the
    // plain absolute path must be returned unchanged.
    #[test]
    fn resolve_runtime_config_path_keeps_absolute_for_missing_file() {
        let nonce = std::time::SystemTime::now()
            .duration_since(std::time::UNIX_EPOCH)
            .unwrap()
            .as_nanos();
        let startup_cwd = std::env::temp_dir().join(format!("telemt_cfg_path_missing_{nonce}"));
        std::fs::create_dir_all(&startup_cwd).unwrap();

        let resolved = resolve_runtime_config_path("missing.toml", &startup_cwd);
        assert_eq!(resolved, startup_cwd.join("missing.toml"));

        let _ = std::fs::remove_dir(&startup_cwd);
    }
}
|
||||
|
||||
pub(crate) fn print_proxy_links(host: &str, port: u16, config: &ProxyConfig) {
|
||||
info!(target: "telemt::links", "--- Proxy Links ({}) ---", host);
|
||||
for user_name in config
|
||||
.general
|
||||
.links
|
||||
.show
|
||||
.resolve_users(&config.access.users)
|
||||
{
|
||||
if let Some(secret) = config.access.users.get(user_name) {
|
||||
info!(target: "telemt::links", "User: {}", user_name);
|
||||
if config.general.modes.classic {
|
||||
info!(
|
||||
target: "telemt::links",
|
||||
" Classic: tg://proxy?server={}&port={}&secret={}",
|
||||
host, port, secret
|
||||
);
|
||||
}
|
||||
if config.general.modes.secure {
|
||||
info!(
|
||||
target: "telemt::links",
|
||||
" DD: tg://proxy?server={}&port={}&secret=dd{}",
|
||||
host, port, secret
|
||||
);
|
||||
}
|
||||
if config.general.modes.tls {
|
||||
let mut domains = Vec::with_capacity(1 + config.censorship.tls_domains.len());
|
||||
domains.push(config.censorship.tls_domain.clone());
|
||||
for d in &config.censorship.tls_domains {
|
||||
if !domains.contains(d) {
|
||||
domains.push(d.clone());
|
||||
}
|
||||
}
|
||||
|
||||
for domain in domains {
|
||||
let domain_hex = hex::encode(&domain);
|
||||
info!(
|
||||
target: "telemt::links",
|
||||
" EE-TLS: tg://proxy?server={}&port={}&secret=ee{}{}",
|
||||
host, port, secret, domain_hex
|
||||
);
|
||||
}
|
||||
}
|
||||
} else {
|
||||
warn!(target: "telemt::links", "User '{}' in show_link not found", user_name);
|
||||
}
|
||||
}
|
||||
info!(target: "telemt::links", "------------------------");
|
||||
}
|
||||
|
||||
pub(crate) async fn write_beobachten_snapshot(path: &str, payload: &str) -> std::io::Result<()> {
|
||||
if let Some(parent) = std::path::Path::new(path).parent()
|
||||
&& !parent.as_os_str().is_empty()
|
||||
{
|
||||
tokio::fs::create_dir_all(parent).await?;
|
||||
}
|
||||
tokio::fs::write(path, payload).await
|
||||
}
|
||||
|
||||
/// Pick the singular or plural form of a unit name for `value`.
pub(crate) fn unit_label(value: u64, singular: &'static str, plural: &'static str) -> &'static str {
    if value == 1 { singular } else { plural }
}

/// Render an uptime as "N years, N months, … / T seconds".
///
/// A unit appears once the total uptime has reached it; smaller units are
/// always shown below the largest one (seconds are unconditional). Months
/// are fixed at 30 days and years at 12 such months.
pub(crate) fn format_uptime(total_secs: u64) -> String {
    const SECS_PER_MINUTE: u64 = 60;
    const SECS_PER_HOUR: u64 = 60 * SECS_PER_MINUTE;
    const SECS_PER_DAY: u64 = 24 * SECS_PER_HOUR;
    const SECS_PER_MONTH: u64 = 30 * SECS_PER_DAY;
    const SECS_PER_YEAR: u64 = 12 * SECS_PER_MONTH;

    let mut remaining = total_secs;
    let years = remaining / SECS_PER_YEAR;
    remaining %= SECS_PER_YEAR;
    let months = remaining / SECS_PER_MONTH;
    remaining %= SECS_PER_MONTH;
    let days = remaining / SECS_PER_DAY;
    remaining %= SECS_PER_DAY;
    let hours = remaining / SECS_PER_HOUR;
    remaining %= SECS_PER_HOUR;
    let minutes = remaining / SECS_PER_MINUTE;
    let seconds = remaining % SECS_PER_MINUTE;

    let mut parts = Vec::new();
    // Fix: use >= so an uptime of exactly one unit (e.g. 3600s) still
    // prints that unit ("1 hour, …") instead of silently dropping it.
    if total_secs >= SECS_PER_YEAR {
        parts.push(format!("{} {}", years, unit_label(years, "year", "years")));
    }
    if total_secs >= SECS_PER_MONTH {
        parts.push(format!(
            "{} {}",
            months,
            unit_label(months, "month", "months")
        ));
    }
    if total_secs >= SECS_PER_DAY {
        parts.push(format!("{} {}", days, unit_label(days, "day", "days")));
    }
    if total_secs >= SECS_PER_HOUR {
        parts.push(format!("{} {}", hours, unit_label(hours, "hour", "hours")));
    }
    if total_secs >= SECS_PER_MINUTE {
        parts.push(format!(
            "{} {}",
            minutes,
            unit_label(minutes, "minute", "minutes")
        ));
    }
    // Seconds always appear, even for a zero uptime.
    parts.push(format!(
        "{} {}",
        seconds,
        unit_label(seconds, "second", "seconds")
    ));

    format!("{} / {} seconds", parts.join(", "), total_secs)
}
|
||||
|
||||
#[allow(dead_code)]
|
||||
pub(crate) async fn wait_until_admission_open(admission_rx: &mut watch::Receiver<bool>) -> bool {
|
||||
loop {
|
||||
if *admission_rx.borrow() {
|
||||
return true;
|
||||
}
|
||||
if admission_rx.changed().await.is_err() {
|
||||
return *admission_rx.borrow();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub(crate) fn is_expected_handshake_eof(err: &crate::error::ProxyError) -> bool {
|
||||
err.to_string().contains("expected 64 bytes, got 0")
|
||||
}
|
||||
|
||||
pub(crate) async fn load_startup_proxy_config_snapshot(
|
||||
url: &str,
|
||||
cache_path: Option<&str>,
|
||||
me2dc_fallback: bool,
|
||||
label: &'static str,
|
||||
upstream: Option<std::sync::Arc<UpstreamManager>>,
|
||||
) -> Option<ProxyConfigData> {
|
||||
loop {
|
||||
match fetch_proxy_config_with_raw_via_upstream(url, upstream.clone()).await {
|
||||
Ok((cfg, raw)) => {
|
||||
if !cfg.map.is_empty() {
|
||||
if let Some(path) = cache_path
|
||||
&& let Err(e) = save_proxy_config_cache(path, &raw).await
|
||||
{
|
||||
warn!(error = %e, path, snapshot = label, "Failed to store startup proxy-config cache");
|
||||
}
|
||||
return Some(cfg);
|
||||
}
|
||||
|
||||
warn!(
|
||||
snapshot = label,
|
||||
url, "Startup proxy-config is empty; trying disk cache"
|
||||
);
|
||||
if let Some(path) = cache_path {
|
||||
match load_proxy_config_cache(path).await {
|
||||
Ok(cached) if !cached.map.is_empty() => {
|
||||
info!(
|
||||
snapshot = label,
|
||||
path,
|
||||
proxy_for_lines = cached.proxy_for_lines,
|
||||
"Loaded startup proxy-config from disk cache"
|
||||
);
|
||||
return Some(cached);
|
||||
}
|
||||
Ok(_) => {
|
||||
warn!(
|
||||
snapshot = label,
|
||||
path, "Startup proxy-config cache is empty; ignoring cache file"
|
||||
);
|
||||
}
|
||||
Err(cache_err) => {
|
||||
debug!(
|
||||
snapshot = label,
|
||||
path,
|
||||
error = %cache_err,
|
||||
"Startup proxy-config cache unavailable"
|
||||
);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if me2dc_fallback {
|
||||
error!(
|
||||
snapshot = label,
|
||||
"Startup proxy-config unavailable and no saved config found; falling back to direct mode"
|
||||
);
|
||||
return None;
|
||||
}
|
||||
|
||||
warn!(
|
||||
snapshot = label,
|
||||
retry_in_secs = 2,
|
||||
"Startup proxy-config unavailable and no saved config found; retrying because me2dc_fallback=false"
|
||||
);
|
||||
tokio::time::sleep(Duration::from_secs(2)).await;
|
||||
}
|
||||
Err(fetch_err) => {
|
||||
if let Some(path) = cache_path {
|
||||
match load_proxy_config_cache(path).await {
|
||||
Ok(cached) if !cached.map.is_empty() => {
|
||||
info!(
|
||||
snapshot = label,
|
||||
path,
|
||||
proxy_for_lines = cached.proxy_for_lines,
|
||||
"Loaded startup proxy-config from disk cache"
|
||||
);
|
||||
return Some(cached);
|
||||
}
|
||||
Ok(_) => {
|
||||
warn!(
|
||||
snapshot = label,
|
||||
path, "Startup proxy-config cache is empty; ignoring cache file"
|
||||
);
|
||||
}
|
||||
Err(cache_err) => {
|
||||
debug!(
|
||||
snapshot = label,
|
||||
path,
|
||||
error = %cache_err,
|
||||
"Startup proxy-config cache unavailable"
|
||||
);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if me2dc_fallback {
|
||||
error!(
|
||||
snapshot = label,
|
||||
error = %fetch_err,
|
||||
"Startup proxy-config unavailable and no cached data; falling back to direct mode"
|
||||
);
|
||||
return None;
|
||||
}
|
||||
|
||||
warn!(
|
||||
snapshot = label,
|
||||
error = %fetch_err,
|
||||
retry_in_secs = 2,
|
||||
"Startup proxy-config unavailable; retrying because me2dc_fallback=false"
|
||||
);
|
||||
tokio::time::sleep(Duration::from_secs(2)).await;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
534
src/maestro/listeners.rs
Normal file
534
src/maestro/listeners.rs
Normal file
@@ -0,0 +1,534 @@
|
||||
use std::error::Error;
|
||||
use std::net::{IpAddr, SocketAddr};
|
||||
use std::sync::Arc;
|
||||
use std::time::Duration;
|
||||
|
||||
use tokio::net::TcpListener;
|
||||
#[cfg(unix)]
|
||||
use tokio::net::UnixListener;
|
||||
use tokio::sync::{Semaphore, watch};
|
||||
use tracing::{debug, error, info, warn};
|
||||
|
||||
use crate::config::ProxyConfig;
|
||||
use crate::crypto::SecureRandom;
|
||||
use crate::ip_tracker::UserIpTracker;
|
||||
use crate::proxy::ClientHandler;
|
||||
use crate::proxy::route_mode::{ROUTE_SWITCH_ERROR_MSG, RouteRuntimeController};
|
||||
use crate::startup::{COMPONENT_LISTENERS_BIND, StartupTracker};
|
||||
use crate::stats::beobachten::BeobachtenStore;
|
||||
use crate::stats::{ReplayChecker, Stats};
|
||||
use crate::stream::BufferPool;
|
||||
use crate::tls_front::TlsFrontCache;
|
||||
use crate::transport::middle_proxy::MePool;
|
||||
use crate::transport::{ListenOptions, UpstreamManager, create_listener, find_listener_processes};
|
||||
|
||||
use super::helpers::{is_expected_handshake_eof, print_proxy_links};
|
||||
|
||||
/// Result of [`bind_listeners`]: the TCP sockets that were successfully
/// bound, plus whether a Unix-domain accept loop was spawned.
pub(crate) struct BoundListeners {
    // Each entry pairs a bound TCP listener with its effective
    // per-listener PROXY-protocol flag.
    pub(crate) listeners: Vec<(TcpListener, bool)>,
    // True when a Unix socket listener was bound and its accept task
    // spawned (its accept loop is owned by bind_listeners, not returned).
    pub(crate) has_unix_listener: bool,
}
|
||||
|
||||
#[allow(clippy::too_many_arguments)]
/// Bind all configured TCP listeners (and the optional Unix socket) and
/// report progress through `startup_tracker`.
///
/// - TCP listeners whose address family is disabled by the network
///   decision are skipped with a warning; bind failures are logged (with
///   owning-process lookup on AddrInUse) but do not abort startup.
/// - Proxy invite links are printed per-listener when no `public_host`
///   override exists, and once globally otherwise.
/// - The Unix listener, if configured, gets its own accept loop spawned
///   here; TCP accept loops are started separately (see
///   `spawn_tcp_accept_loops`).
///
/// Returns the bound TCP listeners; errors only on listener conversion or
/// Unix-socket bind failure.
pub(crate) async fn bind_listeners(
    config: &Arc<ProxyConfig>,
    decision_ipv4_dc: bool,
    decision_ipv6_dc: bool,
    detected_ip_v4: Option<IpAddr>,
    detected_ip_v6: Option<IpAddr>,
    startup_tracker: &Arc<StartupTracker>,
    config_rx: watch::Receiver<Arc<ProxyConfig>>,
    admission_rx: watch::Receiver<bool>,
    stats: Arc<Stats>,
    upstream_manager: Arc<UpstreamManager>,
    replay_checker: Arc<ReplayChecker>,
    buffer_pool: Arc<BufferPool>,
    rng: Arc<SecureRandom>,
    me_pool: Option<Arc<MePool>>,
    route_runtime: Arc<RouteRuntimeController>,
    tls_cache: Option<Arc<TlsFrontCache>>,
    ip_tracker: Arc<UserIpTracker>,
    beobachten: Arc<BeobachtenStore>,
    max_connections: Arc<Semaphore>,
) -> Result<BoundListeners, Box<dyn Error>> {
    startup_tracker
        .start_component(
            COMPONENT_LISTENERS_BIND,
            Some("bind TCP/Unix listeners".to_string()),
        )
        .await;
    let mut listeners = Vec::new();

    for listener_conf in &config.server.listeners {
        let addr = SocketAddr::new(listener_conf.ip, config.server.port);
        // Honour the [network] family decision: don't bind families we
        // can't reach DCs over.
        if addr.is_ipv4() && !decision_ipv4_dc {
            warn!(%addr, "Skipping IPv4 listener: IPv4 disabled by [network]");
            continue;
        }
        if addr.is_ipv6() && !decision_ipv6_dc {
            warn!(%addr, "Skipping IPv6 listener: IPv6 disabled by [network]");
            continue;
        }
        let options = ListenOptions {
            reuse_port: listener_conf.reuse_allow,
            ipv6_only: listener_conf.ip.is_ipv6(),
            ..Default::default()
        };

        match create_listener(addr, &options) {
            Ok(socket) => {
                let listener = TcpListener::from_std(socket.into())?;
                info!("Listening on {}", addr);
                // Per-listener override falls back to the server default.
                let listener_proxy_protocol = listener_conf
                    .proxy_protocol
                    .unwrap_or(config.server.proxy_protocol);

                // Host to advertise in links: explicit announce wins, then
                // the detected public IP for wildcard binds, then the bind
                // address itself.
                let public_host = if let Some(ref announce) = listener_conf.announce {
                    announce.clone()
                } else if listener_conf.ip.is_unspecified() {
                    if listener_conf.ip.is_ipv4() {
                        detected_ip_v4
                            .map(|ip| ip.to_string())
                            .unwrap_or_else(|| listener_conf.ip.to_string())
                    } else {
                        detected_ip_v6
                            .map(|ip| ip.to_string())
                            .unwrap_or_else(|| listener_conf.ip.to_string())
                    }
                } else {
                    listener_conf.ip.to_string()
                };

                // Without a public_host override, print links per listener
                // using that listener's advertised host.
                if config.general.links.public_host.is_none()
                    && !config.general.links.show.is_empty()
                {
                    let link_port = config
                        .general
                        .links
                        .public_port
                        .unwrap_or(config.server.port);
                    print_proxy_links(&public_host, link_port, config);
                }

                listeners.push((listener, listener_proxy_protocol));
            }
            Err(e) => {
                if e.kind() == std::io::ErrorKind::AddrInUse {
                    // Try to name the process holding the port to make the
                    // error actionable.
                    let owners = find_listener_processes(addr);
                    if owners.is_empty() {
                        error!(
                            %addr,
                            "Failed to bind: address already in use (owner process unresolved)"
                        );
                    } else {
                        for owner in owners {
                            error!(
                                %addr,
                                pid = owner.pid,
                                process = %owner.process,
                                "Failed to bind: address already in use"
                            );
                        }
                    }

                    if !listener_conf.reuse_allow {
                        error!(
                            %addr,
                            "reuse_allow=false; set [[server.listeners]].reuse_allow=true to allow multi-instance listening"
                        );
                    }
                } else {
                    error!("Failed to bind to {}: {}", addr, e);
                }
            }
        }
    }

    // Global link printout: when a public_host is configured, or when no
    // listener bound (so no per-listener printout happened).
    if !config.general.links.show.is_empty()
        && (config.general.links.public_host.is_some() || listeners.is_empty())
    {
        let (host, port) = if let Some(ref h) = config.general.links.public_host {
            (
                h.clone(),
                config
                    .general
                    .links
                    .public_port
                    .unwrap_or(config.server.port),
            )
        } else {
            let ip = detected_ip_v4.or(detected_ip_v6).map(|ip| ip.to_string());
            if ip.is_none() {
                warn!(
                    "show_link is configured but public IP could not be detected. Set public_host in config."
                );
            }
            (
                ip.unwrap_or_else(|| "UNKNOWN".to_string()),
                config
                    .general
                    .links
                    .public_port
                    .unwrap_or(config.server.port),
            )
        };

        print_proxy_links(&host, port, config);
    }

    let mut has_unix_listener = false;
    #[cfg(unix)]
    if let Some(ref unix_path) = config.server.listen_unix_sock {
        // Remove a stale socket file from a previous run; ignore failure
        // (bind below will surface a real problem).
        let _ = tokio::fs::remove_file(unix_path).await;

        let unix_listener = UnixListener::bind(unix_path)?;

        // Optional octal permission string for the socket file.
        if let Some(ref perm_str) = config.server.listen_unix_sock_perm {
            match u32::from_str_radix(perm_str.trim_start_matches('0'), 8) {
                Ok(mode) => {
                    use std::os::unix::fs::PermissionsExt;
                    let perms = std::fs::Permissions::from_mode(mode);
                    if let Err(e) = std::fs::set_permissions(unix_path, perms) {
                        error!(
                            "Failed to set unix socket permissions to {}: {}",
                            perm_str, e
                        );
                    } else {
                        info!("Listening on unix:{} (mode {})", unix_path, perm_str);
                    }
                }
                Err(e) => {
                    // Bad permission string is non-fatal: keep listening
                    // with default perms.
                    warn!(
                        "Invalid listen_unix_sock_perm '{}': {}. Ignoring.",
                        perm_str, e
                    );
                    info!("Listening on unix:{}", unix_path);
                }
            }
        } else {
            info!("Listening on unix:{}", unix_path);
        }

        has_unix_listener = true;

        // Clone everything the detached accept loop needs.
        let mut config_rx_unix: watch::Receiver<Arc<ProxyConfig>> = config_rx.clone();
        let admission_rx_unix = admission_rx.clone();
        let stats = stats.clone();
        let upstream_manager = upstream_manager.clone();
        let replay_checker = replay_checker.clone();
        let buffer_pool = buffer_pool.clone();
        let rng = rng.clone();
        let me_pool = me_pool.clone();
        let route_runtime = route_runtime.clone();
        let tls_cache = tls_cache.clone();
        let ip_tracker = ip_tracker.clone();
        let beobachten = beobachten.clone();
        let max_connections_unix = max_connections.clone();

        tokio::spawn(async move {
            // Monotonic counter used to synthesize distinct fake peer
            // addresses for Unix clients (which have no socket address).
            let unix_conn_counter = Arc::new(std::sync::atomic::AtomicU64::new(1));

            loop {
                match unix_listener.accept().await {
                    Ok((stream, _)) => {
                        // Admission gate closed: drop the connection
                        // immediately.
                        if !*admission_rx_unix.borrow() {
                            drop(stream);
                            continue;
                        }
                        // Acquire a connection permit; 0 means wait
                        // indefinitely, otherwise drop after the timeout.
                        let accept_permit_timeout_ms =
                            config_rx_unix.borrow().server.accept_permit_timeout_ms;
                        let permit = if accept_permit_timeout_ms == 0 {
                            match max_connections_unix.clone().acquire_owned().await {
                                Ok(permit) => permit,
                                Err(_) => {
                                    error!("Connection limiter is closed");
                                    break;
                                }
                            }
                        } else {
                            match tokio::time::timeout(
                                Duration::from_millis(accept_permit_timeout_ms),
                                max_connections_unix.clone().acquire_owned(),
                            )
                            .await
                            {
                                Ok(Ok(permit)) => permit,
                                Ok(Err(_)) => {
                                    error!("Connection limiter is closed");
                                    break;
                                }
                                Err(_) => {
                                    debug!(
                                        timeout_ms = accept_permit_timeout_ms,
                                        "Dropping accepted unix connection: permit wait timeout"
                                    );
                                    drop(stream);
                                    continue;
                                }
                            }
                        };
                        let conn_id =
                            unix_conn_counter.fetch_add(1, std::sync::atomic::Ordering::Relaxed);
                        // Fake loopback peer so downstream code that keys
                        // on SocketAddr still works; port derives from the
                        // counter (wraps at 65535).
                        let fake_peer =
                            SocketAddr::from(([127, 0, 0, 1], (conn_id % 65535) as u16));

                        // Snapshot the current config and clone shared
                        // state for the per-connection task.
                        let config = config_rx_unix.borrow_and_update().clone();
                        let stats = stats.clone();
                        let upstream_manager = upstream_manager.clone();
                        let replay_checker = replay_checker.clone();
                        let buffer_pool = buffer_pool.clone();
                        let rng = rng.clone();
                        let me_pool = me_pool.clone();
                        let route_runtime = route_runtime.clone();
                        let tls_cache = tls_cache.clone();
                        let ip_tracker = ip_tracker.clone();
                        let beobachten = beobachten.clone();
                        let proxy_protocol_enabled = config.server.proxy_protocol;

                        tokio::spawn(async move {
                            // Hold the permit for the connection lifetime.
                            let _permit = permit;
                            if let Err(e) = crate::proxy::client::handle_client_stream(
                                stream,
                                fake_peer,
                                config,
                                stats,
                                upstream_manager,
                                replay_checker,
                                buffer_pool,
                                rng,
                                me_pool,
                                route_runtime,
                                tls_cache,
                                ip_tracker,
                                beobachten,
                                proxy_protocol_enabled,
                            )
                            .await
                            {
                                debug!(error = %e, "Unix socket connection error");
                            }
                        });
                    }
                    Err(e) => {
                        error!("Unix socket accept error: {}", e);
                        // Brief backoff so a persistent accept error
                        // doesn't spin the loop.
                        tokio::time::sleep(Duration::from_millis(100)).await;
                    }
                }
            }
        });
    }

    startup_tracker
        .complete_component(
            COMPONENT_LISTENERS_BIND,
            Some(format!(
                "listeners configured tcp={} unix={}",
                listeners.len(),
                has_unix_listener
            )),
        )
        .await;

    Ok(BoundListeners {
        listeners,
        has_unix_listener,
    })
}
|
||||
|
||||
#[allow(clippy::too_many_arguments)]
|
||||
pub(crate) fn spawn_tcp_accept_loops(
|
||||
listeners: Vec<(TcpListener, bool)>,
|
||||
config_rx: watch::Receiver<Arc<ProxyConfig>>,
|
||||
admission_rx: watch::Receiver<bool>,
|
||||
stats: Arc<Stats>,
|
||||
upstream_manager: Arc<UpstreamManager>,
|
||||
replay_checker: Arc<ReplayChecker>,
|
||||
buffer_pool: Arc<BufferPool>,
|
||||
rng: Arc<SecureRandom>,
|
||||
me_pool: Option<Arc<MePool>>,
|
||||
route_runtime: Arc<RouteRuntimeController>,
|
||||
tls_cache: Option<Arc<TlsFrontCache>>,
|
||||
ip_tracker: Arc<UserIpTracker>,
|
||||
beobachten: Arc<BeobachtenStore>,
|
||||
max_connections: Arc<Semaphore>,
|
||||
) {
|
||||
for (listener, listener_proxy_protocol) in listeners {
|
||||
let mut config_rx: watch::Receiver<Arc<ProxyConfig>> = config_rx.clone();
|
||||
let admission_rx_tcp = admission_rx.clone();
|
||||
let stats = stats.clone();
|
||||
let upstream_manager = upstream_manager.clone();
|
||||
let replay_checker = replay_checker.clone();
|
||||
let buffer_pool = buffer_pool.clone();
|
||||
let rng = rng.clone();
|
||||
let me_pool = me_pool.clone();
|
||||
let route_runtime = route_runtime.clone();
|
||||
let tls_cache = tls_cache.clone();
|
||||
let ip_tracker = ip_tracker.clone();
|
||||
let beobachten = beobachten.clone();
|
||||
let max_connections_tcp = max_connections.clone();
|
||||
|
||||
tokio::spawn(async move {
|
||||
loop {
|
||||
match listener.accept().await {
|
||||
Ok((stream, peer_addr)) => {
|
||||
if !*admission_rx_tcp.borrow() {
|
||||
debug!(peer = %peer_addr, "Admission gate closed, dropping connection");
|
||||
drop(stream);
|
||||
continue;
|
||||
}
|
||||
let accept_permit_timeout_ms =
|
||||
config_rx.borrow().server.accept_permit_timeout_ms;
|
||||
let permit = if accept_permit_timeout_ms == 0 {
|
||||
match max_connections_tcp.clone().acquire_owned().await {
|
||||
Ok(permit) => permit,
|
||||
Err(_) => {
|
||||
error!("Connection limiter is closed");
|
||||
break;
|
||||
}
|
||||
}
|
||||
} else {
|
||||
match tokio::time::timeout(
|
||||
Duration::from_millis(accept_permit_timeout_ms),
|
||||
max_connections_tcp.clone().acquire_owned(),
|
||||
)
|
||||
.await
|
||||
{
|
||||
Ok(Ok(permit)) => permit,
|
||||
Ok(Err(_)) => {
|
||||
error!("Connection limiter is closed");
|
||||
break;
|
||||
}
|
||||
Err(_) => {
|
||||
debug!(
|
||||
peer = %peer_addr,
|
||||
timeout_ms = accept_permit_timeout_ms,
|
||||
"Dropping accepted connection: permit wait timeout"
|
||||
);
|
||||
drop(stream);
|
||||
continue;
|
||||
}
|
||||
}
|
||||
};
|
||||
let config = config_rx.borrow_and_update().clone();
|
||||
let stats = stats.clone();
|
||||
let upstream_manager = upstream_manager.clone();
|
||||
let replay_checker = replay_checker.clone();
|
||||
let buffer_pool = buffer_pool.clone();
|
||||
let rng = rng.clone();
|
||||
let me_pool = me_pool.clone();
|
||||
let route_runtime = route_runtime.clone();
|
||||
let tls_cache = tls_cache.clone();
|
||||
let ip_tracker = ip_tracker.clone();
|
||||
let beobachten = beobachten.clone();
|
||||
let proxy_protocol_enabled = listener_proxy_protocol;
|
||||
let real_peer_report = Arc::new(std::sync::Mutex::new(None));
|
||||
let real_peer_report_for_handler = real_peer_report.clone();
|
||||
|
||||
tokio::spawn(async move {
|
||||
let _permit = permit;
|
||||
if let Err(e) = ClientHandler::new(
|
||||
stream,
|
||||
peer_addr,
|
||||
config,
|
||||
stats,
|
||||
upstream_manager,
|
||||
replay_checker,
|
||||
buffer_pool,
|
||||
rng,
|
||||
me_pool,
|
||||
route_runtime,
|
||||
tls_cache,
|
||||
ip_tracker,
|
||||
beobachten,
|
||||
proxy_protocol_enabled,
|
||||
real_peer_report_for_handler,
|
||||
)
|
||||
.run()
|
||||
.await
|
||||
{
|
||||
let real_peer = match real_peer_report.lock() {
|
||||
Ok(guard) => *guard,
|
||||
Err(_) => None,
|
||||
};
|
||||
let peer_closed = matches!(
|
||||
&e,
|
||||
crate::error::ProxyError::Io(ioe)
|
||||
if matches!(
|
||||
ioe.kind(),
|
||||
std::io::ErrorKind::ConnectionReset
|
||||
| std::io::ErrorKind::ConnectionAborted
|
||||
| std::io::ErrorKind::BrokenPipe
|
||||
| std::io::ErrorKind::NotConnected
|
||||
)
|
||||
) || matches!(
|
||||
&e,
|
||||
crate::error::ProxyError::Stream(
|
||||
crate::error::StreamError::Io(ioe)
|
||||
)
|
||||
if matches!(
|
||||
ioe.kind(),
|
||||
std::io::ErrorKind::ConnectionReset
|
||||
| std::io::ErrorKind::ConnectionAborted
|
||||
| std::io::ErrorKind::BrokenPipe
|
||||
| std::io::ErrorKind::NotConnected
|
||||
)
|
||||
);
|
||||
|
||||
let me_closed = matches!(
|
||||
&e,
|
||||
crate::error::ProxyError::Proxy(msg) if msg == "ME connection lost"
|
||||
);
|
||||
let route_switched = matches!(
|
||||
&e,
|
||||
crate::error::ProxyError::Proxy(msg) if msg == ROUTE_SWITCH_ERROR_MSG
|
||||
);
|
||||
|
||||
match (peer_closed, me_closed) {
|
||||
(true, _) => {
|
||||
if let Some(real_peer) = real_peer {
|
||||
debug!(peer = %peer_addr, real_peer = %real_peer, error = %e, "Connection closed by client");
|
||||
} else {
|
||||
debug!(peer = %peer_addr, error = %e, "Connection closed by client");
|
||||
}
|
||||
}
|
||||
(_, true) => {
|
||||
if let Some(real_peer) = real_peer {
|
||||
warn!(peer = %peer_addr, real_peer = %real_peer, error = %e, "Connection closed: Middle-End dropped session");
|
||||
} else {
|
||||
warn!(peer = %peer_addr, error = %e, "Connection closed: Middle-End dropped session");
|
||||
}
|
||||
}
|
||||
_ if route_switched => {
|
||||
if let Some(real_peer) = real_peer {
|
||||
info!(peer = %peer_addr, real_peer = %real_peer, error = %e, "Connection closed by controlled route cutover");
|
||||
} else {
|
||||
info!(peer = %peer_addr, error = %e, "Connection closed by controlled route cutover");
|
||||
}
|
||||
}
|
||||
_ if is_expected_handshake_eof(&e) => {
|
||||
if let Some(real_peer) = real_peer {
|
||||
info!(peer = %peer_addr, real_peer = %real_peer, error = %e, "Connection closed during initial handshake");
|
||||
} else {
|
||||
info!(peer = %peer_addr, error = %e, "Connection closed during initial handshake");
|
||||
}
|
||||
}
|
||||
_ => {
|
||||
if let Some(real_peer) = real_peer {
|
||||
warn!(peer = %peer_addr, real_peer = %real_peer, error = %e, "Connection closed with error");
|
||||
} else {
|
||||
warn!(peer = %peer_addr, error = %e, "Connection closed with error");
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
});
|
||||
}
|
||||
Err(e) => {
|
||||
error!("Accept error: {}", e);
|
||||
tokio::time::sleep(Duration::from_millis(100)).await;
|
||||
}
|
||||
}
|
||||
}
|
||||
});
|
||||
}
|
||||
}
|
||||
644
src/maestro/me_startup.rs
Normal file
644
src/maestro/me_startup.rs
Normal file
@@ -0,0 +1,644 @@
|
||||
#![allow(clippy::too_many_arguments)]
|
||||
|
||||
use std::sync::Arc;
|
||||
use std::time::Duration;
|
||||
|
||||
use tokio::sync::RwLock;
|
||||
use tracing::{error, info, warn};
|
||||
|
||||
use crate::config::ProxyConfig;
|
||||
use crate::crypto::SecureRandom;
|
||||
use crate::network::probe::{NetworkDecision, NetworkProbe};
|
||||
use crate::startup::{
|
||||
COMPONENT_ME_POOL_CONSTRUCT, COMPONENT_ME_POOL_INIT_STAGE1, COMPONENT_ME_PROXY_CONFIG_V4,
|
||||
COMPONENT_ME_PROXY_CONFIG_V6, COMPONENT_ME_SECRET_FETCH, StartupMeStatus, StartupTracker,
|
||||
};
|
||||
use crate::stats::Stats;
|
||||
use crate::transport::UpstreamManager;
|
||||
use crate::transport::middle_proxy::MePool;
|
||||
|
||||
use super::helpers::load_startup_proxy_config_snapshot;
|
||||
|
||||
pub(crate) async fn initialize_me_pool(
|
||||
use_middle_proxy: bool,
|
||||
config: &ProxyConfig,
|
||||
decision: &NetworkDecision,
|
||||
probe: &NetworkProbe,
|
||||
startup_tracker: &Arc<StartupTracker>,
|
||||
upstream_manager: Arc<UpstreamManager>,
|
||||
rng: Arc<SecureRandom>,
|
||||
stats: Arc<Stats>,
|
||||
api_me_pool: Arc<RwLock<Option<Arc<MePool>>>>,
|
||||
) -> Option<Arc<MePool>> {
|
||||
if !use_middle_proxy {
|
||||
return None;
|
||||
}
|
||||
|
||||
info!("=== Middle Proxy Mode ===");
|
||||
let me_nat_probe = config.general.middle_proxy_nat_probe && config.network.stun_use;
|
||||
if config.general.middle_proxy_nat_probe && !config.network.stun_use {
|
||||
info!("Middle-proxy STUN probing disabled by network.stun_use=false");
|
||||
}
|
||||
|
||||
let me2dc_fallback = config.general.me2dc_fallback;
|
||||
let me_init_retry_attempts = config.general.me_init_retry_attempts;
|
||||
let me_init_warn_after_attempts: u32 = 3;
|
||||
|
||||
// Global ad_tag (pool default). Used when user has no per-user tag in access.user_ad_tags.
|
||||
let proxy_tag = config
|
||||
.general
|
||||
.ad_tag
|
||||
.as_ref()
|
||||
.map(|tag| hex::decode(tag).expect("general.ad_tag must be validated before startup"));
|
||||
|
||||
// =============================================================
|
||||
// CRITICAL: Download Telegram proxy-secret (NOT user secret!)
|
||||
//
|
||||
// C MTProxy uses TWO separate secrets:
|
||||
// -S flag = 16-byte user secret for client obfuscation
|
||||
// --aes-pwd = 32-512 byte binary file for ME RPC auth
|
||||
//
|
||||
// proxy-secret is from: https://core.telegram.org/getProxySecret
|
||||
// =============================================================
|
||||
let proxy_secret_path = config.general.proxy_secret_path.as_deref();
|
||||
let pool_size = config.general.middle_proxy_pool_size.max(1);
|
||||
let proxy_secret = loop {
|
||||
match crate::transport::middle_proxy::fetch_proxy_secret_with_upstream(
|
||||
proxy_secret_path,
|
||||
config.general.proxy_secret_len_max,
|
||||
Some(upstream_manager.clone()),
|
||||
)
|
||||
.await
|
||||
{
|
||||
Ok(proxy_secret) => break Some(proxy_secret),
|
||||
Err(e) => {
|
||||
startup_tracker.set_me_last_error(Some(e.to_string())).await;
|
||||
if me2dc_fallback {
|
||||
error!(
|
||||
error = %e,
|
||||
"ME startup failed: proxy-secret is unavailable and no saved secret found; falling back to direct mode"
|
||||
);
|
||||
break None;
|
||||
}
|
||||
|
||||
warn!(
|
||||
error = %e,
|
||||
retry_in_secs = 2,
|
||||
"ME startup failed: proxy-secret is unavailable and no saved secret found; retrying because me2dc_fallback=false"
|
||||
);
|
||||
tokio::time::sleep(Duration::from_secs(2)).await;
|
||||
}
|
||||
}
|
||||
};
|
||||
match proxy_secret {
|
||||
Some(proxy_secret) => {
|
||||
startup_tracker
|
||||
.complete_component(
|
||||
COMPONENT_ME_SECRET_FETCH,
|
||||
Some("proxy-secret loaded".to_string()),
|
||||
)
|
||||
.await;
|
||||
info!(
|
||||
secret_len = proxy_secret.len(),
|
||||
key_sig = format_args!(
|
||||
"0x{:08x}",
|
||||
if proxy_secret.len() >= 4 {
|
||||
u32::from_le_bytes([
|
||||
proxy_secret[0],
|
||||
proxy_secret[1],
|
||||
proxy_secret[2],
|
||||
proxy_secret[3],
|
||||
])
|
||||
} else {
|
||||
0
|
||||
}
|
||||
),
|
||||
"Proxy-secret loaded"
|
||||
);
|
||||
|
||||
startup_tracker
|
||||
.start_component(
|
||||
COMPONENT_ME_PROXY_CONFIG_V4,
|
||||
Some("load startup proxy-config v4".to_string()),
|
||||
)
|
||||
.await;
|
||||
startup_tracker
|
||||
.set_me_status(StartupMeStatus::Initializing, COMPONENT_ME_PROXY_CONFIG_V4)
|
||||
.await;
|
||||
let cfg_v4 = load_startup_proxy_config_snapshot(
|
||||
"https://core.telegram.org/getProxyConfig",
|
||||
config.general.proxy_config_v4_cache_path.as_deref(),
|
||||
me2dc_fallback,
|
||||
"getProxyConfig",
|
||||
Some(upstream_manager.clone()),
|
||||
)
|
||||
.await;
|
||||
if cfg_v4.is_some() {
|
||||
startup_tracker
|
||||
.complete_component(
|
||||
COMPONENT_ME_PROXY_CONFIG_V4,
|
||||
Some("proxy-config v4 loaded".to_string()),
|
||||
)
|
||||
.await;
|
||||
} else {
|
||||
startup_tracker
|
||||
.fail_component(
|
||||
COMPONENT_ME_PROXY_CONFIG_V4,
|
||||
Some("proxy-config v4 unavailable".to_string()),
|
||||
)
|
||||
.await;
|
||||
}
|
||||
startup_tracker
|
||||
.start_component(
|
||||
COMPONENT_ME_PROXY_CONFIG_V6,
|
||||
Some("load startup proxy-config v6".to_string()),
|
||||
)
|
||||
.await;
|
||||
startup_tracker
|
||||
.set_me_status(StartupMeStatus::Initializing, COMPONENT_ME_PROXY_CONFIG_V6)
|
||||
.await;
|
||||
let cfg_v6 = load_startup_proxy_config_snapshot(
|
||||
"https://core.telegram.org/getProxyConfigV6",
|
||||
config.general.proxy_config_v6_cache_path.as_deref(),
|
||||
me2dc_fallback,
|
||||
"getProxyConfigV6",
|
||||
Some(upstream_manager.clone()),
|
||||
)
|
||||
.await;
|
||||
if cfg_v6.is_some() {
|
||||
startup_tracker
|
||||
.complete_component(
|
||||
COMPONENT_ME_PROXY_CONFIG_V6,
|
||||
Some("proxy-config v6 loaded".to_string()),
|
||||
)
|
||||
.await;
|
||||
} else {
|
||||
startup_tracker
|
||||
.fail_component(
|
||||
COMPONENT_ME_PROXY_CONFIG_V6,
|
||||
Some("proxy-config v6 unavailable".to_string()),
|
||||
)
|
||||
.await;
|
||||
}
|
||||
|
||||
if let (Some(cfg_v4), Some(cfg_v6)) = (cfg_v4, cfg_v6) {
|
||||
startup_tracker
|
||||
.start_component(
|
||||
COMPONENT_ME_POOL_CONSTRUCT,
|
||||
Some("construct ME pool".to_string()),
|
||||
)
|
||||
.await;
|
||||
startup_tracker
|
||||
.set_me_status(StartupMeStatus::Initializing, COMPONENT_ME_POOL_CONSTRUCT)
|
||||
.await;
|
||||
let pool = MePool::new(
|
||||
proxy_tag.clone(),
|
||||
proxy_secret,
|
||||
config.general.middle_proxy_nat_ip,
|
||||
me_nat_probe,
|
||||
None,
|
||||
config.network.stun_servers.clone(),
|
||||
config.general.stun_nat_probe_concurrency,
|
||||
probe.detected_ipv6,
|
||||
config.timeouts.me_one_retry,
|
||||
config.timeouts.me_one_timeout_ms,
|
||||
cfg_v4.map.clone(),
|
||||
cfg_v6.map.clone(),
|
||||
cfg_v4.default_dc.or(cfg_v6.default_dc),
|
||||
decision.clone(),
|
||||
Some(upstream_manager.clone()),
|
||||
rng.clone(),
|
||||
stats.clone(),
|
||||
config.general.me_keepalive_enabled,
|
||||
config.general.me_keepalive_interval_secs,
|
||||
config.general.me_keepalive_jitter_secs,
|
||||
config.general.me_keepalive_payload_random,
|
||||
config.general.rpc_proxy_req_every,
|
||||
config.general.me_warmup_stagger_enabled,
|
||||
config.general.me_warmup_step_delay_ms,
|
||||
config.general.me_warmup_step_jitter_ms,
|
||||
config.general.me_reconnect_max_concurrent_per_dc,
|
||||
config.general.me_reconnect_backoff_base_ms,
|
||||
config.general.me_reconnect_backoff_cap_ms,
|
||||
config.general.me_reconnect_fast_retry_count,
|
||||
config.general.me_single_endpoint_shadow_writers,
|
||||
config.general.me_single_endpoint_outage_mode_enabled,
|
||||
config.general.me_single_endpoint_outage_disable_quarantine,
|
||||
config.general.me_single_endpoint_outage_backoff_min_ms,
|
||||
config.general.me_single_endpoint_outage_backoff_max_ms,
|
||||
config.general.me_single_endpoint_shadow_rotate_every_secs,
|
||||
config.general.me_floor_mode,
|
||||
config.general.me_adaptive_floor_idle_secs,
|
||||
config.general.me_adaptive_floor_min_writers_single_endpoint,
|
||||
config.general.me_adaptive_floor_min_writers_multi_endpoint,
|
||||
config.general.me_adaptive_floor_recover_grace_secs,
|
||||
config.general.me_adaptive_floor_writers_per_core_total,
|
||||
config.general.me_adaptive_floor_cpu_cores_override,
|
||||
config
|
||||
.general
|
||||
.me_adaptive_floor_max_extra_writers_single_per_core,
|
||||
config
|
||||
.general
|
||||
.me_adaptive_floor_max_extra_writers_multi_per_core,
|
||||
config.general.me_adaptive_floor_max_active_writers_per_core,
|
||||
config.general.me_adaptive_floor_max_warm_writers_per_core,
|
||||
config.general.me_adaptive_floor_max_active_writers_global,
|
||||
config.general.me_adaptive_floor_max_warm_writers_global,
|
||||
config.general.hardswap,
|
||||
config.general.me_pool_drain_ttl_secs,
|
||||
config.general.me_instadrain,
|
||||
config.general.me_pool_drain_threshold,
|
||||
config.general.me_pool_drain_soft_evict_enabled,
|
||||
config.general.me_pool_drain_soft_evict_grace_secs,
|
||||
config.general.me_pool_drain_soft_evict_per_writer,
|
||||
config.general.me_pool_drain_soft_evict_budget_per_core,
|
||||
config.general.me_pool_drain_soft_evict_cooldown_ms,
|
||||
config.general.effective_me_pool_force_close_secs(),
|
||||
config.general.me_pool_min_fresh_ratio,
|
||||
config.general.me_hardswap_warmup_delay_min_ms,
|
||||
config.general.me_hardswap_warmup_delay_max_ms,
|
||||
config.general.me_hardswap_warmup_extra_passes,
|
||||
config.general.me_hardswap_warmup_pass_backoff_base_ms,
|
||||
config.general.me_bind_stale_mode,
|
||||
config.general.me_bind_stale_ttl_secs,
|
||||
config.general.me_secret_atomic_snapshot,
|
||||
config.general.me_deterministic_writer_sort,
|
||||
config.general.me_writer_pick_mode,
|
||||
config.general.me_writer_pick_sample_size,
|
||||
config.general.me_socks_kdf_policy,
|
||||
config.general.me_writer_cmd_channel_capacity,
|
||||
config.general.me_route_channel_capacity,
|
||||
config.general.me_route_backpressure_base_timeout_ms,
|
||||
config.general.me_route_backpressure_high_timeout_ms,
|
||||
config.general.me_route_backpressure_high_watermark_pct,
|
||||
config.general.me_reader_route_data_wait_ms,
|
||||
config.general.me_health_interval_ms_unhealthy,
|
||||
config.general.me_health_interval_ms_healthy,
|
||||
config.general.me_warn_rate_limit_ms,
|
||||
config.general.me_route_no_writer_mode,
|
||||
config.general.me_route_no_writer_wait_ms,
|
||||
config.general.me_route_inline_recovery_attempts,
|
||||
config.general.me_route_inline_recovery_wait_ms,
|
||||
);
|
||||
startup_tracker
|
||||
.complete_component(
|
||||
COMPONENT_ME_POOL_CONSTRUCT,
|
||||
Some("ME pool object created".to_string()),
|
||||
)
|
||||
.await;
|
||||
*api_me_pool.write().await = Some(pool.clone());
|
||||
startup_tracker
|
||||
.start_component(
|
||||
COMPONENT_ME_POOL_INIT_STAGE1,
|
||||
Some("initialize ME pool writers".to_string()),
|
||||
)
|
||||
.await;
|
||||
startup_tracker
|
||||
.set_me_status(StartupMeStatus::Initializing, COMPONENT_ME_POOL_INIT_STAGE1)
|
||||
.await;
|
||||
|
||||
if me2dc_fallback {
|
||||
let pool_bg = pool.clone();
|
||||
let rng_bg = rng.clone();
|
||||
let startup_tracker_bg = startup_tracker.clone();
|
||||
let retry_limit = if me_init_retry_attempts == 0 {
|
||||
String::from("unlimited")
|
||||
} else {
|
||||
me_init_retry_attempts.to_string()
|
||||
};
|
||||
std::thread::spawn(move || {
|
||||
let runtime = match tokio::runtime::Builder::new_current_thread()
|
||||
.enable_all()
|
||||
.build()
|
||||
{
|
||||
Ok(runtime) => runtime,
|
||||
Err(error) => {
|
||||
error!(error = %error, "Failed to build background runtime for ME initialization");
|
||||
return;
|
||||
}
|
||||
};
|
||||
runtime.block_on(async move {
|
||||
let mut init_attempt: u32 = 0;
|
||||
loop {
|
||||
init_attempt = init_attempt.saturating_add(1);
|
||||
startup_tracker_bg.set_me_init_attempt(init_attempt).await;
|
||||
match pool_bg.init(pool_size, &rng_bg).await {
|
||||
Ok(()) => {
|
||||
startup_tracker_bg.set_me_last_error(None).await;
|
||||
startup_tracker_bg
|
||||
.complete_component(
|
||||
COMPONENT_ME_POOL_INIT_STAGE1,
|
||||
Some("ME pool initialized".to_string()),
|
||||
)
|
||||
.await;
|
||||
startup_tracker_bg
|
||||
.set_me_status(StartupMeStatus::Ready, "ready")
|
||||
.await;
|
||||
info!(
|
||||
attempt = init_attempt,
|
||||
"Middle-End pool initialized successfully"
|
||||
);
|
||||
|
||||
// ── Supervised background tasks ──────────────────
|
||||
// Each task runs inside a nested tokio::spawn so
|
||||
// that a panic is caught via JoinHandle and the
|
||||
// outer loop restarts the task automatically.
|
||||
let pool_health = pool_bg.clone();
|
||||
let rng_health = rng_bg.clone();
|
||||
let min_conns = pool_size;
|
||||
tokio::spawn(async move {
|
||||
loop {
|
||||
let p = pool_health.clone();
|
||||
let r = rng_health.clone();
|
||||
let res = tokio::spawn(async move {
|
||||
crate::transport::middle_proxy::me_health_monitor(
|
||||
p, r, min_conns,
|
||||
)
|
||||
.await;
|
||||
})
|
||||
.await;
|
||||
match res {
|
||||
Ok(()) => warn!("me_health_monitor exited unexpectedly, restarting"),
|
||||
Err(e) => {
|
||||
error!(error = %e, "me_health_monitor panicked, restarting in 1s");
|
||||
tokio::time::sleep(Duration::from_secs(1)).await;
|
||||
}
|
||||
}
|
||||
}
|
||||
});
|
||||
let pool_drain_enforcer = pool_bg.clone();
|
||||
tokio::spawn(async move {
|
||||
loop {
|
||||
let p = pool_drain_enforcer.clone();
|
||||
let res = tokio::spawn(async move {
|
||||
crate::transport::middle_proxy::me_drain_timeout_enforcer(p).await;
|
||||
})
|
||||
.await;
|
||||
match res {
|
||||
Ok(()) => warn!("me_drain_timeout_enforcer exited unexpectedly, restarting"),
|
||||
Err(e) => {
|
||||
error!(error = %e, "me_drain_timeout_enforcer panicked, restarting in 1s");
|
||||
tokio::time::sleep(Duration::from_secs(1)).await;
|
||||
}
|
||||
}
|
||||
}
|
||||
});
|
||||
let pool_watchdog = pool_bg.clone();
|
||||
tokio::spawn(async move {
|
||||
loop {
|
||||
let p = pool_watchdog.clone();
|
||||
let res = tokio::spawn(async move {
|
||||
crate::transport::middle_proxy::me_zombie_writer_watchdog(p).await;
|
||||
})
|
||||
.await;
|
||||
match res {
|
||||
Ok(()) => warn!("me_zombie_writer_watchdog exited unexpectedly, restarting"),
|
||||
Err(e) => {
|
||||
error!(error = %e, "me_zombie_writer_watchdog panicked, restarting in 1s");
|
||||
tokio::time::sleep(Duration::from_secs(1)).await;
|
||||
}
|
||||
}
|
||||
}
|
||||
});
|
||||
// CRITICAL: keep the current-thread runtime
|
||||
// alive. Without this, block_on() returns,
|
||||
// the Runtime is dropped, and ALL spawned
|
||||
// background tasks (health monitor, drain
|
||||
// enforcer, zombie watchdog) are silently
|
||||
// cancelled — causing the draining-writer
|
||||
// leak that brought us here.
|
||||
std::future::pending::<()>().await;
|
||||
unreachable!();
|
||||
}
|
||||
Err(e) => {
|
||||
startup_tracker_bg.set_me_last_error(Some(e.to_string())).await;
|
||||
if init_attempt >= me_init_warn_after_attempts {
|
||||
warn!(
|
||||
error = %e,
|
||||
attempt = init_attempt,
|
||||
retry_limit = %retry_limit,
|
||||
retry_in_secs = 2,
|
||||
"ME pool is not ready yet; retrying background initialization"
|
||||
);
|
||||
} else {
|
||||
info!(
|
||||
error = %e,
|
||||
attempt = init_attempt,
|
||||
retry_limit = %retry_limit,
|
||||
retry_in_secs = 2,
|
||||
"ME pool startup warmup: retrying background initialization"
|
||||
);
|
||||
}
|
||||
pool_bg.reset_stun_state();
|
||||
tokio::time::sleep(Duration::from_secs(2)).await;
|
||||
}
|
||||
}
|
||||
}
|
||||
});
|
||||
});
|
||||
startup_tracker
|
||||
.set_me_status(StartupMeStatus::Initializing, "background_init")
|
||||
.await;
|
||||
info!(
|
||||
startup_grace_secs = 80,
|
||||
"ME pool initialization continues in background; startup continues with conditional Direct fallback"
|
||||
);
|
||||
Some(pool)
|
||||
} else {
|
||||
let mut init_attempt: u32 = 0;
|
||||
loop {
|
||||
init_attempt = init_attempt.saturating_add(1);
|
||||
startup_tracker.set_me_init_attempt(init_attempt).await;
|
||||
match pool.init(pool_size, &rng).await {
|
||||
Ok(()) => {
|
||||
startup_tracker.set_me_last_error(None).await;
|
||||
startup_tracker
|
||||
.complete_component(
|
||||
COMPONENT_ME_POOL_INIT_STAGE1,
|
||||
Some("ME pool initialized".to_string()),
|
||||
)
|
||||
.await;
|
||||
startup_tracker
|
||||
.set_me_status(StartupMeStatus::Ready, "ready")
|
||||
.await;
|
||||
info!(
|
||||
attempt = init_attempt,
|
||||
"Middle-End pool initialized successfully"
|
||||
);
|
||||
|
||||
// ── Supervised background tasks ──────────────────
|
||||
let pool_clone = pool.clone();
|
||||
let rng_clone = rng.clone();
|
||||
let min_conns = pool_size;
|
||||
tokio::spawn(async move {
|
||||
loop {
|
||||
let p = pool_clone.clone();
|
||||
let r = rng_clone.clone();
|
||||
let res = tokio::spawn(async move {
|
||||
crate::transport::middle_proxy::me_health_monitor(
|
||||
p, r, min_conns,
|
||||
)
|
||||
.await;
|
||||
})
|
||||
.await;
|
||||
match res {
|
||||
Ok(()) => warn!(
|
||||
"me_health_monitor exited unexpectedly, restarting"
|
||||
),
|
||||
Err(e) => {
|
||||
error!(error = %e, "me_health_monitor panicked, restarting in 1s");
|
||||
tokio::time::sleep(Duration::from_secs(1)).await;
|
||||
}
|
||||
}
|
||||
}
|
||||
});
|
||||
let pool_drain_enforcer = pool.clone();
|
||||
tokio::spawn(async move {
|
||||
loop {
|
||||
let p = pool_drain_enforcer.clone();
|
||||
let res = tokio::spawn(async move {
|
||||
crate::transport::middle_proxy::me_drain_timeout_enforcer(p).await;
|
||||
})
|
||||
.await;
|
||||
match res {
|
||||
Ok(()) => warn!(
|
||||
"me_drain_timeout_enforcer exited unexpectedly, restarting"
|
||||
),
|
||||
Err(e) => {
|
||||
error!(error = %e, "me_drain_timeout_enforcer panicked, restarting in 1s");
|
||||
tokio::time::sleep(Duration::from_secs(1)).await;
|
||||
}
|
||||
}
|
||||
}
|
||||
});
|
||||
let pool_watchdog = pool.clone();
|
||||
tokio::spawn(async move {
|
||||
loop {
|
||||
let p = pool_watchdog.clone();
|
||||
let res = tokio::spawn(async move {
|
||||
crate::transport::middle_proxy::me_zombie_writer_watchdog(p).await;
|
||||
})
|
||||
.await;
|
||||
match res {
|
||||
Ok(()) => warn!(
|
||||
"me_zombie_writer_watchdog exited unexpectedly, restarting"
|
||||
),
|
||||
Err(e) => {
|
||||
error!(error = %e, "me_zombie_writer_watchdog panicked, restarting in 1s");
|
||||
tokio::time::sleep(Duration::from_secs(1)).await;
|
||||
}
|
||||
}
|
||||
}
|
||||
});
|
||||
|
||||
break Some(pool);
|
||||
}
|
||||
Err(e) => {
|
||||
startup_tracker.set_me_last_error(Some(e.to_string())).await;
|
||||
let retries_limited = me_init_retry_attempts > 0;
|
||||
if retries_limited && init_attempt >= me_init_retry_attempts {
|
||||
startup_tracker
|
||||
.fail_component(
|
||||
COMPONENT_ME_POOL_INIT_STAGE1,
|
||||
Some("ME init retry budget exhausted".to_string()),
|
||||
)
|
||||
.await;
|
||||
startup_tracker
|
||||
.set_me_status(StartupMeStatus::Failed, "failed")
|
||||
.await;
|
||||
error!(
|
||||
error = %e,
|
||||
attempt = init_attempt,
|
||||
retry_limit = me_init_retry_attempts,
|
||||
"ME pool init retries exhausted; startup cannot continue in middle-proxy mode"
|
||||
);
|
||||
break None;
|
||||
}
|
||||
|
||||
let retry_limit = if me_init_retry_attempts == 0 {
|
||||
String::from("unlimited")
|
||||
} else {
|
||||
me_init_retry_attempts.to_string()
|
||||
};
|
||||
if init_attempt >= me_init_warn_after_attempts {
|
||||
warn!(
|
||||
error = %e,
|
||||
attempt = init_attempt,
|
||||
retry_limit = retry_limit,
|
||||
me2dc_fallback = me2dc_fallback,
|
||||
retry_in_secs = 2,
|
||||
"ME pool is not ready yet; retrying startup initialization"
|
||||
);
|
||||
} else {
|
||||
info!(
|
||||
error = %e,
|
||||
attempt = init_attempt,
|
||||
retry_limit = retry_limit,
|
||||
me2dc_fallback = me2dc_fallback,
|
||||
retry_in_secs = 2,
|
||||
"ME pool startup warmup: retrying initialization"
|
||||
);
|
||||
}
|
||||
pool.reset_stun_state();
|
||||
tokio::time::sleep(Duration::from_secs(2)).await;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
} else {
|
||||
startup_tracker
|
||||
.skip_component(
|
||||
COMPONENT_ME_POOL_CONSTRUCT,
|
||||
Some("ME configs are incomplete".to_string()),
|
||||
)
|
||||
.await;
|
||||
startup_tracker
|
||||
.fail_component(
|
||||
COMPONENT_ME_POOL_INIT_STAGE1,
|
||||
Some("ME configs are incomplete".to_string()),
|
||||
)
|
||||
.await;
|
||||
startup_tracker
|
||||
.set_me_status(StartupMeStatus::Failed, "failed")
|
||||
.await;
|
||||
None
|
||||
}
|
||||
}
|
||||
None => {
|
||||
startup_tracker
|
||||
.fail_component(
|
||||
COMPONENT_ME_SECRET_FETCH,
|
||||
Some("proxy-secret unavailable".to_string()),
|
||||
)
|
||||
.await;
|
||||
startup_tracker
|
||||
.skip_component(
|
||||
COMPONENT_ME_PROXY_CONFIG_V4,
|
||||
Some("proxy-secret unavailable".to_string()),
|
||||
)
|
||||
.await;
|
||||
startup_tracker
|
||||
.skip_component(
|
||||
COMPONENT_ME_PROXY_CONFIG_V6,
|
||||
Some("proxy-secret unavailable".to_string()),
|
||||
)
|
||||
.await;
|
||||
startup_tracker
|
||||
.skip_component(
|
||||
COMPONENT_ME_POOL_CONSTRUCT,
|
||||
Some("proxy-secret unavailable".to_string()),
|
||||
)
|
||||
.await;
|
||||
startup_tracker
|
||||
.fail_component(
|
||||
COMPONENT_ME_POOL_INIT_STAGE1,
|
||||
Some("proxy-secret unavailable".to_string()),
|
||||
)
|
||||
.await;
|
||||
startup_tracker
|
||||
.set_me_status(StartupMeStatus::Failed, "failed")
|
||||
.await;
|
||||
None
|
||||
}
|
||||
}
|
||||
}
|
||||
628
src/maestro/mod.rs
Normal file
628
src/maestro/mod.rs
Normal file
@@ -0,0 +1,628 @@
|
||||
//! telemt — Telegram MTProto Proxy
|
||||
|
||||
#![allow(unused_assignments)]
|
||||
|
||||
// Runtime orchestration modules.
|
||||
// - helpers: CLI and shared startup/runtime helper routines.
|
||||
// - tls_bootstrap: TLS front cache bootstrap and refresh tasks.
|
||||
// - me_startup: Middle-End secret/config fetch and pool initialization.
|
||||
// - connectivity: startup ME/DC connectivity diagnostics.
|
||||
// - runtime_tasks: hot-reload and background task orchestration.
|
||||
// - admission: conditional-cast gate and route mode switching.
|
||||
// - listeners: TCP/Unix listener bind and accept-loop orchestration.
|
||||
// - shutdown: graceful shutdown sequence and uptime logging.
|
||||
mod admission;
|
||||
mod connectivity;
|
||||
mod helpers;
|
||||
mod listeners;
|
||||
mod me_startup;
|
||||
mod runtime_tasks;
|
||||
mod shutdown;
|
||||
mod tls_bootstrap;
|
||||
|
||||
use std::net::{IpAddr, SocketAddr};
|
||||
use std::sync::Arc;
|
||||
use std::time::{Duration, Instant, SystemTime, UNIX_EPOCH};
|
||||
use tokio::sync::{RwLock, Semaphore, watch};
|
||||
use tracing::{error, info, warn};
|
||||
use tracing_subscriber::{EnvFilter, fmt, prelude::*, reload};
|
||||
|
||||
use crate::api;
|
||||
use crate::config::{LogLevel, ProxyConfig};
|
||||
use crate::crypto::SecureRandom;
|
||||
use crate::ip_tracker::UserIpTracker;
|
||||
use crate::network::probe::{decide_network_capabilities, log_probe_result, run_probe};
|
||||
use crate::proxy::route_mode::{RelayRouteMode, RouteRuntimeController};
|
||||
use crate::startup::{
|
||||
COMPONENT_API_BOOTSTRAP, COMPONENT_CONFIG_LOAD, COMPONENT_ME_POOL_CONSTRUCT,
|
||||
COMPONENT_ME_POOL_INIT_STAGE1, COMPONENT_ME_PROXY_CONFIG_V4, COMPONENT_ME_PROXY_CONFIG_V6,
|
||||
COMPONENT_ME_SECRET_FETCH, COMPONENT_NETWORK_PROBE, COMPONENT_TRACING_INIT, StartupMeStatus,
|
||||
StartupTracker,
|
||||
};
|
||||
use crate::stats::beobachten::BeobachtenStore;
|
||||
use crate::stats::telemetry::TelemetryPolicy;
|
||||
use crate::stats::{ReplayChecker, Stats};
|
||||
use crate::stream::BufferPool;
|
||||
use crate::transport::UpstreamManager;
|
||||
use crate::transport::middle_proxy::MePool;
|
||||
use helpers::{parse_cli, resolve_runtime_config_path};
|
||||
|
||||
/// Runs the full telemt runtime startup pipeline and blocks until shutdown.
|
||||
pub async fn run() -> std::result::Result<(), Box<dyn std::error::Error>> {
|
||||
let process_started_at = Instant::now();
|
||||
let process_started_at_epoch_secs = SystemTime::now()
|
||||
.duration_since(UNIX_EPOCH)
|
||||
.unwrap_or_default()
|
||||
.as_secs();
|
||||
let startup_tracker = Arc::new(StartupTracker::new(process_started_at_epoch_secs));
|
||||
startup_tracker
|
||||
.start_component(
|
||||
COMPONENT_CONFIG_LOAD,
|
||||
Some("load and validate config".to_string()),
|
||||
)
|
||||
.await;
|
||||
let (config_path_cli, data_path, cli_silent, cli_log_level) = parse_cli();
|
||||
let startup_cwd = match std::env::current_dir() {
|
||||
Ok(cwd) => cwd,
|
||||
Err(e) => {
|
||||
eprintln!("[telemt] Can't read current_dir: {}", e);
|
||||
std::process::exit(1);
|
||||
}
|
||||
};
|
||||
let config_path = resolve_runtime_config_path(&config_path_cli, &startup_cwd);
|
||||
|
||||
let mut config = match ProxyConfig::load(&config_path) {
|
||||
Ok(c) => c,
|
||||
Err(e) => {
|
||||
if config_path.exists() {
|
||||
eprintln!("[telemt] Error: {}", e);
|
||||
std::process::exit(1);
|
||||
} else {
|
||||
let default = ProxyConfig::default();
|
||||
std::fs::write(&config_path, toml::to_string_pretty(&default).unwrap()).unwrap();
|
||||
eprintln!(
|
||||
"[telemt] Created default config at {}",
|
||||
config_path.display()
|
||||
);
|
||||
default
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
if let Err(e) = config.validate() {
|
||||
eprintln!("[telemt] Invalid config: {}", e);
|
||||
std::process::exit(1);
|
||||
}
|
||||
|
||||
if let Some(p) = data_path {
|
||||
config.general.data_path = Some(p);
|
||||
}
|
||||
|
||||
if let Some(ref data_path) = config.general.data_path {
|
||||
if !data_path.is_absolute() {
|
||||
eprintln!(
|
||||
"[telemt] data_path must be absolute: {}",
|
||||
data_path.display()
|
||||
);
|
||||
std::process::exit(1);
|
||||
}
|
||||
|
||||
if data_path.exists() {
|
||||
if !data_path.is_dir() {
|
||||
eprintln!(
|
||||
"[telemt] data_path exists but is not a directory: {}",
|
||||
data_path.display()
|
||||
);
|
||||
std::process::exit(1);
|
||||
}
|
||||
} else {
|
||||
if let Err(e) = std::fs::create_dir_all(data_path) {
|
||||
eprintln!(
|
||||
"[telemt] Can't create data_path {}: {}",
|
||||
data_path.display(),
|
||||
e
|
||||
);
|
||||
std::process::exit(1);
|
||||
}
|
||||
}
|
||||
|
||||
if let Err(e) = std::env::set_current_dir(data_path) {
|
||||
eprintln!(
|
||||
"[telemt] Can't use data_path {}: {}",
|
||||
data_path.display(),
|
||||
e
|
||||
);
|
||||
std::process::exit(1);
|
||||
}
|
||||
}
|
||||
|
||||
if let Err(e) = crate::network::dns_overrides::install_entries(&config.network.dns_overrides) {
|
||||
eprintln!("[telemt] Invalid network.dns_overrides: {}", e);
|
||||
std::process::exit(1);
|
||||
}
|
||||
startup_tracker
|
||||
.complete_component(COMPONENT_CONFIG_LOAD, Some("config is ready".to_string()))
|
||||
.await;
|
||||
|
||||
let has_rust_log = std::env::var("RUST_LOG").is_ok();
|
||||
let effective_log_level = if cli_silent {
|
||||
LogLevel::Silent
|
||||
} else if let Some(ref s) = cli_log_level {
|
||||
LogLevel::from_str_loose(s)
|
||||
} else {
|
||||
config.general.log_level.clone()
|
||||
};
|
||||
|
||||
let (filter_layer, filter_handle) = reload::Layer::new(EnvFilter::new("info"));
|
||||
startup_tracker
|
||||
.start_component(
|
||||
COMPONENT_TRACING_INIT,
|
||||
Some("initialize tracing subscriber".to_string()),
|
||||
)
|
||||
.await;
|
||||
|
||||
// Configure color output based on config
|
||||
let fmt_layer = if config.general.disable_colors {
|
||||
fmt::Layer::default().with_ansi(false)
|
||||
} else {
|
||||
fmt::Layer::default().with_ansi(true)
|
||||
};
|
||||
|
||||
tracing_subscriber::registry()
|
||||
.with(filter_layer)
|
||||
.with(fmt_layer)
|
||||
.init();
|
||||
startup_tracker
|
||||
.complete_component(
|
||||
COMPONENT_TRACING_INIT,
|
||||
Some("tracing initialized".to_string()),
|
||||
)
|
||||
.await;
|
||||
|
||||
info!("Telemt MTProxy v{}", env!("CARGO_PKG_VERSION"));
|
||||
info!("Log level: {}", effective_log_level);
|
||||
if config.general.disable_colors {
|
||||
info!("Colors: disabled");
|
||||
}
|
||||
info!(
|
||||
"Modes: classic={} secure={} tls={}",
|
||||
config.general.modes.classic, config.general.modes.secure, config.general.modes.tls
|
||||
);
|
||||
if config.general.modes.classic {
|
||||
warn!("Classic mode is vulnerable to DPI detection; enable only for legacy clients");
|
||||
}
|
||||
info!("TLS domain: {}", config.censorship.tls_domain);
|
||||
if let Some(ref sock) = config.censorship.mask_unix_sock {
|
||||
info!("Mask: {} -> unix:{}", config.censorship.mask, sock);
|
||||
if !std::path::Path::new(sock).exists() {
|
||||
warn!(
|
||||
"Unix socket '{}' does not exist yet. Masking will fail until it appears.",
|
||||
sock
|
||||
);
|
||||
}
|
||||
} else {
|
||||
info!(
|
||||
"Mask: {} -> {}:{}",
|
||||
config.censorship.mask,
|
||||
config
|
||||
.censorship
|
||||
.mask_host
|
||||
.as_deref()
|
||||
.unwrap_or(&config.censorship.tls_domain),
|
||||
config.censorship.mask_port
|
||||
);
|
||||
}
|
||||
|
||||
if config.censorship.tls_domain == "www.google.com" {
|
||||
warn!("Using default tls_domain. Consider setting a custom domain.");
|
||||
}
|
||||
|
||||
let stats = Arc::new(Stats::new());
|
||||
stats.apply_telemetry_policy(TelemetryPolicy::from_config(&config.general.telemetry));
|
||||
|
||||
let upstream_manager = Arc::new(UpstreamManager::new(
|
||||
config.upstreams.clone(),
|
||||
config.general.upstream_connect_retry_attempts,
|
||||
config.general.upstream_connect_retry_backoff_ms,
|
||||
config.general.upstream_connect_budget_ms,
|
||||
config.general.upstream_unhealthy_fail_threshold,
|
||||
config.general.upstream_connect_failfast_hard_errors,
|
||||
stats.clone(),
|
||||
));
|
||||
let ip_tracker = Arc::new(UserIpTracker::new());
|
||||
ip_tracker
|
||||
.load_limits(
|
||||
config.access.user_max_unique_ips_global_each,
|
||||
&config.access.user_max_unique_ips,
|
||||
)
|
||||
.await;
|
||||
ip_tracker
|
||||
.set_limit_policy(
|
||||
config.access.user_max_unique_ips_mode,
|
||||
config.access.user_max_unique_ips_window_secs,
|
||||
)
|
||||
.await;
|
||||
if config.access.user_max_unique_ips_global_each > 0
|
||||
|| !config.access.user_max_unique_ips.is_empty()
|
||||
{
|
||||
info!(
|
||||
global_each_limit = config.access.user_max_unique_ips_global_each,
|
||||
explicit_user_limits = config.access.user_max_unique_ips.len(),
|
||||
"User unique IP limits configured"
|
||||
);
|
||||
}
|
||||
if !config.network.dns_overrides.is_empty() {
|
||||
info!(
|
||||
"Runtime DNS overrides configured: {} entries",
|
||||
config.network.dns_overrides.len()
|
||||
);
|
||||
}
|
||||
|
||||
let (api_config_tx, api_config_rx) = watch::channel(Arc::new(config.clone()));
|
||||
let (detected_ips_tx, detected_ips_rx) = watch::channel((None::<IpAddr>, None::<IpAddr>));
|
||||
let initial_admission_open = !config.general.use_middle_proxy;
|
||||
let (admission_tx, admission_rx) = watch::channel(initial_admission_open);
|
||||
let initial_route_mode = if config.general.use_middle_proxy {
|
||||
RelayRouteMode::Middle
|
||||
} else {
|
||||
RelayRouteMode::Direct
|
||||
};
|
||||
let route_runtime = Arc::new(RouteRuntimeController::new(initial_route_mode));
|
||||
let api_me_pool = Arc::new(RwLock::new(None::<Arc<MePool>>));
|
||||
startup_tracker
|
||||
.start_component(
|
||||
COMPONENT_API_BOOTSTRAP,
|
||||
Some("spawn API listener task".to_string()),
|
||||
)
|
||||
.await;
|
||||
|
||||
if config.server.api.enabled {
|
||||
let listen = match config.server.api.listen.parse::<SocketAddr>() {
|
||||
Ok(listen) => listen,
|
||||
Err(error) => {
|
||||
warn!(
|
||||
error = %error,
|
||||
listen = %config.server.api.listen,
|
||||
"Invalid server.api.listen; API is disabled"
|
||||
);
|
||||
SocketAddr::from(([127, 0, 0, 1], 0))
|
||||
}
|
||||
};
|
||||
if listen.port() != 0 {
|
||||
let stats_api = stats.clone();
|
||||
let ip_tracker_api = ip_tracker.clone();
|
||||
let me_pool_api = api_me_pool.clone();
|
||||
let upstream_manager_api = upstream_manager.clone();
|
||||
let route_runtime_api = route_runtime.clone();
|
||||
let config_rx_api = api_config_rx.clone();
|
||||
let admission_rx_api = admission_rx.clone();
|
||||
let config_path_api = config_path.clone();
|
||||
let startup_tracker_api = startup_tracker.clone();
|
||||
let detected_ips_rx_api = detected_ips_rx.clone();
|
||||
tokio::spawn(async move {
|
||||
api::serve(
|
||||
listen,
|
||||
stats_api,
|
||||
ip_tracker_api,
|
||||
me_pool_api,
|
||||
route_runtime_api,
|
||||
upstream_manager_api,
|
||||
config_rx_api,
|
||||
admission_rx_api,
|
||||
config_path_api,
|
||||
detected_ips_rx_api,
|
||||
process_started_at_epoch_secs,
|
||||
startup_tracker_api,
|
||||
)
|
||||
.await;
|
||||
});
|
||||
startup_tracker
|
||||
.complete_component(
|
||||
COMPONENT_API_BOOTSTRAP,
|
||||
Some(format!("api task spawned on {}", listen)),
|
||||
)
|
||||
.await;
|
||||
} else {
|
||||
startup_tracker
|
||||
.skip_component(
|
||||
COMPONENT_API_BOOTSTRAP,
|
||||
Some("server.api.listen has zero port".to_string()),
|
||||
)
|
||||
.await;
|
||||
}
|
||||
} else {
|
||||
startup_tracker
|
||||
.skip_component(
|
||||
COMPONENT_API_BOOTSTRAP,
|
||||
Some("server.api.enabled is false".to_string()),
|
||||
)
|
||||
.await;
|
||||
}
|
||||
|
||||
let mut tls_domains = Vec::with_capacity(1 + config.censorship.tls_domains.len());
|
||||
tls_domains.push(config.censorship.tls_domain.clone());
|
||||
for d in &config.censorship.tls_domains {
|
||||
if !tls_domains.contains(d) {
|
||||
tls_domains.push(d.clone());
|
||||
}
|
||||
}
|
||||
|
||||
let tls_cache = tls_bootstrap::bootstrap_tls_front(
|
||||
&config,
|
||||
&tls_domains,
|
||||
upstream_manager.clone(),
|
||||
&startup_tracker,
|
||||
)
|
||||
.await;
|
||||
|
||||
startup_tracker
|
||||
.start_component(
|
||||
COMPONENT_NETWORK_PROBE,
|
||||
Some("probe network capabilities".to_string()),
|
||||
)
|
||||
.await;
|
||||
let probe = run_probe(
|
||||
&config.network,
|
||||
&config.upstreams,
|
||||
config.general.middle_proxy_nat_probe,
|
||||
config.general.stun_nat_probe_concurrency,
|
||||
)
|
||||
.await?;
|
||||
detected_ips_tx.send_replace((
|
||||
probe.detected_ipv4.map(IpAddr::V4),
|
||||
probe.detected_ipv6.map(IpAddr::V6),
|
||||
));
|
||||
let decision =
|
||||
decide_network_capabilities(&config.network, &probe, config.general.middle_proxy_nat_ip);
|
||||
log_probe_result(&probe, &decision);
|
||||
startup_tracker
|
||||
.complete_component(
|
||||
COMPONENT_NETWORK_PROBE,
|
||||
Some("network capabilities determined".to_string()),
|
||||
)
|
||||
.await;
|
||||
|
||||
let prefer_ipv6 = decision.prefer_ipv6();
|
||||
let mut use_middle_proxy = config.general.use_middle_proxy;
|
||||
let beobachten = Arc::new(BeobachtenStore::new());
|
||||
let rng = Arc::new(SecureRandom::new());
|
||||
|
||||
// Connection concurrency limit (0 = unlimited)
|
||||
let max_connections_limit = if config.server.max_connections == 0 {
|
||||
Semaphore::MAX_PERMITS
|
||||
} else {
|
||||
config.server.max_connections as usize
|
||||
};
|
||||
let max_connections = Arc::new(Semaphore::new(max_connections_limit));
|
||||
|
||||
let me2dc_fallback = config.general.me2dc_fallback;
|
||||
let me_init_retry_attempts = config.general.me_init_retry_attempts;
|
||||
if use_middle_proxy && !decision.ipv4_me && !decision.ipv6_me {
|
||||
if me2dc_fallback {
|
||||
warn!("No usable IP family for Middle Proxy detected; falling back to direct DC");
|
||||
use_middle_proxy = false;
|
||||
} else {
|
||||
warn!(
|
||||
"No usable IP family for Middle Proxy detected; me2dc_fallback=false, ME init retries stay active"
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
if use_middle_proxy {
|
||||
startup_tracker
|
||||
.set_me_status(StartupMeStatus::Initializing, COMPONENT_ME_SECRET_FETCH)
|
||||
.await;
|
||||
startup_tracker
|
||||
.start_component(
|
||||
COMPONENT_ME_SECRET_FETCH,
|
||||
Some("fetch proxy-secret from source/cache".to_string()),
|
||||
)
|
||||
.await;
|
||||
startup_tracker
|
||||
.set_me_retry_limit(if !me2dc_fallback || me_init_retry_attempts == 0 {
|
||||
"unlimited".to_string()
|
||||
} else {
|
||||
me_init_retry_attempts.to_string()
|
||||
})
|
||||
.await;
|
||||
} else {
|
||||
startup_tracker
|
||||
.set_me_status(StartupMeStatus::Skipped, "skipped")
|
||||
.await;
|
||||
startup_tracker
|
||||
.skip_component(
|
||||
COMPONENT_ME_SECRET_FETCH,
|
||||
Some("middle proxy mode disabled".to_string()),
|
||||
)
|
||||
.await;
|
||||
startup_tracker
|
||||
.skip_component(
|
||||
COMPONENT_ME_PROXY_CONFIG_V4,
|
||||
Some("middle proxy mode disabled".to_string()),
|
||||
)
|
||||
.await;
|
||||
startup_tracker
|
||||
.skip_component(
|
||||
COMPONENT_ME_PROXY_CONFIG_V6,
|
||||
Some("middle proxy mode disabled".to_string()),
|
||||
)
|
||||
.await;
|
||||
startup_tracker
|
||||
.skip_component(
|
||||
COMPONENT_ME_POOL_CONSTRUCT,
|
||||
Some("middle proxy mode disabled".to_string()),
|
||||
)
|
||||
.await;
|
||||
startup_tracker
|
||||
.skip_component(
|
||||
COMPONENT_ME_POOL_INIT_STAGE1,
|
||||
Some("middle proxy mode disabled".to_string()),
|
||||
)
|
||||
.await;
|
||||
}
|
||||
|
||||
let me_pool: Option<Arc<MePool>> = me_startup::initialize_me_pool(
|
||||
use_middle_proxy,
|
||||
&config,
|
||||
&decision,
|
||||
&probe,
|
||||
&startup_tracker,
|
||||
upstream_manager.clone(),
|
||||
rng.clone(),
|
||||
stats.clone(),
|
||||
api_me_pool.clone(),
|
||||
)
|
||||
.await;
|
||||
|
||||
// If ME failed to initialize, force direct-only mode.
|
||||
if me_pool.is_some() {
|
||||
startup_tracker.set_transport_mode("middle_proxy").await;
|
||||
startup_tracker.set_degraded(false).await;
|
||||
info!("Transport: Middle-End Proxy - all DC-over-RPC");
|
||||
} else {
|
||||
let _ = use_middle_proxy;
|
||||
use_middle_proxy = false;
|
||||
// Make runtime config reflect direct-only mode for handlers.
|
||||
config.general.use_middle_proxy = false;
|
||||
startup_tracker.set_transport_mode("direct").await;
|
||||
startup_tracker.set_degraded(true).await;
|
||||
if me2dc_fallback {
|
||||
startup_tracker
|
||||
.set_me_status(StartupMeStatus::Failed, "fallback_to_direct")
|
||||
.await;
|
||||
} else {
|
||||
startup_tracker
|
||||
.set_me_status(StartupMeStatus::Skipped, "skipped")
|
||||
.await;
|
||||
}
|
||||
info!("Transport: Direct DC - TCP - standard DC-over-TCP");
|
||||
}
|
||||
|
||||
// Freeze config after possible fallback decision
|
||||
let config = Arc::new(config);
|
||||
|
||||
let replay_checker = Arc::new(ReplayChecker::new(
|
||||
config.access.replay_check_len,
|
||||
Duration::from_secs(config.access.replay_window_secs),
|
||||
));
|
||||
|
||||
let buffer_pool = Arc::new(BufferPool::with_config(64 * 1024, 4096));
|
||||
|
||||
connectivity::run_startup_connectivity(
|
||||
&config,
|
||||
&me_pool,
|
||||
rng.clone(),
|
||||
&startup_tracker,
|
||||
upstream_manager.clone(),
|
||||
prefer_ipv6,
|
||||
&decision,
|
||||
process_started_at,
|
||||
api_me_pool.clone(),
|
||||
)
|
||||
.await;
|
||||
|
||||
let runtime_watches = runtime_tasks::spawn_runtime_tasks(
|
||||
&config,
|
||||
&config_path,
|
||||
&probe,
|
||||
prefer_ipv6,
|
||||
decision.ipv4_dc,
|
||||
decision.ipv6_dc,
|
||||
&startup_tracker,
|
||||
stats.clone(),
|
||||
upstream_manager.clone(),
|
||||
replay_checker.clone(),
|
||||
me_pool.clone(),
|
||||
rng.clone(),
|
||||
ip_tracker.clone(),
|
||||
beobachten.clone(),
|
||||
api_config_tx.clone(),
|
||||
me_pool.clone(),
|
||||
)
|
||||
.await;
|
||||
let config_rx = runtime_watches.config_rx;
|
||||
let log_level_rx = runtime_watches.log_level_rx;
|
||||
let detected_ip_v4 = runtime_watches.detected_ip_v4;
|
||||
let detected_ip_v6 = runtime_watches.detected_ip_v6;
|
||||
|
||||
admission::configure_admission_gate(
|
||||
&config,
|
||||
me_pool.clone(),
|
||||
route_runtime.clone(),
|
||||
&admission_tx,
|
||||
config_rx.clone(),
|
||||
)
|
||||
.await;
|
||||
let _admission_tx_hold = admission_tx;
|
||||
|
||||
let bound = listeners::bind_listeners(
|
||||
&config,
|
||||
decision.ipv4_dc,
|
||||
decision.ipv6_dc,
|
||||
detected_ip_v4,
|
||||
detected_ip_v6,
|
||||
&startup_tracker,
|
||||
config_rx.clone(),
|
||||
admission_rx.clone(),
|
||||
stats.clone(),
|
||||
upstream_manager.clone(),
|
||||
replay_checker.clone(),
|
||||
buffer_pool.clone(),
|
||||
rng.clone(),
|
||||
me_pool.clone(),
|
||||
route_runtime.clone(),
|
||||
tls_cache.clone(),
|
||||
ip_tracker.clone(),
|
||||
beobachten.clone(),
|
||||
max_connections.clone(),
|
||||
)
|
||||
.await?;
|
||||
let listeners = bound.listeners;
|
||||
let has_unix_listener = bound.has_unix_listener;
|
||||
|
||||
if listeners.is_empty() && !has_unix_listener {
|
||||
error!("No listeners. Exiting.");
|
||||
std::process::exit(1);
|
||||
}
|
||||
|
||||
runtime_tasks::apply_runtime_log_filter(
|
||||
has_rust_log,
|
||||
&effective_log_level,
|
||||
filter_handle,
|
||||
log_level_rx,
|
||||
)
|
||||
.await;
|
||||
|
||||
runtime_tasks::spawn_metrics_if_configured(
|
||||
&config,
|
||||
&startup_tracker,
|
||||
stats.clone(),
|
||||
beobachten.clone(),
|
||||
ip_tracker.clone(),
|
||||
config_rx.clone(),
|
||||
)
|
||||
.await;
|
||||
|
||||
runtime_tasks::mark_runtime_ready(&startup_tracker).await;
|
||||
|
||||
listeners::spawn_tcp_accept_loops(
|
||||
listeners,
|
||||
config_rx.clone(),
|
||||
admission_rx.clone(),
|
||||
stats.clone(),
|
||||
upstream_manager.clone(),
|
||||
replay_checker.clone(),
|
||||
buffer_pool.clone(),
|
||||
rng.clone(),
|
||||
me_pool.clone(),
|
||||
route_runtime.clone(),
|
||||
tls_cache.clone(),
|
||||
ip_tracker.clone(),
|
||||
beobachten.clone(),
|
||||
max_connections.clone(),
|
||||
);
|
||||
|
||||
shutdown::wait_for_shutdown(process_started_at, me_pool).await;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
362
src/maestro/runtime_tasks.rs
Normal file
362
src/maestro/runtime_tasks.rs
Normal file
@@ -0,0 +1,362 @@
|
||||
use std::net::IpAddr;
|
||||
use std::path::Path;
|
||||
use std::sync::Arc;
|
||||
|
||||
use tokio::sync::{mpsc, watch};
|
||||
use tracing::{debug, warn};
|
||||
use tracing_subscriber::EnvFilter;
|
||||
use tracing_subscriber::reload;
|
||||
|
||||
use crate::config::hot_reload::spawn_config_watcher;
|
||||
use crate::config::{LogLevel, ProxyConfig};
|
||||
use crate::crypto::SecureRandom;
|
||||
use crate::ip_tracker::UserIpTracker;
|
||||
use crate::metrics;
|
||||
use crate::network::probe::NetworkProbe;
|
||||
use crate::startup::{
|
||||
COMPONENT_CONFIG_WATCHER_START, COMPONENT_METRICS_START, COMPONENT_RUNTIME_READY,
|
||||
StartupTracker,
|
||||
};
|
||||
use crate::stats::beobachten::BeobachtenStore;
|
||||
use crate::stats::telemetry::TelemetryPolicy;
|
||||
use crate::stats::{ReplayChecker, Stats};
|
||||
use crate::transport::UpstreamManager;
|
||||
use crate::transport::middle_proxy::{MePool, MeReinitTrigger};
|
||||
|
||||
use super::helpers::write_beobachten_snapshot;
|
||||
|
||||
pub(crate) struct RuntimeWatches {
|
||||
pub(crate) config_rx: watch::Receiver<Arc<ProxyConfig>>,
|
||||
pub(crate) log_level_rx: watch::Receiver<LogLevel>,
|
||||
pub(crate) detected_ip_v4: Option<IpAddr>,
|
||||
pub(crate) detected_ip_v6: Option<IpAddr>,
|
||||
}
|
||||
|
||||
#[allow(clippy::too_many_arguments)]
|
||||
pub(crate) async fn spawn_runtime_tasks(
|
||||
config: &Arc<ProxyConfig>,
|
||||
config_path: &Path,
|
||||
probe: &NetworkProbe,
|
||||
prefer_ipv6: bool,
|
||||
decision_ipv4_dc: bool,
|
||||
decision_ipv6_dc: bool,
|
||||
startup_tracker: &Arc<StartupTracker>,
|
||||
stats: Arc<Stats>,
|
||||
upstream_manager: Arc<UpstreamManager>,
|
||||
replay_checker: Arc<ReplayChecker>,
|
||||
me_pool: Option<Arc<MePool>>,
|
||||
rng: Arc<SecureRandom>,
|
||||
ip_tracker: Arc<UserIpTracker>,
|
||||
beobachten: Arc<BeobachtenStore>,
|
||||
api_config_tx: watch::Sender<Arc<ProxyConfig>>,
|
||||
me_pool_for_policy: Option<Arc<MePool>>,
|
||||
) -> RuntimeWatches {
|
||||
let um_clone = upstream_manager.clone();
|
||||
let dc_overrides_for_health = config.dc_overrides.clone();
|
||||
tokio::spawn(async move {
|
||||
um_clone
|
||||
.run_health_checks(
|
||||
prefer_ipv6,
|
||||
decision_ipv4_dc,
|
||||
decision_ipv6_dc,
|
||||
dc_overrides_for_health,
|
||||
)
|
||||
.await;
|
||||
});
|
||||
|
||||
let rc_clone = replay_checker.clone();
|
||||
tokio::spawn(async move {
|
||||
rc_clone.run_periodic_cleanup().await;
|
||||
});
|
||||
|
||||
let detected_ip_v4: Option<IpAddr> = probe.detected_ipv4.map(IpAddr::V4);
|
||||
let detected_ip_v6: Option<IpAddr> = probe.detected_ipv6.map(IpAddr::V6);
|
||||
debug!(
|
||||
"Detected IPs: v4={:?} v6={:?}",
|
||||
detected_ip_v4, detected_ip_v6
|
||||
);
|
||||
|
||||
startup_tracker
|
||||
.start_component(
|
||||
COMPONENT_CONFIG_WATCHER_START,
|
||||
Some("spawn config hot-reload watcher".to_string()),
|
||||
)
|
||||
.await;
|
||||
let (config_rx, log_level_rx): (watch::Receiver<Arc<ProxyConfig>>, watch::Receiver<LogLevel>) =
|
||||
spawn_config_watcher(
|
||||
config_path.to_path_buf(),
|
||||
config.clone(),
|
||||
detected_ip_v4,
|
||||
detected_ip_v6,
|
||||
);
|
||||
startup_tracker
|
||||
.complete_component(
|
||||
COMPONENT_CONFIG_WATCHER_START,
|
||||
Some("config hot-reload watcher started".to_string()),
|
||||
)
|
||||
.await;
|
||||
let mut config_rx_api_bridge = config_rx.clone();
|
||||
let api_config_tx_bridge = api_config_tx.clone();
|
||||
tokio::spawn(async move {
|
||||
loop {
|
||||
if config_rx_api_bridge.changed().await.is_err() {
|
||||
break;
|
||||
}
|
||||
let cfg = config_rx_api_bridge.borrow_and_update().clone();
|
||||
api_config_tx_bridge.send_replace(cfg);
|
||||
}
|
||||
});
|
||||
|
||||
let stats_policy = stats.clone();
|
||||
let mut config_rx_policy = config_rx.clone();
|
||||
tokio::spawn(async move {
|
||||
loop {
|
||||
if config_rx_policy.changed().await.is_err() {
|
||||
break;
|
||||
}
|
||||
let cfg = config_rx_policy.borrow_and_update().clone();
|
||||
stats_policy
|
||||
.apply_telemetry_policy(TelemetryPolicy::from_config(&cfg.general.telemetry));
|
||||
if let Some(pool) = &me_pool_for_policy {
|
||||
pool.update_runtime_transport_policy(
|
||||
cfg.general.me_socks_kdf_policy,
|
||||
cfg.general.me_route_backpressure_base_timeout_ms,
|
||||
cfg.general.me_route_backpressure_high_timeout_ms,
|
||||
cfg.general.me_route_backpressure_high_watermark_pct,
|
||||
cfg.general.me_reader_route_data_wait_ms,
|
||||
);
|
||||
}
|
||||
}
|
||||
});
|
||||
|
||||
let ip_tracker_policy = ip_tracker.clone();
|
||||
let mut config_rx_ip_limits = config_rx.clone();
|
||||
tokio::spawn(async move {
|
||||
let mut prev_limits = config_rx_ip_limits
|
||||
.borrow()
|
||||
.access
|
||||
.user_max_unique_ips
|
||||
.clone();
|
||||
let mut prev_global_each = config_rx_ip_limits
|
||||
.borrow()
|
||||
.access
|
||||
.user_max_unique_ips_global_each;
|
||||
let mut prev_mode = config_rx_ip_limits.borrow().access.user_max_unique_ips_mode;
|
||||
let mut prev_window = config_rx_ip_limits
|
||||
.borrow()
|
||||
.access
|
||||
.user_max_unique_ips_window_secs;
|
||||
|
||||
loop {
|
||||
if config_rx_ip_limits.changed().await.is_err() {
|
||||
break;
|
||||
}
|
||||
let cfg = config_rx_ip_limits.borrow_and_update().clone();
|
||||
|
||||
if prev_limits != cfg.access.user_max_unique_ips
|
||||
|| prev_global_each != cfg.access.user_max_unique_ips_global_each
|
||||
{
|
||||
ip_tracker_policy
|
||||
.load_limits(
|
||||
cfg.access.user_max_unique_ips_global_each,
|
||||
&cfg.access.user_max_unique_ips,
|
||||
)
|
||||
.await;
|
||||
prev_limits = cfg.access.user_max_unique_ips.clone();
|
||||
prev_global_each = cfg.access.user_max_unique_ips_global_each;
|
||||
}
|
||||
|
||||
if prev_mode != cfg.access.user_max_unique_ips_mode
|
||||
|| prev_window != cfg.access.user_max_unique_ips_window_secs
|
||||
{
|
||||
ip_tracker_policy
|
||||
.set_limit_policy(
|
||||
cfg.access.user_max_unique_ips_mode,
|
||||
cfg.access.user_max_unique_ips_window_secs,
|
||||
)
|
||||
.await;
|
||||
prev_mode = cfg.access.user_max_unique_ips_mode;
|
||||
prev_window = cfg.access.user_max_unique_ips_window_secs;
|
||||
}
|
||||
}
|
||||
});
|
||||
|
||||
let beobachten_writer = beobachten.clone();
|
||||
let config_rx_beobachten = config_rx.clone();
|
||||
tokio::spawn(async move {
|
||||
loop {
|
||||
let cfg = config_rx_beobachten.borrow().clone();
|
||||
let sleep_secs = cfg.general.beobachten_flush_secs.max(1);
|
||||
|
||||
if cfg.general.beobachten {
|
||||
let ttl = std::time::Duration::from_secs(
|
||||
cfg.general.beobachten_minutes.saturating_mul(60),
|
||||
);
|
||||
let path = cfg.general.beobachten_file.clone();
|
||||
let snapshot = beobachten_writer.snapshot_text(ttl);
|
||||
if let Err(e) = write_beobachten_snapshot(&path, &snapshot).await {
|
||||
warn!(error = %e, path = %path, "Failed to flush beobachten snapshot");
|
||||
}
|
||||
}
|
||||
|
||||
tokio::time::sleep(std::time::Duration::from_secs(sleep_secs)).await;
|
||||
}
|
||||
});
|
||||
|
||||
if let Some(pool) = me_pool {
|
||||
let reinit_trigger_capacity = config.general.me_reinit_trigger_channel.max(1);
|
||||
let (reinit_tx, reinit_rx) = mpsc::channel::<MeReinitTrigger>(reinit_trigger_capacity);
|
||||
|
||||
let pool_clone_sched = pool.clone();
|
||||
let rng_clone_sched = rng.clone();
|
||||
let config_rx_clone_sched = config_rx.clone();
|
||||
tokio::spawn(async move {
|
||||
crate::transport::middle_proxy::me_reinit_scheduler(
|
||||
pool_clone_sched,
|
||||
rng_clone_sched,
|
||||
config_rx_clone_sched,
|
||||
reinit_rx,
|
||||
)
|
||||
.await;
|
||||
});
|
||||
|
||||
let pool_clone = pool.clone();
|
||||
let config_rx_clone = config_rx.clone();
|
||||
let reinit_tx_updater = reinit_tx.clone();
|
||||
tokio::spawn(async move {
|
||||
crate::transport::middle_proxy::me_config_updater(
|
||||
pool_clone,
|
||||
config_rx_clone,
|
||||
reinit_tx_updater,
|
||||
)
|
||||
.await;
|
||||
});
|
||||
|
||||
let config_rx_clone_rot = config_rx.clone();
|
||||
let reinit_tx_rotation = reinit_tx.clone();
|
||||
tokio::spawn(async move {
|
||||
crate::transport::middle_proxy::me_rotation_task(
|
||||
config_rx_clone_rot,
|
||||
reinit_tx_rotation,
|
||||
)
|
||||
.await;
|
||||
});
|
||||
}
|
||||
|
||||
RuntimeWatches {
|
||||
config_rx,
|
||||
log_level_rx,
|
||||
detected_ip_v4,
|
||||
detected_ip_v6,
|
||||
}
|
||||
}
|
||||
|
||||
pub(crate) async fn apply_runtime_log_filter(
|
||||
has_rust_log: bool,
|
||||
effective_log_level: &LogLevel,
|
||||
filter_handle: reload::Handle<EnvFilter, tracing_subscriber::Registry>,
|
||||
mut log_level_rx: watch::Receiver<LogLevel>,
|
||||
) {
|
||||
let runtime_filter = if has_rust_log {
|
||||
EnvFilter::from_default_env()
|
||||
} else if matches!(effective_log_level, LogLevel::Silent) {
|
||||
EnvFilter::new("warn,telemt::links=info")
|
||||
} else {
|
||||
EnvFilter::new(effective_log_level.to_filter_str())
|
||||
};
|
||||
filter_handle
|
||||
.reload(runtime_filter)
|
||||
.expect("Failed to switch log filter");
|
||||
|
||||
tokio::spawn(async move {
|
||||
loop {
|
||||
if log_level_rx.changed().await.is_err() {
|
||||
break;
|
||||
}
|
||||
let level = log_level_rx.borrow_and_update().clone();
|
||||
let new_filter = tracing_subscriber::EnvFilter::new(level.to_filter_str());
|
||||
if let Err(e) = filter_handle.reload(new_filter) {
|
||||
tracing::error!("config reload: failed to update log filter: {}", e);
|
||||
}
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
pub(crate) async fn spawn_metrics_if_configured(
|
||||
config: &Arc<ProxyConfig>,
|
||||
startup_tracker: &Arc<StartupTracker>,
|
||||
stats: Arc<Stats>,
|
||||
beobachten: Arc<BeobachtenStore>,
|
||||
ip_tracker: Arc<UserIpTracker>,
|
||||
config_rx: watch::Receiver<Arc<ProxyConfig>>,
|
||||
) {
|
||||
// metrics_listen takes precedence; fall back to metrics_port for backward compat.
|
||||
let metrics_target: Option<(u16, Option<String>)> =
|
||||
if let Some(ref listen) = config.server.metrics_listen {
|
||||
match listen.parse::<std::net::SocketAddr>() {
|
||||
Ok(addr) => Some((addr.port(), Some(listen.clone()))),
|
||||
Err(e) => {
|
||||
startup_tracker
|
||||
.skip_component(
|
||||
COMPONENT_METRICS_START,
|
||||
Some(format!("invalid metrics_listen \"{}\": {}", listen, e)),
|
||||
)
|
||||
.await;
|
||||
None
|
||||
}
|
||||
}
|
||||
} else {
|
||||
config.server.metrics_port.map(|p| (p, None))
|
||||
};
|
||||
|
||||
if let Some((port, listen)) = metrics_target {
|
||||
let fallback_label = format!("port {}", port);
|
||||
let label = listen.as_deref().unwrap_or(&fallback_label);
|
||||
startup_tracker
|
||||
.start_component(
|
||||
COMPONENT_METRICS_START,
|
||||
Some(format!("spawn metrics endpoint on {}", label)),
|
||||
)
|
||||
.await;
|
||||
let stats = stats.clone();
|
||||
let beobachten = beobachten.clone();
|
||||
let config_rx_metrics = config_rx.clone();
|
||||
let ip_tracker_metrics = ip_tracker.clone();
|
||||
let whitelist = config.server.metrics_whitelist.clone();
|
||||
tokio::spawn(async move {
|
||||
metrics::serve(
|
||||
port,
|
||||
listen,
|
||||
stats,
|
||||
beobachten,
|
||||
ip_tracker_metrics,
|
||||
config_rx_metrics,
|
||||
whitelist,
|
||||
)
|
||||
.await;
|
||||
});
|
||||
startup_tracker
|
||||
.complete_component(
|
||||
COMPONENT_METRICS_START,
|
||||
Some("metrics task spawned".to_string()),
|
||||
)
|
||||
.await;
|
||||
} else if config.server.metrics_listen.is_none() {
|
||||
startup_tracker
|
||||
.skip_component(
|
||||
COMPONENT_METRICS_START,
|
||||
Some("server.metrics_port is not configured".to_string()),
|
||||
)
|
||||
.await;
|
||||
}
|
||||
}
|
||||
|
||||
pub(crate) async fn mark_runtime_ready(startup_tracker: &Arc<StartupTracker>) {
|
||||
startup_tracker
|
||||
.complete_component(
|
||||
COMPONENT_RUNTIME_READY,
|
||||
Some("startup pipeline is fully initialized".to_string()),
|
||||
)
|
||||
.await;
|
||||
startup_tracker.mark_ready().await;
|
||||
}
|
||||
45
src/maestro/shutdown.rs
Normal file
45
src/maestro/shutdown.rs
Normal file
@@ -0,0 +1,45 @@
|
||||
use std::sync::Arc;
|
||||
use std::time::{Duration, Instant};
|
||||
|
||||
use tokio::signal;
|
||||
use tracing::{error, info, warn};
|
||||
|
||||
use crate::transport::middle_proxy::MePool;
|
||||
|
||||
use super::helpers::{format_uptime, unit_label};
|
||||
|
||||
pub(crate) async fn wait_for_shutdown(process_started_at: Instant, me_pool: Option<Arc<MePool>>) {
|
||||
match signal::ctrl_c().await {
|
||||
Ok(()) => {
|
||||
let shutdown_started_at = Instant::now();
|
||||
info!("Shutting down...");
|
||||
let uptime_secs = process_started_at.elapsed().as_secs();
|
||||
info!("Uptime: {}", format_uptime(uptime_secs));
|
||||
if let Some(pool) = &me_pool {
|
||||
match tokio::time::timeout(
|
||||
Duration::from_secs(2),
|
||||
pool.shutdown_send_close_conn_all(),
|
||||
)
|
||||
.await
|
||||
{
|
||||
Ok(total) => {
|
||||
info!(
|
||||
close_conn_sent = total,
|
||||
"ME shutdown: RPC_CLOSE_CONN broadcast completed"
|
||||
);
|
||||
}
|
||||
Err(_) => {
|
||||
warn!("ME shutdown: RPC_CLOSE_CONN broadcast timed out");
|
||||
}
|
||||
}
|
||||
}
|
||||
let shutdown_secs = shutdown_started_at.elapsed().as_secs();
|
||||
info!(
|
||||
"Shutdown completed successfully in {} {}.",
|
||||
shutdown_secs,
|
||||
unit_label(shutdown_secs, "second", "seconds")
|
||||
);
|
||||
}
|
||||
Err(e) => error!("Signal error: {}", e),
|
||||
}
|
||||
}
|
||||
188
src/maestro/tls_bootstrap.rs
Normal file
188
src/maestro/tls_bootstrap.rs
Normal file
@@ -0,0 +1,188 @@
|
||||
use std::sync::Arc;
|
||||
use std::time::Duration;
|
||||
|
||||
use rand::RngExt;
|
||||
use tracing::warn;
|
||||
|
||||
use crate::config::ProxyConfig;
|
||||
use crate::startup::{COMPONENT_TLS_FRONT_BOOTSTRAP, StartupTracker};
|
||||
use crate::tls_front::TlsFrontCache;
|
||||
use crate::tls_front::fetcher::TlsFetchStrategy;
|
||||
use crate::transport::UpstreamManager;
|
||||
|
||||
pub(crate) async fn bootstrap_tls_front(
|
||||
config: &ProxyConfig,
|
||||
tls_domains: &[String],
|
||||
upstream_manager: Arc<UpstreamManager>,
|
||||
startup_tracker: &Arc<StartupTracker>,
|
||||
) -> Option<Arc<TlsFrontCache>> {
|
||||
startup_tracker
|
||||
.start_component(
|
||||
COMPONENT_TLS_FRONT_BOOTSTRAP,
|
||||
Some("initialize TLS front cache/bootstrap tasks".to_string()),
|
||||
)
|
||||
.await;
|
||||
|
||||
let tls_cache: Option<Arc<TlsFrontCache>> = if config.censorship.tls_emulation {
|
||||
let cache = Arc::new(TlsFrontCache::new(
|
||||
tls_domains,
|
||||
config.censorship.fake_cert_len,
|
||||
&config.censorship.tls_front_dir,
|
||||
));
|
||||
cache.load_from_disk().await;
|
||||
|
||||
let port = config.censorship.mask_port;
|
||||
let proxy_protocol = config.censorship.mask_proxy_protocol;
|
||||
let mask_host = config
|
||||
.censorship
|
||||
.mask_host
|
||||
.clone()
|
||||
.unwrap_or_else(|| config.censorship.tls_domain.clone());
|
||||
let mask_unix_sock = config.censorship.mask_unix_sock.clone();
|
||||
let tls_fetch_scope = (!config.censorship.tls_fetch_scope.is_empty())
|
||||
.then(|| config.censorship.tls_fetch_scope.clone());
|
||||
let tls_fetch = config.censorship.tls_fetch.clone();
|
||||
let fetch_strategy = TlsFetchStrategy {
|
||||
profiles: tls_fetch.profiles,
|
||||
strict_route: tls_fetch.strict_route,
|
||||
attempt_timeout: Duration::from_millis(tls_fetch.attempt_timeout_ms.max(1)),
|
||||
total_budget: Duration::from_millis(tls_fetch.total_budget_ms.max(1)),
|
||||
grease_enabled: tls_fetch.grease_enabled,
|
||||
deterministic: tls_fetch.deterministic,
|
||||
profile_cache_ttl: Duration::from_secs(tls_fetch.profile_cache_ttl_secs),
|
||||
};
|
||||
let fetch_timeout = fetch_strategy.total_budget;
|
||||
|
||||
let cache_initial = cache.clone();
|
||||
let domains_initial = tls_domains.to_vec();
|
||||
let host_initial = mask_host.clone();
|
||||
let unix_sock_initial = mask_unix_sock.clone();
|
||||
let scope_initial = tls_fetch_scope.clone();
|
||||
let upstream_initial = upstream_manager.clone();
|
||||
let strategy_initial = fetch_strategy.clone();
|
||||
tokio::spawn(async move {
|
||||
let mut join = tokio::task::JoinSet::new();
|
||||
for domain in domains_initial {
|
||||
let cache_domain = cache_initial.clone();
|
||||
let host_domain = host_initial.clone();
|
||||
let unix_sock_domain = unix_sock_initial.clone();
|
||||
let scope_domain = scope_initial.clone();
|
||||
let upstream_domain = upstream_initial.clone();
|
||||
let strategy_domain = strategy_initial.clone();
|
||||
join.spawn(async move {
|
||||
match crate::tls_front::fetcher::fetch_real_tls_with_strategy(
|
||||
&host_domain,
|
||||
port,
|
||||
&domain,
|
||||
&strategy_domain,
|
||||
Some(upstream_domain),
|
||||
scope_domain.as_deref(),
|
||||
proxy_protocol,
|
||||
unix_sock_domain.as_deref(),
|
||||
)
|
||||
.await
|
||||
{
|
||||
Ok(res) => cache_domain.update_from_fetch(&domain, res).await,
|
||||
Err(e) => {
|
||||
warn!(domain = %domain, error = %e, "TLS emulation initial fetch failed")
|
||||
}
|
||||
}
|
||||
});
|
||||
}
|
||||
while let Some(res) = join.join_next().await {
|
||||
if let Err(e) = res {
|
||||
warn!(error = %e, "TLS emulation initial fetch task join failed");
|
||||
}
|
||||
}
|
||||
});
|
||||
|
||||
let cache_timeout = cache.clone();
|
||||
let domains_timeout = tls_domains.to_vec();
|
||||
let fake_cert_len = config.censorship.fake_cert_len;
|
||||
tokio::spawn(async move {
|
||||
tokio::time::sleep(fetch_timeout).await;
|
||||
for domain in domains_timeout {
|
||||
let cached = cache_timeout.get(&domain).await;
|
||||
if cached.domain == "default" {
|
||||
warn!(
|
||||
domain = %domain,
|
||||
timeout_secs = fetch_timeout.as_secs(),
|
||||
fake_cert_len,
|
||||
"TLS-front fetch not ready within timeout; using cache/default fake cert fallback"
|
||||
);
|
||||
}
|
||||
}
|
||||
});
|
||||
|
||||
let cache_refresh = cache.clone();
|
||||
let domains_refresh = tls_domains.to_vec();
|
||||
let host_refresh = mask_host.clone();
|
||||
let unix_sock_refresh = mask_unix_sock.clone();
|
||||
let scope_refresh = tls_fetch_scope.clone();
|
||||
let upstream_refresh = upstream_manager.clone();
|
||||
let strategy_refresh = fetch_strategy.clone();
|
||||
tokio::spawn(async move {
|
||||
loop {
|
||||
let base_secs = rand::rng().random_range(4 * 3600..=6 * 3600);
|
||||
let jitter_secs = rand::rng().random_range(0..=7200);
|
||||
tokio::time::sleep(Duration::from_secs(base_secs + jitter_secs)).await;
|
||||
|
||||
let mut join = tokio::task::JoinSet::new();
|
||||
for domain in domains_refresh.clone() {
|
||||
let cache_domain = cache_refresh.clone();
|
||||
let host_domain = host_refresh.clone();
|
||||
let unix_sock_domain = unix_sock_refresh.clone();
|
||||
let scope_domain = scope_refresh.clone();
|
||||
let upstream_domain = upstream_refresh.clone();
|
||||
let strategy_domain = strategy_refresh.clone();
|
||||
join.spawn(async move {
|
||||
match crate::tls_front::fetcher::fetch_real_tls_with_strategy(
|
||||
&host_domain,
|
||||
port,
|
||||
&domain,
|
||||
&strategy_domain,
|
||||
Some(upstream_domain),
|
||||
scope_domain.as_deref(),
|
||||
proxy_protocol,
|
||||
unix_sock_domain.as_deref(),
|
||||
)
|
||||
.await
|
||||
{
|
||||
Ok(res) => cache_domain.update_from_fetch(&domain, res).await,
|
||||
Err(e) => {
|
||||
warn!(domain = %domain, error = %e, "TLS emulation refresh failed")
|
||||
}
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
while let Some(res) = join.join_next().await {
|
||||
if let Err(e) = res {
|
||||
warn!(error = %e, "TLS emulation refresh task join failed");
|
||||
}
|
||||
}
|
||||
}
|
||||
});
|
||||
|
||||
Some(cache)
|
||||
} else {
|
||||
startup_tracker
|
||||
.skip_component(
|
||||
COMPONENT_TLS_FRONT_BOOTSTRAP,
|
||||
Some("censorship.tls_emulation is false".to_string()),
|
||||
)
|
||||
.await;
|
||||
None
|
||||
};
|
||||
|
||||
if tls_cache.is_some() {
|
||||
startup_tracker
|
||||
.complete_component(
|
||||
COMPONENT_TLS_FRONT_BOOTSTRAP,
|
||||
Some("tls front cache is initialized".to_string()),
|
||||
)
|
||||
.await;
|
||||
}
|
||||
|
||||
tls_cache
|
||||
}
|
||||
1294
src/main.rs
1294
src/main.rs
File diff suppressed because it is too large
Load Diff
1520
src/metrics.rs
1520
src/metrics.rs
File diff suppressed because it is too large
Load Diff
@@ -26,9 +26,7 @@ fn parse_ip_spec(ip_spec: &str) -> Result<IpAddr> {
|
||||
}
|
||||
|
||||
let ip = ip_spec.parse::<IpAddr>().map_err(|_| {
|
||||
ProxyError::Config(format!(
|
||||
"network.dns_overrides IP is invalid: '{ip_spec}'"
|
||||
))
|
||||
ProxyError::Config(format!("network.dns_overrides IP is invalid: '{ip_spec}'"))
|
||||
})?;
|
||||
if matches!(ip, IpAddr::V6(_)) {
|
||||
return Err(ProxyError::Config(format!(
|
||||
@@ -103,9 +101,9 @@ pub fn validate_entries(entries: &[String]) -> Result<()> {
|
||||
/// Replace runtime DNS overrides with a new validated snapshot.
|
||||
pub fn install_entries(entries: &[String]) -> Result<()> {
|
||||
let parsed = parse_entries(entries)?;
|
||||
let mut guard = overrides_store()
|
||||
.write()
|
||||
.map_err(|_| ProxyError::Config("network.dns_overrides runtime lock is poisoned".to_string()))?;
|
||||
let mut guard = overrides_store().write().map_err(|_| {
|
||||
ProxyError::Config("network.dns_overrides runtime lock is poisoned".to_string())
|
||||
})?;
|
||||
*guard = parsed;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
@@ -1,4 +1,5 @@
|
||||
#![allow(dead_code)]
|
||||
#![allow(clippy::items_after_test_module)]
|
||||
|
||||
use std::collections::HashMap;
|
||||
use std::net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr, UdpSocket};
|
||||
@@ -8,9 +9,12 @@ use tokio::task::JoinSet;
|
||||
use tokio::time::timeout;
|
||||
use tracing::{debug, info, warn};
|
||||
|
||||
use crate::config::NetworkConfig;
|
||||
use crate::config::{NetworkConfig, UpstreamConfig, UpstreamType};
|
||||
use crate::error::Result;
|
||||
use crate::network::stun::{stun_probe_dual, DualStunResult, IpFamily, StunProbeResult};
|
||||
use crate::network::stun::{
|
||||
DualStunResult, IpFamily, StunProbeResult, stun_probe_family_with_bind,
|
||||
};
|
||||
use crate::transport::UpstreamManager;
|
||||
|
||||
#[derive(Debug, Clone, Default)]
|
||||
pub struct NetworkProbe {
|
||||
@@ -57,28 +61,28 @@ const STUN_BATCH_TIMEOUT: Duration = Duration::from_secs(5);
|
||||
|
||||
pub async fn run_probe(
|
||||
config: &NetworkConfig,
|
||||
upstreams: &[UpstreamConfig],
|
||||
nat_probe: bool,
|
||||
stun_nat_probe_concurrency: usize,
|
||||
) -> Result<NetworkProbe> {
|
||||
let mut probe = NetworkProbe::default();
|
||||
let servers = collect_stun_servers(config);
|
||||
let mut detected_ipv4 = detect_local_ip_v4();
|
||||
let mut detected_ipv6 = detect_local_ip_v6();
|
||||
let mut explicit_detected_ipv4 = false;
|
||||
let mut explicit_detected_ipv6 = false;
|
||||
let mut explicit_reflected_ipv4 = false;
|
||||
let mut explicit_reflected_ipv6 = false;
|
||||
let mut strict_bind_ipv4_requested = false;
|
||||
let mut strict_bind_ipv6_requested = false;
|
||||
|
||||
probe.detected_ipv4 = detect_local_ip_v4();
|
||||
probe.detected_ipv6 = detect_local_ip_v6();
|
||||
|
||||
probe.ipv4_is_bogon = probe.detected_ipv4.map(is_bogon_v4).unwrap_or(false);
|
||||
probe.ipv6_is_bogon = probe.detected_ipv6.map(is_bogon_v6).unwrap_or(false);
|
||||
|
||||
let stun_res = if nat_probe && config.stun_use {
|
||||
let servers = collect_stun_servers(config);
|
||||
let global_stun_res = if nat_probe && config.stun_use {
|
||||
if servers.is_empty() {
|
||||
warn!("STUN probe is enabled but network.stun_servers is empty");
|
||||
DualStunResult::default()
|
||||
} else {
|
||||
probe_stun_servers_parallel(
|
||||
&servers,
|
||||
stun_nat_probe_concurrency.max(1),
|
||||
)
|
||||
.await
|
||||
probe_stun_servers_parallel(&servers, stun_nat_probe_concurrency.max(1), None, None)
|
||||
.await
|
||||
}
|
||||
} else if nat_probe {
|
||||
info!("STUN probe is disabled by network.stun_use=false");
|
||||
@@ -86,18 +90,118 @@ pub async fn run_probe(
|
||||
} else {
|
||||
DualStunResult::default()
|
||||
};
|
||||
probe.reflected_ipv4 = stun_res.v4.map(|r| r.reflected_addr);
|
||||
probe.reflected_ipv6 = stun_res.v6.map(|r| r.reflected_addr);
|
||||
let mut reflected_ipv4 = global_stun_res.v4.map(|r| r.reflected_addr);
|
||||
let mut reflected_ipv6 = global_stun_res.v6.map(|r| r.reflected_addr);
|
||||
|
||||
for upstream in upstreams.iter().filter(|upstream| upstream.enabled) {
|
||||
let UpstreamType::Direct {
|
||||
interface,
|
||||
bind_addresses,
|
||||
} = &upstream.upstream_type
|
||||
else {
|
||||
continue;
|
||||
};
|
||||
if let Some(addrs) = bind_addresses.as_ref().filter(|v| !v.is_empty()) {
|
||||
let mut saw_parsed_ip = false;
|
||||
for value in addrs {
|
||||
if let Ok(ip) = value.parse::<IpAddr>() {
|
||||
saw_parsed_ip = true;
|
||||
if ip.is_ipv4() {
|
||||
strict_bind_ipv4_requested = true;
|
||||
} else {
|
||||
strict_bind_ipv6_requested = true;
|
||||
}
|
||||
}
|
||||
}
|
||||
if !saw_parsed_ip {
|
||||
strict_bind_ipv4_requested = true;
|
||||
strict_bind_ipv6_requested = true;
|
||||
}
|
||||
}
|
||||
|
||||
let bind_v4 = UpstreamManager::resolve_bind_address(
|
||||
interface,
|
||||
bind_addresses,
|
||||
SocketAddr::new(IpAddr::V4(Ipv4Addr::new(198, 51, 100, 1)), 443),
|
||||
None,
|
||||
true,
|
||||
);
|
||||
let bind_v6 = UpstreamManager::resolve_bind_address(
|
||||
interface,
|
||||
bind_addresses,
|
||||
SocketAddr::new(
|
||||
IpAddr::V6(Ipv6Addr::new(0x2001, 0xdb8, 0, 0, 0, 0, 0, 1)),
|
||||
443,
|
||||
),
|
||||
None,
|
||||
true,
|
||||
);
|
||||
|
||||
if let Some(IpAddr::V4(ip)) = bind_v4
|
||||
&& !explicit_detected_ipv4
|
||||
{
|
||||
detected_ipv4 = Some(ip);
|
||||
explicit_detected_ipv4 = true;
|
||||
}
|
||||
if let Some(IpAddr::V6(ip)) = bind_v6
|
||||
&& !explicit_detected_ipv6
|
||||
{
|
||||
detected_ipv6 = Some(ip);
|
||||
explicit_detected_ipv6 = true;
|
||||
}
|
||||
if bind_v4.is_none() && bind_v6.is_none() {
|
||||
continue;
|
||||
}
|
||||
|
||||
if !(nat_probe && config.stun_use) || servers.is_empty() {
|
||||
continue;
|
||||
}
|
||||
|
||||
let direct_stun_res = probe_stun_servers_parallel(
|
||||
&servers,
|
||||
stun_nat_probe_concurrency.max(1),
|
||||
bind_v4,
|
||||
bind_v6,
|
||||
)
|
||||
.await;
|
||||
if let Some(reflected) = direct_stun_res.v4.map(|r| r.reflected_addr) {
|
||||
reflected_ipv4 = Some(reflected);
|
||||
explicit_reflected_ipv4 = true;
|
||||
}
|
||||
if let Some(reflected) = direct_stun_res.v6.map(|r| r.reflected_addr) {
|
||||
reflected_ipv6 = Some(reflected);
|
||||
explicit_reflected_ipv6 = true;
|
||||
}
|
||||
}
|
||||
|
||||
if strict_bind_ipv4_requested && !explicit_detected_ipv4 {
|
||||
detected_ipv4 = None;
|
||||
reflected_ipv4 = None;
|
||||
} else if strict_bind_ipv4_requested && !explicit_reflected_ipv4 {
|
||||
reflected_ipv4 = None;
|
||||
}
|
||||
if strict_bind_ipv6_requested && !explicit_detected_ipv6 {
|
||||
detected_ipv6 = None;
|
||||
reflected_ipv6 = None;
|
||||
} else if strict_bind_ipv6_requested && !explicit_reflected_ipv6 {
|
||||
reflected_ipv6 = None;
|
||||
}
|
||||
|
||||
probe.detected_ipv4 = detected_ipv4;
|
||||
probe.detected_ipv6 = detected_ipv6;
|
||||
probe.reflected_ipv4 = reflected_ipv4;
|
||||
probe.reflected_ipv6 = reflected_ipv6;
|
||||
probe.ipv4_is_bogon = probe.detected_ipv4.map(is_bogon_v4).unwrap_or(false);
|
||||
probe.ipv6_is_bogon = probe.detected_ipv6.map(is_bogon_v6).unwrap_or(false);
|
||||
|
||||
// If STUN is blocked but IPv4 is private, try HTTP public-IP fallback.
|
||||
if nat_probe
|
||||
&& probe.reflected_ipv4.is_none()
|
||||
&& probe.detected_ipv4.map(is_bogon_v4).unwrap_or(false)
|
||||
&& let Some(public_ip) = detect_public_ipv4_http(&config.http_ip_detect_urls).await
|
||||
{
|
||||
if let Some(public_ip) = detect_public_ipv4_http(&config.http_ip_detect_urls).await {
|
||||
probe.reflected_ipv4 = Some(SocketAddr::new(IpAddr::V4(public_ip), 0));
|
||||
info!(public_ip = %public_ip, "STUN unavailable, using HTTP public IPv4 fallback");
|
||||
}
|
||||
probe.reflected_ipv4 = Some(SocketAddr::new(IpAddr::V4(public_ip), 0));
|
||||
info!(public_ip = %public_ip, "STUN unavailable, using HTTP public IPv4 fallback");
|
||||
}
|
||||
|
||||
probe.ipv4_nat_detected = match (probe.detected_ipv4, probe.reflected_ipv4) {
|
||||
@@ -111,12 +215,20 @@ pub async fn run_probe(
|
||||
|
||||
probe.ipv4_usable = config.ipv4
|
||||
&& probe.detected_ipv4.is_some()
|
||||
&& (!probe.ipv4_is_bogon || probe.reflected_ipv4.map(|r| !is_bogon(r.ip())).unwrap_or(false));
|
||||
&& (!probe.ipv4_is_bogon
|
||||
|| probe
|
||||
.reflected_ipv4
|
||||
.map(|r| !is_bogon(r.ip()))
|
||||
.unwrap_or(false));
|
||||
|
||||
let ipv6_enabled = config.ipv6.unwrap_or(probe.detected_ipv6.is_some());
|
||||
probe.ipv6_usable = ipv6_enabled
|
||||
&& probe.detected_ipv6.is_some()
|
||||
&& (!probe.ipv6_is_bogon || probe.reflected_ipv6.map(|r| !is_bogon(r.ip())).unwrap_or(false));
|
||||
&& (!probe.ipv6_is_bogon
|
||||
|| probe
|
||||
.reflected_ipv6
|
||||
.map(|r| !is_bogon(r.ip()))
|
||||
.unwrap_or(false));
|
||||
|
||||
Ok(probe)
|
||||
}
|
||||
@@ -162,6 +274,8 @@ fn collect_stun_servers(config: &NetworkConfig) -> Vec<String> {
|
||||
async fn probe_stun_servers_parallel(
|
||||
servers: &[String],
|
||||
concurrency: usize,
|
||||
bind_v4: Option<IpAddr>,
|
||||
bind_v6: Option<IpAddr>,
|
||||
) -> DualStunResult {
|
||||
let mut join_set = JoinSet::new();
|
||||
let mut next_idx = 0usize;
|
||||
@@ -173,7 +287,12 @@ async fn probe_stun_servers_parallel(
|
||||
let stun_addr = servers[next_idx].clone();
|
||||
next_idx += 1;
|
||||
join_set.spawn(async move {
|
||||
let res = timeout(STUN_BATCH_TIMEOUT, stun_probe_dual(&stun_addr)).await;
|
||||
let res = timeout(STUN_BATCH_TIMEOUT, async {
|
||||
let v4 = stun_probe_family_with_bind(&stun_addr, IpFamily::V4, bind_v4).await?;
|
||||
let v6 = stun_probe_family_with_bind(&stun_addr, IpFamily::V6, bind_v6).await?;
|
||||
Ok::<DualStunResult, crate::error::ProxyError>(DualStunResult { v4, v6 })
|
||||
})
|
||||
.await;
|
||||
(stun_addr, res)
|
||||
});
|
||||
}
|
||||
@@ -185,11 +304,15 @@ async fn probe_stun_servers_parallel(
|
||||
match task {
|
||||
Ok((stun_addr, Ok(Ok(result)))) => {
|
||||
if let Some(v4) = result.v4 {
|
||||
let entry = best_v4_by_ip.entry(v4.reflected_addr.ip()).or_insert((0, v4));
|
||||
let entry = best_v4_by_ip
|
||||
.entry(v4.reflected_addr.ip())
|
||||
.or_insert((0, v4));
|
||||
entry.0 += 1;
|
||||
}
|
||||
if let Some(v6) = result.v6 {
|
||||
let entry = best_v6_by_ip.entry(v6.reflected_addr.ip()).or_insert((0, v6));
|
||||
let entry = best_v6_by_ip
|
||||
.entry(v6.reflected_addr.ip())
|
||||
.or_insert((0, v6));
|
||||
entry.0 += 1;
|
||||
}
|
||||
if result.v4.is_some() || result.v6.is_some() {
|
||||
@@ -209,35 +332,36 @@ async fn probe_stun_servers_parallel(
|
||||
}
|
||||
|
||||
let mut out = DualStunResult::default();
|
||||
if let Some((_, best)) = best_v4_by_ip
|
||||
.into_values()
|
||||
.max_by_key(|(count, _)| *count)
|
||||
{
|
||||
if let Some((_, best)) = best_v4_by_ip.into_values().max_by_key(|(count, _)| *count) {
|
||||
info!("STUN-Quorum reached, IP: {}", best.reflected_addr.ip());
|
||||
out.v4 = Some(best);
|
||||
}
|
||||
if let Some((_, best)) = best_v6_by_ip
|
||||
.into_values()
|
||||
.max_by_key(|(count, _)| *count)
|
||||
{
|
||||
if let Some((_, best)) = best_v6_by_ip.into_values().max_by_key(|(count, _)| *count) {
|
||||
info!("STUN-Quorum reached, IP: {}", best.reflected_addr.ip());
|
||||
out.v6 = Some(best);
|
||||
}
|
||||
out
|
||||
}
|
||||
|
||||
pub fn decide_network_capabilities(config: &NetworkConfig, probe: &NetworkProbe) -> NetworkDecision {
|
||||
pub fn decide_network_capabilities(
|
||||
config: &NetworkConfig,
|
||||
probe: &NetworkProbe,
|
||||
middle_proxy_nat_ip: Option<IpAddr>,
|
||||
) -> NetworkDecision {
|
||||
let ipv4_dc = config.ipv4 && probe.detected_ipv4.is_some();
|
||||
let ipv6_dc = config.ipv6.unwrap_or(probe.detected_ipv6.is_some()) && probe.detected_ipv6.is_some();
|
||||
let ipv6_dc =
|
||||
config.ipv6.unwrap_or(probe.detected_ipv6.is_some()) && probe.detected_ipv6.is_some();
|
||||
let nat_ip_v4 = matches!(middle_proxy_nat_ip, Some(IpAddr::V4(_)));
|
||||
let nat_ip_v6 = matches!(middle_proxy_nat_ip, Some(IpAddr::V6(_)));
|
||||
|
||||
let ipv4_me = config.ipv4
|
||||
&& probe.detected_ipv4.is_some()
|
||||
&& (!probe.ipv4_is_bogon || probe.reflected_ipv4.is_some());
|
||||
&& (!probe.ipv4_is_bogon || probe.reflected_ipv4.is_some() || nat_ip_v4);
|
||||
|
||||
let ipv6_enabled = config.ipv6.unwrap_or(probe.detected_ipv6.is_some());
|
||||
let ipv6_me = ipv6_enabled
|
||||
&& probe.detected_ipv6.is_some()
|
||||
&& (!probe.ipv6_is_bogon || probe.reflected_ipv6.is_some());
|
||||
&& (!probe.ipv6_is_bogon || probe.reflected_ipv6.is_some() || nat_ip_v6);
|
||||
|
||||
let effective_prefer = match config.prefer {
|
||||
6 if ipv6_me || ipv6_dc => 6,
|
||||
@@ -262,6 +386,58 @@ pub fn decide_network_capabilities(config: &NetworkConfig, probe: &NetworkProbe)
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
use crate::config::NetworkConfig;
|
||||
|
||||
#[test]
|
||||
fn manual_nat_ip_enables_ipv4_me_without_reflection() {
|
||||
let config = NetworkConfig {
|
||||
ipv4: true,
|
||||
..Default::default()
|
||||
};
|
||||
let probe = NetworkProbe {
|
||||
detected_ipv4: Some(Ipv4Addr::new(10, 0, 0, 10)),
|
||||
ipv4_is_bogon: true,
|
||||
..Default::default()
|
||||
};
|
||||
|
||||
let decision = decide_network_capabilities(
|
||||
&config,
|
||||
&probe,
|
||||
Some(IpAddr::V4(Ipv4Addr::new(1, 2, 3, 4))),
|
||||
);
|
||||
|
||||
assert!(decision.ipv4_me);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn manual_nat_ip_does_not_enable_other_family() {
|
||||
let config = NetworkConfig {
|
||||
ipv4: true,
|
||||
ipv6: Some(true),
|
||||
..Default::default()
|
||||
};
|
||||
let probe = NetworkProbe {
|
||||
detected_ipv4: Some(Ipv4Addr::new(10, 0, 0, 10)),
|
||||
detected_ipv6: Some(Ipv6Addr::LOCALHOST),
|
||||
ipv4_is_bogon: true,
|
||||
ipv6_is_bogon: true,
|
||||
..Default::default()
|
||||
};
|
||||
|
||||
let decision = decide_network_capabilities(
|
||||
&config,
|
||||
&probe,
|
||||
Some(IpAddr::V4(Ipv4Addr::new(1, 2, 3, 4))),
|
||||
);
|
||||
|
||||
assert!(decision.ipv4_me);
|
||||
assert!(!decision.ipv6_me);
|
||||
}
|
||||
}
|
||||
|
||||
fn detect_local_ip_v4() -> Option<Ipv4Addr> {
|
||||
let socket = UdpSocket::bind("0.0.0.0:0").ok()?;
|
||||
socket.connect("8.8.8.8:80").ok()?;
|
||||
@@ -280,6 +456,14 @@ fn detect_local_ip_v6() -> Option<Ipv6Addr> {
|
||||
}
|
||||
}
|
||||
|
||||
pub fn detect_interface_ipv4() -> Option<Ipv4Addr> {
|
||||
detect_local_ip_v4()
|
||||
}
|
||||
|
||||
pub fn detect_interface_ipv6() -> Option<Ipv6Addr> {
|
||||
detect_local_ip_v6()
|
||||
}
|
||||
|
||||
pub fn is_bogon(ip: IpAddr) -> bool {
|
||||
match ip {
|
||||
IpAddr::V4(v4) => is_bogon_v4(v4),
|
||||
@@ -353,10 +537,26 @@ pub fn is_bogon_v6(ip: Ipv6Addr) -> bool {
|
||||
|
||||
pub fn log_probe_result(probe: &NetworkProbe, decision: &NetworkDecision) {
|
||||
info!(
|
||||
ipv4 = probe.detected_ipv4.as_ref().map(|v| v.to_string()).unwrap_or_else(|| "-".into()),
|
||||
ipv6 = probe.detected_ipv6.as_ref().map(|v| v.to_string()).unwrap_or_else(|| "-".into()),
|
||||
reflected_v4 = probe.reflected_ipv4.as_ref().map(|v| v.ip().to_string()).unwrap_or_else(|| "-".into()),
|
||||
reflected_v6 = probe.reflected_ipv6.as_ref().map(|v| v.ip().to_string()).unwrap_or_else(|| "-".into()),
|
||||
ipv4 = probe
|
||||
.detected_ipv4
|
||||
.as_ref()
|
||||
.map(|v| v.to_string())
|
||||
.unwrap_or_else(|| "-".into()),
|
||||
ipv6 = probe
|
||||
.detected_ipv6
|
||||
.as_ref()
|
||||
.map(|v| v.to_string())
|
||||
.unwrap_or_else(|| "-".into()),
|
||||
reflected_v4 = probe
|
||||
.reflected_ipv4
|
||||
.as_ref()
|
||||
.map(|v| v.ip().to_string())
|
||||
.unwrap_or_else(|| "-".into()),
|
||||
reflected_v6 = probe
|
||||
.reflected_ipv6
|
||||
.as_ref()
|
||||
.map(|v| v.ip().to_string())
|
||||
.unwrap_or_else(|| "-".into()),
|
||||
ipv4_bogon = probe.ipv4_is_bogon,
|
||||
ipv6_bogon = probe.ipv6_is_bogon,
|
||||
ipv4_me = decision.ipv4_me,
|
||||
|
||||
@@ -2,13 +2,20 @@
|
||||
#![allow(dead_code)]
|
||||
|
||||
use std::net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr};
|
||||
use std::sync::OnceLock;
|
||||
|
||||
use tokio::net::{lookup_host, UdpSocket};
|
||||
use tokio::time::{timeout, Duration, sleep};
|
||||
use tokio::net::{UdpSocket, lookup_host};
|
||||
use tokio::time::{Duration, sleep, timeout};
|
||||
|
||||
use crate::crypto::SecureRandom;
|
||||
use crate::error::{ProxyError, Result};
|
||||
use crate::network::dns_overrides::{resolve, split_host_port};
|
||||
|
||||
fn stun_rng() -> &'static SecureRandom {
|
||||
static STUN_RNG: OnceLock<SecureRandom> = OnceLock::new();
|
||||
STUN_RNG.get_or_init(SecureRandom::new)
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
|
||||
pub enum IpFamily {
|
||||
V4,
|
||||
@@ -34,13 +41,13 @@ pub async fn stun_probe_dual(stun_addr: &str) -> Result<DualStunResult> {
|
||||
stun_probe_family(stun_addr, IpFamily::V6),
|
||||
);
|
||||
|
||||
Ok(DualStunResult {
|
||||
v4: v4?,
|
||||
v6: v6?,
|
||||
})
|
||||
Ok(DualStunResult { v4: v4?, v6: v6? })
|
||||
}
|
||||
|
||||
pub async fn stun_probe_family(stun_addr: &str, family: IpFamily) -> Result<Option<StunProbeResult>> {
|
||||
pub async fn stun_probe_family(
|
||||
stun_addr: &str,
|
||||
family: IpFamily,
|
||||
) -> Result<Option<StunProbeResult>> {
|
||||
stun_probe_family_with_bind(stun_addr, family, None).await
|
||||
}
|
||||
|
||||
@@ -49,8 +56,6 @@ pub async fn stun_probe_family_with_bind(
|
||||
family: IpFamily,
|
||||
bind_ip: Option<IpAddr>,
|
||||
) -> Result<Option<StunProbeResult>> {
|
||||
use rand::RngCore;
|
||||
|
||||
let bind_addr = match (family, bind_ip) {
|
||||
(IpFamily::V4, Some(IpAddr::V4(ip))) => SocketAddr::new(IpAddr::V4(ip), 0),
|
||||
(IpFamily::V6, Some(IpAddr::V6(ip))) => SocketAddr::new(IpAddr::V6(ip), 0),
|
||||
@@ -71,13 +76,18 @@ pub async fn stun_probe_family_with_bind(
|
||||
if let Some(addr) = target_addr {
|
||||
match socket.connect(addr).await {
|
||||
Ok(()) => {}
|
||||
Err(e) if family == IpFamily::V6 && matches!(
|
||||
e.kind(),
|
||||
std::io::ErrorKind::NetworkUnreachable
|
||||
| std::io::ErrorKind::HostUnreachable
|
||||
| std::io::ErrorKind::Unsupported
|
||||
| std::io::ErrorKind::NetworkDown
|
||||
) => return Ok(None),
|
||||
Err(e)
|
||||
if family == IpFamily::V6
|
||||
&& matches!(
|
||||
e.kind(),
|
||||
std::io::ErrorKind::NetworkUnreachable
|
||||
| std::io::ErrorKind::HostUnreachable
|
||||
| std::io::ErrorKind::Unsupported
|
||||
| std::io::ErrorKind::NetworkDown
|
||||
) =>
|
||||
{
|
||||
return Ok(None);
|
||||
}
|
||||
Err(e) => return Err(ProxyError::Proxy(format!("STUN connect failed: {e}"))),
|
||||
}
|
||||
} else {
|
||||
@@ -88,7 +98,7 @@ pub async fn stun_probe_family_with_bind(
|
||||
req[0..2].copy_from_slice(&0x0001u16.to_be_bytes()); // Binding Request
|
||||
req[2..4].copy_from_slice(&0u16.to_be_bytes()); // length
|
||||
req[4..8].copy_from_slice(&0x2112A442u32.to_be_bytes()); // magic cookie
|
||||
rand::rng().fill_bytes(&mut req[8..20]); // transaction ID
|
||||
stun_rng().fill(&mut req[8..20]); // transaction ID
|
||||
|
||||
let mut buf = [0u8; 256];
|
||||
let mut attempt = 0;
|
||||
@@ -120,16 +130,16 @@ pub async fn stun_probe_family_with_bind(
|
||||
|
||||
let magic = 0x2112A442u32.to_be_bytes();
|
||||
let txid = &req[8..20];
|
||||
let mut idx = 20;
|
||||
while idx + 4 <= n {
|
||||
let atype = u16::from_be_bytes(buf[idx..idx + 2].try_into().unwrap());
|
||||
let alen = u16::from_be_bytes(buf[idx + 2..idx + 4].try_into().unwrap()) as usize;
|
||||
idx += 4;
|
||||
if idx + alen > n {
|
||||
break;
|
||||
}
|
||||
let mut idx = 20;
|
||||
while idx + 4 <= n {
|
||||
let atype = u16::from_be_bytes(buf[idx..idx + 2].try_into().unwrap());
|
||||
let alen = u16::from_be_bytes(buf[idx + 2..idx + 4].try_into().unwrap()) as usize;
|
||||
idx += 4;
|
||||
if idx + alen > n {
|
||||
break;
|
||||
}
|
||||
|
||||
match atype {
|
||||
match atype {
|
||||
0x0020 /* XOR-MAPPED-ADDRESS */ | 0x0001 /* MAPPED-ADDRESS */ => {
|
||||
if alen < 8 {
|
||||
break;
|
||||
@@ -198,9 +208,8 @@ pub async fn stun_probe_family_with_bind(
|
||||
_ => {}
|
||||
}
|
||||
|
||||
idx += (alen + 3) & !3;
|
||||
}
|
||||
|
||||
idx += (alen + 3) & !3;
|
||||
}
|
||||
}
|
||||
|
||||
Ok(None)
|
||||
@@ -228,7 +237,11 @@ async fn resolve_stun_addr(stun_addr: &str, family: IpFamily) -> Result<Option<S
|
||||
.await
|
||||
.map_err(|e| ProxyError::Proxy(format!("STUN resolve failed: {e}")))?;
|
||||
|
||||
let target = addrs
|
||||
.find(|a| matches!((a.is_ipv4(), family), (true, IpFamily::V4) | (false, IpFamily::V6)));
|
||||
let target = addrs.find(|a| {
|
||||
matches!(
|
||||
(a.is_ipv4(), family),
|
||||
(true, IpFamily::V4) | (false, IpFamily::V6)
|
||||
)
|
||||
});
|
||||
Ok(target)
|
||||
}
|
||||
|
||||
@@ -33,35 +33,89 @@ pub static TG_DATACENTERS_V6: LazyLock<Vec<IpAddr>> = LazyLock::new(|| {
|
||||
|
||||
// ============= Middle Proxies (for advertising) =============
|
||||
|
||||
pub static TG_MIDDLE_PROXIES_V4: LazyLock<std::collections::HashMap<i32, Vec<(IpAddr, u16)>>> =
|
||||
pub static TG_MIDDLE_PROXIES_V4: LazyLock<std::collections::HashMap<i32, Vec<(IpAddr, u16)>>> =
|
||||
LazyLock::new(|| {
|
||||
let mut m = std::collections::HashMap::new();
|
||||
m.insert(1, vec![(IpAddr::V4(Ipv4Addr::new(149, 154, 175, 50)), 8888)]);
|
||||
m.insert(-1, vec![(IpAddr::V4(Ipv4Addr::new(149, 154, 175, 50)), 8888)]);
|
||||
m.insert(2, vec![(IpAddr::V4(Ipv4Addr::new(149, 154, 161, 144)), 8888)]);
|
||||
m.insert(-2, vec![(IpAddr::V4(Ipv4Addr::new(149, 154, 161, 144)), 8888)]);
|
||||
m.insert(3, vec![(IpAddr::V4(Ipv4Addr::new(149, 154, 175, 100)), 8888)]);
|
||||
m.insert(-3, vec![(IpAddr::V4(Ipv4Addr::new(149, 154, 175, 100)), 8888)]);
|
||||
m.insert(
|
||||
1,
|
||||
vec![(IpAddr::V4(Ipv4Addr::new(149, 154, 175, 50)), 8888)],
|
||||
);
|
||||
m.insert(
|
||||
-1,
|
||||
vec![(IpAddr::V4(Ipv4Addr::new(149, 154, 175, 50)), 8888)],
|
||||
);
|
||||
m.insert(
|
||||
2,
|
||||
vec![(IpAddr::V4(Ipv4Addr::new(149, 154, 161, 144)), 8888)],
|
||||
);
|
||||
m.insert(
|
||||
-2,
|
||||
vec![(IpAddr::V4(Ipv4Addr::new(149, 154, 161, 144)), 8888)],
|
||||
);
|
||||
m.insert(
|
||||
3,
|
||||
vec![(IpAddr::V4(Ipv4Addr::new(149, 154, 175, 100)), 8888)],
|
||||
);
|
||||
m.insert(
|
||||
-3,
|
||||
vec![(IpAddr::V4(Ipv4Addr::new(149, 154, 175, 100)), 8888)],
|
||||
);
|
||||
m.insert(4, vec![(IpAddr::V4(Ipv4Addr::new(91, 108, 4, 136)), 8888)]);
|
||||
m.insert(-4, vec![(IpAddr::V4(Ipv4Addr::new(149, 154, 165, 109)), 8888)]);
|
||||
m.insert(
|
||||
-4,
|
||||
vec![(IpAddr::V4(Ipv4Addr::new(149, 154, 165, 109)), 8888)],
|
||||
);
|
||||
m.insert(5, vec![(IpAddr::V4(Ipv4Addr::new(91, 108, 56, 183)), 8888)]);
|
||||
m.insert(-5, vec![(IpAddr::V4(Ipv4Addr::new(91, 108, 56, 183)), 8888)]);
|
||||
m.insert(
|
||||
-5,
|
||||
vec![(IpAddr::V4(Ipv4Addr::new(91, 108, 56, 183)), 8888)],
|
||||
);
|
||||
m
|
||||
});
|
||||
|
||||
pub static TG_MIDDLE_PROXIES_V6: LazyLock<std::collections::HashMap<i32, Vec<(IpAddr, u16)>>> =
|
||||
pub static TG_MIDDLE_PROXIES_V6: LazyLock<std::collections::HashMap<i32, Vec<(IpAddr, u16)>>> =
|
||||
LazyLock::new(|| {
|
||||
let mut m = std::collections::HashMap::new();
|
||||
m.insert(1, vec![(IpAddr::V6("2001:b28:f23d:f001::d".parse().unwrap()), 8888)]);
|
||||
m.insert(-1, vec![(IpAddr::V6("2001:b28:f23d:f001::d".parse().unwrap()), 8888)]);
|
||||
m.insert(2, vec![(IpAddr::V6("2001:67c:04e8:f002::d".parse().unwrap()), 80)]);
|
||||
m.insert(-2, vec![(IpAddr::V6("2001:67c:04e8:f002::d".parse().unwrap()), 80)]);
|
||||
m.insert(3, vec![(IpAddr::V6("2001:b28:f23d:f003::d".parse().unwrap()), 8888)]);
|
||||
m.insert(-3, vec![(IpAddr::V6("2001:b28:f23d:f003::d".parse().unwrap()), 8888)]);
|
||||
m.insert(4, vec![(IpAddr::V6("2001:67c:04e8:f004::d".parse().unwrap()), 8888)]);
|
||||
m.insert(-4, vec![(IpAddr::V6("2001:67c:04e8:f004::d".parse().unwrap()), 8888)]);
|
||||
m.insert(5, vec![(IpAddr::V6("2001:b28:f23f:f005::d".parse().unwrap()), 8888)]);
|
||||
m.insert(-5, vec![(IpAddr::V6("2001:b28:f23f:f005::d".parse().unwrap()), 8888)]);
|
||||
m.insert(
|
||||
1,
|
||||
vec![(IpAddr::V6("2001:b28:f23d:f001::d".parse().unwrap()), 8888)],
|
||||
);
|
||||
m.insert(
|
||||
-1,
|
||||
vec![(IpAddr::V6("2001:b28:f23d:f001::d".parse().unwrap()), 8888)],
|
||||
);
|
||||
m.insert(
|
||||
2,
|
||||
vec![(IpAddr::V6("2001:67c:04e8:f002::d".parse().unwrap()), 80)],
|
||||
);
|
||||
m.insert(
|
||||
-2,
|
||||
vec![(IpAddr::V6("2001:67c:04e8:f002::d".parse().unwrap()), 80)],
|
||||
);
|
||||
m.insert(
|
||||
3,
|
||||
vec![(IpAddr::V6("2001:b28:f23d:f003::d".parse().unwrap()), 8888)],
|
||||
);
|
||||
m.insert(
|
||||
-3,
|
||||
vec![(IpAddr::V6("2001:b28:f23d:f003::d".parse().unwrap()), 8888)],
|
||||
);
|
||||
m.insert(
|
||||
4,
|
||||
vec![(IpAddr::V6("2001:67c:04e8:f004::d".parse().unwrap()), 8888)],
|
||||
);
|
||||
m.insert(
|
||||
-4,
|
||||
vec![(IpAddr::V6("2001:67c:04e8:f004::d".parse().unwrap()), 8888)],
|
||||
);
|
||||
m.insert(
|
||||
5,
|
||||
vec![(IpAddr::V6("2001:b28:f23f:f005::d".parse().unwrap()), 8888)],
|
||||
);
|
||||
m.insert(
|
||||
-5,
|
||||
vec![(IpAddr::V6("2001:b28:f23f:f005::d".parse().unwrap()), 8888)],
|
||||
);
|
||||
m
|
||||
});
|
||||
|
||||
@@ -89,12 +143,12 @@ impl ProtoTag {
|
||||
_ => None,
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
/// Convert to 4 bytes (little-endian)
|
||||
pub fn to_bytes(self) -> [u8; 4] {
|
||||
(self as u32).to_le_bytes()
|
||||
}
|
||||
|
||||
|
||||
/// Get protocol tag as bytes slice
|
||||
pub fn as_bytes(&self) -> &'static [u8; 4] {
|
||||
match self {
|
||||
@@ -152,11 +206,29 @@ pub const TLS_RECORD_CHANGE_CIPHER: u8 = 0x14;
|
||||
pub const TLS_RECORD_APPLICATION: u8 = 0x17;
|
||||
/// TLS record type: Alert
|
||||
pub const TLS_RECORD_ALERT: u8 = 0x15;
|
||||
/// Maximum TLS record size
|
||||
pub const MAX_TLS_RECORD_SIZE: usize = 16384;
|
||||
/// Maximum TLS chunk size (with overhead)
|
||||
/// RFC 8446 §5.2 allows up to 16384 + 256 bytes of ciphertext
|
||||
pub const MAX_TLS_CHUNK_SIZE: usize = 16384 + 256;
|
||||
/// Maximum TLS plaintext record payload size.
|
||||
/// RFC 8446 §5.1: "The length MUST NOT exceed 2^14 bytes."
|
||||
/// Use this for validating incoming unencrypted records
|
||||
/// (ClientHello, ChangeCipherSpec, unprotected Handshake messages).
|
||||
pub const MAX_TLS_PLAINTEXT_SIZE: usize = 16_384;
|
||||
|
||||
/// Structural minimum for a valid TLS 1.3 ClientHello with SNI.
|
||||
/// Derived from RFC 8446 §4.1.2 field layout + Appendix D.4 compat mode.
|
||||
/// Deliberately conservative (below any real client) to avoid false
|
||||
/// positives on legitimate connections with compact extension sets.
|
||||
pub const MIN_TLS_CLIENT_HELLO_SIZE: usize = 100;
|
||||
|
||||
/// Maximum TLS ciphertext record payload size.
|
||||
/// RFC 8446 §5.2: "The length MUST NOT exceed 2^14 + 256 bytes."
|
||||
/// The +256 accounts for maximum AEAD expansion overhead.
|
||||
/// Use this for validating or sizing buffers for encrypted records.
|
||||
pub const MAX_TLS_CIPHERTEXT_SIZE: usize = 16_384 + 256;
|
||||
|
||||
#[deprecated(note = "use MAX_TLS_PLAINTEXT_SIZE")]
|
||||
pub const MAX_TLS_RECORD_SIZE: usize = MAX_TLS_PLAINTEXT_SIZE;
|
||||
|
||||
#[deprecated(note = "use MAX_TLS_CIPHERTEXT_SIZE")]
|
||||
pub const MAX_TLS_CHUNK_SIZE: usize = MAX_TLS_CIPHERTEXT_SIZE;
|
||||
|
||||
/// Secure Intermediate payload is expected to be 4-byte aligned.
|
||||
pub fn is_valid_secure_payload_len(data_len: usize) -> bool {
|
||||
@@ -204,9 +276,7 @@ pub const SMALL_BUFFER_SIZE: usize = 8192;
|
||||
// ============= Statistics =============
|
||||
|
||||
/// Duration buckets for histogram metrics
|
||||
pub static DURATION_BUCKETS: &[f64] = &[
|
||||
0.1, 0.5, 1.0, 2.0, 5.0, 15.0, 60.0, 300.0, 600.0, 1800.0,
|
||||
];
|
||||
pub static DURATION_BUCKETS: &[f64] = &[0.1, 0.5, 1.0, 2.0, 5.0, 15.0, 60.0, 300.0, 600.0, 1800.0];
|
||||
|
||||
// ============= Reserved Nonce Patterns =============
|
||||
|
||||
@@ -217,29 +287,27 @@ pub static RESERVED_NONCE_FIRST_BYTES: &[u8] = &[0xef];
|
||||
pub static RESERVED_NONCE_BEGINNINGS: &[[u8; 4]] = &[
|
||||
[0x48, 0x45, 0x41, 0x44], // HEAD
|
||||
[0x50, 0x4F, 0x53, 0x54], // POST
|
||||
[0x47, 0x45, 0x54, 0x20], // GET
|
||||
[0x47, 0x45, 0x54, 0x20], // GET
|
||||
[0xee, 0xee, 0xee, 0xee], // Intermediate
|
||||
[0xdd, 0xdd, 0xdd, 0xdd], // Secure
|
||||
[0x16, 0x03, 0x01, 0x02], // TLS
|
||||
];
|
||||
|
||||
/// Reserved continuation bytes (bytes 4-7)
|
||||
pub static RESERVED_NONCE_CONTINUES: &[[u8; 4]] = &[
|
||||
[0x00, 0x00, 0x00, 0x00],
|
||||
];
|
||||
pub static RESERVED_NONCE_CONTINUES: &[[u8; 4]] = &[[0x00, 0x00, 0x00, 0x00]];
|
||||
|
||||
// ============= RPC Constants (for Middle Proxy) =============
|
||||
|
||||
/// RPC Proxy Request
|
||||
/// RPC Flags (from Erlang mtp_rpc.erl)
|
||||
pub const RPC_FLAG_NOT_ENCRYPTED: u32 = 0x2;
|
||||
pub const RPC_FLAG_HAS_AD_TAG: u32 = 0x8;
|
||||
pub const RPC_FLAG_MAGIC: u32 = 0x1000;
|
||||
pub const RPC_FLAG_EXTMODE2: u32 = 0x20000;
|
||||
pub const RPC_FLAG_PAD: u32 = 0x8000000;
|
||||
pub const RPC_FLAG_INTERMEDIATE: u32 = 0x20000000;
|
||||
pub const RPC_FLAG_ABRIDGED: u32 = 0x40000000;
|
||||
pub const RPC_FLAG_QUICKACK: u32 = 0x80000000;
|
||||
pub const RPC_FLAG_HAS_AD_TAG: u32 = 0x8;
|
||||
pub const RPC_FLAG_MAGIC: u32 = 0x1000;
|
||||
pub const RPC_FLAG_EXTMODE2: u32 = 0x20000;
|
||||
pub const RPC_FLAG_PAD: u32 = 0x8000000;
|
||||
pub const RPC_FLAG_INTERMEDIATE: u32 = 0x20000000;
|
||||
pub const RPC_FLAG_ABRIDGED: u32 = 0x40000000;
|
||||
pub const RPC_FLAG_QUICKACK: u32 = 0x80000000;
|
||||
|
||||
pub const RPC_PROXY_REQ: [u8; 4] = [0xee, 0xf1, 0xce, 0x36];
|
||||
/// RPC Proxy Answer
|
||||
@@ -267,63 +335,66 @@ pub mod rpc_flags {
|
||||
pub const FLAG_QUICKACK: u32 = 0x80000000;
|
||||
}
|
||||
|
||||
// ============= Middle-End Proxy Servers =============
|
||||
pub const ME_PROXY_PORT: u16 = 8888;
|
||||
|
||||
// ============= Middle-End Proxy Servers =============
|
||||
pub const ME_PROXY_PORT: u16 = 8888;
|
||||
|
||||
pub static TG_MIDDLE_PROXIES_FLAT_V4: LazyLock<Vec<(IpAddr, u16)>> = LazyLock::new(|| {
|
||||
vec![
|
||||
(IpAddr::V4(Ipv4Addr::new(149, 154, 175, 50)), 8888),
|
||||
(IpAddr::V4(Ipv4Addr::new(149, 154, 161, 144)), 8888),
|
||||
(IpAddr::V4(Ipv4Addr::new(149, 154, 175, 100)), 8888),
|
||||
(IpAddr::V4(Ipv4Addr::new(91, 108, 4, 136)), 8888),
|
||||
(IpAddr::V4(Ipv4Addr::new(91, 108, 56, 183)), 8888),
|
||||
]
|
||||
});
|
||||
|
||||
// ============= RPC Constants (u32 native endian) =============
|
||||
// From mtproto-common.h + net-tcp-rpc-common.h + mtproto-proxy.c
|
||||
|
||||
pub const RPC_NONCE_U32: u32 = 0x7acb87aa;
|
||||
pub const RPC_HANDSHAKE_U32: u32 = 0x7682eef5;
|
||||
pub const RPC_HANDSHAKE_ERROR_U32: u32 = 0x6a27beda;
|
||||
pub const TL_PROXY_TAG_U32: u32 = 0xdb1e26ae; // mtproto-proxy.c:121
|
||||
|
||||
// mtproto-common.h
|
||||
pub const RPC_PROXY_REQ_U32: u32 = 0x36cef1ee;
|
||||
pub const RPC_PROXY_ANS_U32: u32 = 0x4403da0d;
|
||||
pub const RPC_CLOSE_CONN_U32: u32 = 0x1fcf425d;
|
||||
pub const RPC_CLOSE_EXT_U32: u32 = 0x5eb634a2;
|
||||
pub const RPC_SIMPLE_ACK_U32: u32 = 0x3bac409b;
|
||||
pub const RPC_PING_U32: u32 = 0x5730a2df;
|
||||
pub const RPC_PONG_U32: u32 = 0x8430eaa7;
|
||||
|
||||
pub const RPC_CRYPTO_NONE_U32: u32 = 0;
|
||||
pub const RPC_CRYPTO_AES_U32: u32 = 1;
|
||||
|
||||
pub mod proxy_flags {
|
||||
pub const FLAG_HAS_AD_TAG: u32 = 1;
|
||||
pub const FLAG_NOT_ENCRYPTED: u32 = 0x2;
|
||||
pub const FLAG_HAS_AD_TAG2: u32 = 0x8;
|
||||
pub const FLAG_MAGIC: u32 = 0x1000;
|
||||
pub const FLAG_EXTMODE2: u32 = 0x20000;
|
||||
pub const FLAG_PAD: u32 = 0x8000000;
|
||||
pub const FLAG_INTERMEDIATE: u32 = 0x20000000;
|
||||
pub const FLAG_ABRIDGED: u32 = 0x40000000;
|
||||
pub const FLAG_QUICKACK: u32 = 0x80000000;
|
||||
}
|
||||
pub static TG_MIDDLE_PROXIES_FLAT_V4: LazyLock<Vec<(IpAddr, u16)>> = LazyLock::new(|| {
|
||||
vec![
|
||||
(IpAddr::V4(Ipv4Addr::new(149, 154, 175, 50)), 8888),
|
||||
(IpAddr::V4(Ipv4Addr::new(149, 154, 161, 144)), 8888),
|
||||
(IpAddr::V4(Ipv4Addr::new(149, 154, 175, 100)), 8888),
|
||||
(IpAddr::V4(Ipv4Addr::new(91, 108, 4, 136)), 8888),
|
||||
(IpAddr::V4(Ipv4Addr::new(91, 108, 56, 183)), 8888),
|
||||
]
|
||||
});
|
||||
|
||||
pub mod rpc_crypto_flags {
|
||||
pub const USE_CRC32C: u32 = 0x800;
|
||||
}
|
||||
|
||||
pub const ME_CONNECT_TIMEOUT_SECS: u64 = 5;
|
||||
pub const ME_HANDSHAKE_TIMEOUT_SECS: u64 = 10;
|
||||
|
||||
#[cfg(test)]
|
||||
// ============= RPC Constants (u32 native endian) =============
|
||||
// From mtproto-common.h + net-tcp-rpc-common.h + mtproto-proxy.c
|
||||
|
||||
pub const RPC_NONCE_U32: u32 = 0x7acb87aa;
|
||||
pub const RPC_HANDSHAKE_U32: u32 = 0x7682eef5;
|
||||
pub const RPC_HANDSHAKE_ERROR_U32: u32 = 0x6a27beda;
|
||||
pub const TL_PROXY_TAG_U32: u32 = 0xdb1e26ae; // mtproto-proxy.c:121
|
||||
|
||||
// mtproto-common.h
|
||||
pub const RPC_PROXY_REQ_U32: u32 = 0x36cef1ee;
|
||||
pub const RPC_PROXY_ANS_U32: u32 = 0x4403da0d;
|
||||
pub const RPC_CLOSE_CONN_U32: u32 = 0x1fcf425d;
|
||||
pub const RPC_CLOSE_EXT_U32: u32 = 0x5eb634a2;
|
||||
pub const RPC_SIMPLE_ACK_U32: u32 = 0x3bac409b;
|
||||
pub const RPC_PING_U32: u32 = 0x5730a2df;
|
||||
pub const RPC_PONG_U32: u32 = 0x8430eaa7;
|
||||
|
||||
pub const RPC_CRYPTO_NONE_U32: u32 = 0;
|
||||
pub const RPC_CRYPTO_AES_U32: u32 = 1;
|
||||
|
||||
pub mod proxy_flags {
|
||||
pub const FLAG_HAS_AD_TAG: u32 = 1;
|
||||
pub const FLAG_NOT_ENCRYPTED: u32 = 0x2;
|
||||
pub const FLAG_HAS_AD_TAG2: u32 = 0x8;
|
||||
pub const FLAG_MAGIC: u32 = 0x1000;
|
||||
pub const FLAG_EXTMODE2: u32 = 0x20000;
|
||||
pub const FLAG_PAD: u32 = 0x8000000;
|
||||
pub const FLAG_INTERMEDIATE: u32 = 0x20000000;
|
||||
pub const FLAG_ABRIDGED: u32 = 0x40000000;
|
||||
pub const FLAG_QUICKACK: u32 = 0x80000000;
|
||||
}
|
||||
|
||||
pub mod rpc_crypto_flags {
|
||||
pub const USE_CRC32C: u32 = 0x800;
|
||||
}
|
||||
|
||||
pub const ME_CONNECT_TIMEOUT_SECS: u64 = 5;
|
||||
pub const ME_HANDSHAKE_TIMEOUT_SECS: u64 = 10;
|
||||
|
||||
#[cfg(test)]
|
||||
#[path = "tests/tls_size_constants_security_tests.rs"]
|
||||
mod tls_size_constants_security_tests;
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
|
||||
|
||||
#[test]
|
||||
fn test_proto_tag_roundtrip() {
|
||||
for tag in [ProtoTag::Abridged, ProtoTag::Intermediate, ProtoTag::Secure] {
|
||||
@@ -332,20 +403,20 @@ mod tests {
|
||||
assert_eq!(tag, parsed);
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
#[test]
|
||||
fn test_proto_tag_values() {
|
||||
assert_eq!(ProtoTag::Abridged.to_bytes(), PROTO_TAG_ABRIDGED);
|
||||
assert_eq!(ProtoTag::Intermediate.to_bytes(), PROTO_TAG_INTERMEDIATE);
|
||||
assert_eq!(ProtoTag::Secure.to_bytes(), PROTO_TAG_SECURE);
|
||||
}
|
||||
|
||||
|
||||
#[test]
|
||||
fn test_invalid_proto_tag() {
|
||||
assert!(ProtoTag::from_bytes([0, 0, 0, 0]).is_none());
|
||||
assert!(ProtoTag::from_bytes([0xff, 0xff, 0xff, 0xff]).is_none());
|
||||
}
|
||||
|
||||
|
||||
#[test]
|
||||
fn test_datacenters_count() {
|
||||
assert_eq!(TG_DATACENTERS_V4.len(), 5);
|
||||
|
||||
@@ -22,7 +22,7 @@ impl FrameExtra {
|
||||
pub fn new() -> Self {
|
||||
Self::default()
|
||||
}
|
||||
|
||||
|
||||
/// Create with quickack flag set
|
||||
pub fn with_quickack() -> Self {
|
||||
Self {
|
||||
@@ -30,7 +30,7 @@ impl FrameExtra {
|
||||
..Default::default()
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
/// Create with simple_ack flag set
|
||||
pub fn with_simple_ack() -> Self {
|
||||
Self {
|
||||
@@ -38,7 +38,7 @@ impl FrameExtra {
|
||||
..Default::default()
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
/// Check if any flags are set
|
||||
pub fn has_flags(&self) -> bool {
|
||||
self.quickack || self.simple_ack || self.skip_send
|
||||
@@ -76,22 +76,22 @@ impl FrameMode {
|
||||
FrameMode::Abridged => 4,
|
||||
FrameMode::Intermediate => 4,
|
||||
FrameMode::SecureIntermediate => 4 + 3, // length + padding
|
||||
FrameMode::Full => 12 + 16, // header + max CBC padding
|
||||
FrameMode::Full => 12 + 16, // header + max CBC padding
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Validate message length for MTProto
|
||||
pub fn validate_message_length(len: usize) -> bool {
|
||||
use super::constants::{MIN_MSG_LEN, MAX_MSG_LEN, PADDING_FILLER};
|
||||
|
||||
use super::constants::{MAX_MSG_LEN, MIN_MSG_LEN, PADDING_FILLER};
|
||||
|
||||
(MIN_MSG_LEN..=MAX_MSG_LEN).contains(&len) && len.is_multiple_of(PADDING_FILLER.len())
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
|
||||
|
||||
#[test]
|
||||
fn test_frame_extra_default() {
|
||||
let extra = FrameExtra::default();
|
||||
@@ -100,18 +100,18 @@ mod tests {
|
||||
assert!(!extra.skip_send);
|
||||
assert!(!extra.has_flags());
|
||||
}
|
||||
|
||||
|
||||
#[test]
|
||||
fn test_frame_extra_flags() {
|
||||
let extra = FrameExtra::with_quickack();
|
||||
assert!(extra.quickack);
|
||||
assert!(extra.has_flags());
|
||||
|
||||
|
||||
let extra = FrameExtra::with_simple_ack();
|
||||
assert!(extra.simple_ack);
|
||||
assert!(extra.has_flags());
|
||||
}
|
||||
|
||||
|
||||
#[test]
|
||||
fn test_validate_message_length() {
|
||||
assert!(validate_message_length(12)); // MIN_MSG_LEN
|
||||
@@ -119,4 +119,4 @@ mod tests {
|
||||
assert!(!validate_message_length(8)); // Too small
|
||||
assert!(!validate_message_length(13)); // Not aligned to 4
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -12,4 +12,4 @@ pub use frame::*;
|
||||
#[allow(unused_imports)]
|
||||
pub use obfuscation::*;
|
||||
#[allow(unused_imports)]
|
||||
pub use tls::*;
|
||||
pub use tls::*;
|
||||
|
||||
@@ -2,9 +2,9 @@
|
||||
|
||||
#![allow(dead_code)]
|
||||
|
||||
use zeroize::Zeroize;
|
||||
use crate::crypto::{sha256, AesCtr};
|
||||
use super::constants::*;
|
||||
use crate::crypto::{AesCtr, sha256};
|
||||
use zeroize::Zeroize;
|
||||
|
||||
/// Obfuscation parameters from handshake
|
||||
///
|
||||
@@ -44,41 +44,40 @@ impl ObfuscationParams {
|
||||
let dec_prekey_iv = &handshake[SKIP_LEN..SKIP_LEN + PREKEY_LEN + IV_LEN];
|
||||
let dec_prekey = &dec_prekey_iv[..PREKEY_LEN];
|
||||
let dec_iv_bytes = &dec_prekey_iv[PREKEY_LEN..];
|
||||
|
||||
|
||||
let enc_prekey_iv: Vec<u8> = dec_prekey_iv.iter().rev().copied().collect();
|
||||
let enc_prekey = &enc_prekey_iv[..PREKEY_LEN];
|
||||
let enc_iv_bytes = &enc_prekey_iv[PREKEY_LEN..];
|
||||
|
||||
|
||||
for (username, secret) in secrets {
|
||||
let mut dec_key_input = Vec::with_capacity(PREKEY_LEN + secret.len());
|
||||
dec_key_input.extend_from_slice(dec_prekey);
|
||||
dec_key_input.extend_from_slice(secret);
|
||||
let decrypt_key = sha256(&dec_key_input);
|
||||
|
||||
|
||||
let decrypt_iv = u128::from_be_bytes(dec_iv_bytes.try_into().unwrap());
|
||||
|
||||
|
||||
let mut decryptor = AesCtr::new(&decrypt_key, decrypt_iv);
|
||||
let decrypted = decryptor.decrypt(handshake);
|
||||
|
||||
|
||||
let tag_bytes: [u8; 4] = decrypted[PROTO_TAG_POS..PROTO_TAG_POS + 4]
|
||||
.try_into()
|
||||
.unwrap();
|
||||
|
||||
|
||||
let proto_tag = match ProtoTag::from_bytes(tag_bytes) {
|
||||
Some(tag) => tag,
|
||||
None => continue,
|
||||
};
|
||||
|
||||
let dc_idx = i16::from_le_bytes(
|
||||
decrypted[DC_IDX_POS..DC_IDX_POS + 2].try_into().unwrap()
|
||||
);
|
||||
|
||||
|
||||
let dc_idx =
|
||||
i16::from_le_bytes(decrypted[DC_IDX_POS..DC_IDX_POS + 2].try_into().unwrap());
|
||||
|
||||
let mut enc_key_input = Vec::with_capacity(PREKEY_LEN + secret.len());
|
||||
enc_key_input.extend_from_slice(enc_prekey);
|
||||
enc_key_input.extend_from_slice(secret);
|
||||
let encrypt_key = sha256(&enc_key_input);
|
||||
let encrypt_iv = u128::from_be_bytes(enc_iv_bytes.try_into().unwrap());
|
||||
|
||||
|
||||
return Some((
|
||||
ObfuscationParams {
|
||||
decrypt_key,
|
||||
@@ -91,20 +90,20 @@ impl ObfuscationParams {
|
||||
username.clone(),
|
||||
));
|
||||
}
|
||||
|
||||
|
||||
None
|
||||
}
|
||||
|
||||
|
||||
/// Create AES-CTR decryptor for client -> proxy direction
|
||||
pub fn create_decryptor(&self) -> AesCtr {
|
||||
AesCtr::new(&self.decrypt_key, self.decrypt_iv)
|
||||
}
|
||||
|
||||
|
||||
/// Create AES-CTR encryptor for proxy -> client direction
|
||||
pub fn create_encryptor(&self) -> AesCtr {
|
||||
AesCtr::new(&self.encrypt_key, self.encrypt_iv)
|
||||
}
|
||||
|
||||
|
||||
/// Get the combined encrypt key and IV for fast mode
|
||||
pub fn enc_key_iv(&self) -> Vec<u8> {
|
||||
let mut result = Vec::with_capacity(KEY_LEN + IV_LEN);
|
||||
@@ -120,7 +119,7 @@ pub fn generate_nonce<R: FnMut(usize) -> Vec<u8>>(mut random_bytes: R) -> [u8; H
|
||||
let nonce_vec = random_bytes(HANDSHAKE_LEN);
|
||||
let mut nonce = [0u8; HANDSHAKE_LEN];
|
||||
nonce.copy_from_slice(&nonce_vec);
|
||||
|
||||
|
||||
if is_valid_nonce(&nonce) {
|
||||
return nonce;
|
||||
}
|
||||
@@ -132,17 +131,17 @@ pub fn is_valid_nonce(nonce: &[u8; HANDSHAKE_LEN]) -> bool {
|
||||
if RESERVED_NONCE_FIRST_BYTES.contains(&nonce[0]) {
|
||||
return false;
|
||||
}
|
||||
|
||||
|
||||
let first_four: [u8; 4] = nonce[..4].try_into().unwrap();
|
||||
if RESERVED_NONCE_BEGINNINGS.contains(&first_four) {
|
||||
return false;
|
||||
}
|
||||
|
||||
|
||||
let continue_four: [u8; 4] = nonce[4..8].try_into().unwrap();
|
||||
if RESERVED_NONCE_CONTINUES.contains(&continue_four) {
|
||||
return false;
|
||||
}
|
||||
|
||||
|
||||
true
|
||||
}
|
||||
|
||||
@@ -153,7 +152,7 @@ pub fn prepare_tg_nonce(
|
||||
enc_key_iv: Option<&[u8]>,
|
||||
) {
|
||||
nonce[PROTO_TAG_POS..PROTO_TAG_POS + 4].copy_from_slice(&proto_tag.to_bytes());
|
||||
|
||||
|
||||
if let Some(key_iv) = enc_key_iv {
|
||||
let reversed: Vec<u8> = key_iv.iter().rev().copied().collect();
|
||||
nonce[SKIP_LEN..SKIP_LEN + KEY_LEN + IV_LEN].copy_from_slice(&reversed);
|
||||
@@ -171,39 +170,39 @@ pub fn encrypt_nonce(nonce: &[u8; HANDSHAKE_LEN]) -> Vec<u8> {
|
||||
let key_iv = &nonce[SKIP_LEN..SKIP_LEN + KEY_LEN + IV_LEN];
|
||||
let enc_key = sha256(key_iv);
|
||||
let enc_iv = u128::from_be_bytes(key_iv[..IV_LEN].try_into().unwrap());
|
||||
|
||||
|
||||
let mut encryptor = AesCtr::new(&enc_key, enc_iv);
|
||||
|
||||
|
||||
let mut result = nonce.to_vec();
|
||||
let encrypted_part = encryptor.encrypt(&nonce[PROTO_TAG_POS..]);
|
||||
result[PROTO_TAG_POS..].copy_from_slice(&encrypted_part);
|
||||
|
||||
|
||||
result
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
|
||||
|
||||
#[test]
|
||||
fn test_is_valid_nonce() {
|
||||
let mut valid = [0x42u8; HANDSHAKE_LEN];
|
||||
valid[4..8].copy_from_slice(&[1, 2, 3, 4]);
|
||||
assert!(is_valid_nonce(&valid));
|
||||
|
||||
|
||||
let mut invalid = [0x00u8; HANDSHAKE_LEN];
|
||||
invalid[0] = 0xef;
|
||||
assert!(!is_valid_nonce(&invalid));
|
||||
|
||||
|
||||
let mut invalid = [0x00u8; HANDSHAKE_LEN];
|
||||
invalid[..4].copy_from_slice(b"HEAD");
|
||||
assert!(!is_valid_nonce(&invalid));
|
||||
|
||||
|
||||
let mut invalid = [0x42u8; HANDSHAKE_LEN];
|
||||
invalid[4..8].copy_from_slice(&[0, 0, 0, 0]);
|
||||
assert!(!is_valid_nonce(&invalid));
|
||||
}
|
||||
|
||||
|
||||
#[test]
|
||||
fn test_generate_nonce() {
|
||||
let mut counter = 0u8;
|
||||
@@ -211,7 +210,7 @@ mod tests {
|
||||
counter = counter.wrapping_add(1);
|
||||
vec![counter; n]
|
||||
});
|
||||
|
||||
|
||||
assert!(is_valid_nonce(&nonce));
|
||||
assert_eq!(nonce.len(), HANDSHAKE_LEN);
|
||||
}
|
||||
|
||||
358
src/protocol/tests/tls_adversarial_tests.rs
Normal file
358
src/protocol/tests/tls_adversarial_tests.rs
Normal file
@@ -0,0 +1,358 @@
|
||||
use super::*;
|
||||
use crate::crypto::sha256_hmac;
|
||||
use std::time::Instant;
|
||||
|
||||
/// Helper to create a byte vector of specific length.
|
||||
fn make_garbage(len: usize) -> Vec<u8> {
|
||||
vec![0x42u8; len]
|
||||
}
|
||||
|
||||
/// Helper to create a valid-looking HMAC digest for test.
|
||||
fn make_digest(secret: &[u8], msg: &[u8], ts: u32) -> [u8; 32] {
|
||||
let mut hmac = sha256_hmac(secret, msg);
|
||||
let ts_bytes = ts.to_le_bytes();
|
||||
for i in 0..4 {
|
||||
hmac[28 + i] ^= ts_bytes[i];
|
||||
}
|
||||
hmac
|
||||
}
|
||||
|
||||
fn make_valid_tls_handshake_with_session_id(
|
||||
secret: &[u8],
|
||||
timestamp: u32,
|
||||
session_id: &[u8],
|
||||
) -> Vec<u8> {
|
||||
let session_id_len = session_id.len();
|
||||
let len = TLS_DIGEST_POS + TLS_DIGEST_LEN + 1 + session_id_len;
|
||||
let mut handshake = vec![0x42u8; len];
|
||||
|
||||
handshake[TLS_DIGEST_POS + TLS_DIGEST_LEN] = session_id_len as u8;
|
||||
let sid_start = TLS_DIGEST_POS + TLS_DIGEST_LEN + 1;
|
||||
handshake[sid_start..sid_start + session_id_len].copy_from_slice(session_id);
|
||||
handshake[TLS_DIGEST_POS..TLS_DIGEST_POS + TLS_DIGEST_LEN].fill(0);
|
||||
|
||||
let digest = make_digest(secret, &handshake, timestamp);
|
||||
|
||||
handshake[TLS_DIGEST_POS..TLS_DIGEST_POS + TLS_DIGEST_LEN].copy_from_slice(&digest);
|
||||
handshake
|
||||
}
|
||||
|
||||
fn make_valid_tls_handshake(secret: &[u8], timestamp: u32) -> Vec<u8> {
|
||||
make_valid_tls_handshake_with_session_id(secret, timestamp, &[0x42; 32])
|
||||
}
|
||||
|
||||
// ------------------------------------------------------------------
|
||||
// Truncated Packet Tests (OWASP ASVS 5.1.4, 5.1.5)
|
||||
// ------------------------------------------------------------------
|
||||
|
||||
#[test]
|
||||
fn validate_tls_handshake_truncated_10_bytes_rejected() {
|
||||
let secrets = vec![("user".to_string(), b"secret".to_vec())];
|
||||
let truncated = make_garbage(10);
|
||||
assert!(validate_tls_handshake(&truncated, &secrets, true).is_none());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn validate_tls_handshake_truncated_at_digest_start_rejected() {
|
||||
let secrets = vec![("user".to_string(), b"secret".to_vec())];
|
||||
// TLS_DIGEST_POS = 11. 11 bytes should be rejected.
|
||||
let truncated = make_garbage(TLS_DIGEST_POS);
|
||||
assert!(validate_tls_handshake(&truncated, &secrets, true).is_none());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn validate_tls_handshake_truncated_inside_digest_rejected() {
|
||||
let secrets = vec![("user".to_string(), b"secret".to_vec())];
|
||||
// TLS_DIGEST_POS + 16 (half digest)
|
||||
let truncated = make_garbage(TLS_DIGEST_POS + 16);
|
||||
assert!(validate_tls_handshake(&truncated, &secrets, true).is_none());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn extract_sni_truncated_at_record_header_rejected() {
|
||||
let truncated = make_garbage(3);
|
||||
assert!(extract_sni_from_client_hello(&truncated).is_none());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn extract_sni_truncated_at_handshake_header_rejected() {
|
||||
let mut truncated = vec![TLS_RECORD_HANDSHAKE, 0x03, 0x03, 0x00, 0x05];
|
||||
truncated.extend_from_slice(&[0x01, 0x00]); // ClientHello type but truncated length
|
||||
assert!(extract_sni_from_client_hello(&truncated).is_none());
|
||||
}
|
||||
|
||||
// ------------------------------------------------------------------
|
||||
// Malformed Extension Parsing Tests
|
||||
// ------------------------------------------------------------------
|
||||
|
||||
#[test]
|
||||
fn extract_sni_with_overlapping_extension_lengths_rejected() {
|
||||
let mut h = vec![0x16, 0x03, 0x03, 0x00, 0x60]; // Record header
|
||||
h.push(0x01); // Handshake type: ClientHello
|
||||
h.extend_from_slice(&[0x00, 0x00, 0x5C]); // Length: 92
|
||||
h.extend_from_slice(&[0x03, 0x03]); // Version
|
||||
h.extend_from_slice(&[0u8; 32]); // Random
|
||||
h.push(0); // Session ID length: 0
|
||||
h.extend_from_slice(&[0x00, 0x02, 0x13, 0x01]); // Cipher suites
|
||||
h.extend_from_slice(&[0x01, 0x00]); // Compression
|
||||
|
||||
// Extensions start
|
||||
h.extend_from_slice(&[0x00, 0x20]); // Total Extensions length: 32
|
||||
|
||||
// Extension 1: SNI (type 0)
|
||||
h.extend_from_slice(&[0x00, 0x00]);
|
||||
h.extend_from_slice(&[0x00, 0x40]); // Claimed len: 64 (OVERFLOWS total extensions len 32)
|
||||
h.extend_from_slice(&[0u8; 64]);
|
||||
|
||||
assert!(extract_sni_from_client_hello(&h).is_none());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn extract_sni_with_infinite_loop_potential_extension_rejected() {
|
||||
let mut h = vec![0x16, 0x03, 0x03, 0x00, 0x60]; // Record header
|
||||
h.push(0x01); // Handshake type: ClientHello
|
||||
h.extend_from_slice(&[0x00, 0x00, 0x5C]); // Length: 92
|
||||
h.extend_from_slice(&[0x03, 0x03]); // Version
|
||||
h.extend_from_slice(&[0u8; 32]); // Random
|
||||
h.push(0); // Session ID length: 0
|
||||
h.extend_from_slice(&[0x00, 0x02, 0x13, 0x01]); // Cipher suites
|
||||
h.extend_from_slice(&[0x01, 0x00]); // Compression
|
||||
|
||||
// Extensions start
|
||||
h.extend_from_slice(&[0x00, 0x10]); // Total Extensions length: 16
|
||||
|
||||
// Extension: zero length but claims more?
|
||||
// If our parser didn't advance, it might loop.
|
||||
// Telemt uses `pos += 4 + elen;` so it always advances.
|
||||
h.extend_from_slice(&[0x12, 0x34]); // Unknown type
|
||||
h.extend_from_slice(&[0x00, 0x00]); // Length 0
|
||||
|
||||
// Fill the rest with garbage
|
||||
h.extend_from_slice(&[0x42; 12]);
|
||||
|
||||
// We expect it to finish without SNI found
|
||||
assert!(extract_sni_from_client_hello(&h).is_none());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn extract_sni_with_invalid_hostname_rejected() {
|
||||
let host = b"invalid_host!%^";
|
||||
let mut sni = Vec::new();
|
||||
sni.extend_from_slice(&((host.len() + 3) as u16).to_be_bytes());
|
||||
sni.push(0);
|
||||
sni.extend_from_slice(&(host.len() as u16).to_be_bytes());
|
||||
sni.extend_from_slice(host);
|
||||
|
||||
let mut h = vec![0x16, 0x03, 0x03, 0x00, 0x60]; // Record header
|
||||
h.push(0x01); // ClientHello
|
||||
h.extend_from_slice(&[0x00, 0x00, 0x5C]);
|
||||
h.extend_from_slice(&[0x03, 0x03]);
|
||||
h.extend_from_slice(&[0u8; 32]);
|
||||
h.push(0);
|
||||
h.extend_from_slice(&[0x00, 0x02, 0x13, 0x01]);
|
||||
h.extend_from_slice(&[0x01, 0x00]);
|
||||
|
||||
let mut ext = Vec::new();
|
||||
ext.extend_from_slice(&0x0000u16.to_be_bytes());
|
||||
ext.extend_from_slice(&(sni.len() as u16).to_be_bytes());
|
||||
ext.extend_from_slice(&sni);
|
||||
|
||||
h.extend_from_slice(&(ext.len() as u16).to_be_bytes());
|
||||
h.extend_from_slice(&ext);
|
||||
|
||||
assert!(
|
||||
extract_sni_from_client_hello(&h).is_none(),
|
||||
"Invalid SNI hostname must be rejected"
|
||||
);
|
||||
}
|
||||
|
||||
// ------------------------------------------------------------------
|
||||
// Timing Neutrality Tests (OWASP ASVS 5.1.7)
|
||||
// ------------------------------------------------------------------
|
||||
|
||||
#[test]
|
||||
fn validate_tls_handshake_timing_neutrality() {
|
||||
let secret = b"timing_test_secret_32_bytes_long_";
|
||||
let secrets = vec![("u".to_string(), secret.to_vec())];
|
||||
|
||||
let mut base = vec![0x42u8; 100];
|
||||
base[TLS_DIGEST_POS + TLS_DIGEST_LEN] = 32;
|
||||
|
||||
const ITER: usize = 600;
|
||||
const ROUNDS: usize = 7;
|
||||
|
||||
let mut per_round_avg_diff_ns = Vec::with_capacity(ROUNDS);
|
||||
|
||||
for round in 0..ROUNDS {
|
||||
let mut success_h = base.clone();
|
||||
let mut fail_h = base.clone();
|
||||
|
||||
let start_success = Instant::now();
|
||||
for _ in 0..ITER {
|
||||
let digest = make_digest(secret, &success_h, 0);
|
||||
success_h[TLS_DIGEST_POS..TLS_DIGEST_POS + TLS_DIGEST_LEN].copy_from_slice(&digest);
|
||||
let _ = validate_tls_handshake_at_time(&success_h, &secrets, true, 0);
|
||||
}
|
||||
let success_elapsed = start_success.elapsed();
|
||||
|
||||
let start_fail = Instant::now();
|
||||
for i in 0..ITER {
|
||||
let mut digest = make_digest(secret, &fail_h, 0);
|
||||
let flip_idx = (i + round) % (TLS_DIGEST_LEN - 4);
|
||||
digest[flip_idx] ^= 0xFF;
|
||||
fail_h[TLS_DIGEST_POS..TLS_DIGEST_POS + TLS_DIGEST_LEN].copy_from_slice(&digest);
|
||||
let _ = validate_tls_handshake_at_time(&fail_h, &secrets, true, 0);
|
||||
}
|
||||
let fail_elapsed = start_fail.elapsed();
|
||||
|
||||
let diff = if success_elapsed > fail_elapsed {
|
||||
success_elapsed - fail_elapsed
|
||||
} else {
|
||||
fail_elapsed - success_elapsed
|
||||
};
|
||||
per_round_avg_diff_ns.push(diff.as_nanos() as f64 / ITER as f64);
|
||||
}
|
||||
|
||||
per_round_avg_diff_ns.sort_by(|a, b| a.partial_cmp(b).unwrap());
|
||||
let median_avg_diff_ns = per_round_avg_diff_ns[ROUNDS / 2];
|
||||
|
||||
// Keep this as a coarse side-channel guard only; noisy shared CI hosts can
|
||||
// introduce microsecond-level jitter that should not fail deterministic suites.
|
||||
assert!(
|
||||
median_avg_diff_ns < 50_000.0,
|
||||
"Median timing delta too large: {} ns/iter",
|
||||
median_avg_diff_ns
|
||||
);
|
||||
}
|
||||
|
||||
// ------------------------------------------------------------------
|
||||
// Adversarial Fingerprinting / Active Probing Tests
|
||||
// ------------------------------------------------------------------
|
||||
|
||||
#[test]
|
||||
fn is_tls_handshake_robustness_against_probing() {
|
||||
// Valid TLS 1.0 ClientHello
|
||||
assert!(is_tls_handshake(&[0x16, 0x03, 0x01]));
|
||||
// Valid TLS 1.2/1.3 ClientHello (Legacy Record Layer)
|
||||
assert!(is_tls_handshake(&[0x16, 0x03, 0x03]));
|
||||
|
||||
// Invalid record type but matching version
|
||||
assert!(!is_tls_handshake(&[0x17, 0x03, 0x03]));
|
||||
// Plaintext HTTP request
|
||||
assert!(!is_tls_handshake(b"GET / HTTP/1.1"));
|
||||
// Short garbage
|
||||
assert!(!is_tls_handshake(&[0x16, 0x03]));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn validate_tls_handshake_at_time_strict_boundary() {
|
||||
let secret = b"strict_boundary_secret_32_bytes_";
|
||||
let secrets = vec![("u".to_string(), secret.to_vec())];
|
||||
let now: i64 = 1_000_000_000;
|
||||
|
||||
// Boundary: exactly TIME_SKEW_MAX (120s past)
|
||||
let ts_past = (now - TIME_SKEW_MAX) as u32;
|
||||
let h = make_valid_tls_handshake_with_session_id(secret, ts_past, &[0x42; 32]);
|
||||
assert!(validate_tls_handshake_at_time(&h, &secrets, false, now).is_some());
|
||||
|
||||
// Boundary + 1s: should be rejected
|
||||
let ts_too_past = (now - TIME_SKEW_MAX - 1) as u32;
|
||||
let h2 = make_valid_tls_handshake_with_session_id(secret, ts_too_past, &[0x42; 32]);
|
||||
assert!(validate_tls_handshake_at_time(&h2, &secrets, false, now).is_none());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn extract_sni_with_duplicate_extensions_rejected() {
|
||||
// Construct a ClientHello with TWO SNI extensions
|
||||
let host1 = b"first.com";
|
||||
let mut sni1 = Vec::new();
|
||||
sni1.extend_from_slice(&((host1.len() + 3) as u16).to_be_bytes());
|
||||
sni1.push(0);
|
||||
sni1.extend_from_slice(&(host1.len() as u16).to_be_bytes());
|
||||
sni1.extend_from_slice(host1);
|
||||
|
||||
let host2 = b"second.com";
|
||||
let mut sni2 = Vec::new();
|
||||
sni2.extend_from_slice(&((host2.len() + 3) as u16).to_be_bytes());
|
||||
sni2.push(0);
|
||||
sni2.extend_from_slice(&(host2.len() as u16).to_be_bytes());
|
||||
sni2.extend_from_slice(host2);
|
||||
|
||||
let mut ext = Vec::new();
|
||||
// Ext 1: SNI
|
||||
ext.extend_from_slice(&0x0000u16.to_be_bytes());
|
||||
ext.extend_from_slice(&(sni1.len() as u16).to_be_bytes());
|
||||
ext.extend_from_slice(&sni1);
|
||||
// Ext 2: SNI again
|
||||
ext.extend_from_slice(&0x0000u16.to_be_bytes());
|
||||
ext.extend_from_slice(&(sni2.len() as u16).to_be_bytes());
|
||||
ext.extend_from_slice(&sni2);
|
||||
|
||||
let mut body = Vec::new();
|
||||
body.extend_from_slice(&[0x03, 0x03]);
|
||||
body.extend_from_slice(&[0u8; 32]);
|
||||
body.push(0);
|
||||
body.extend_from_slice(&[0x00, 0x02, 0x13, 0x01]);
|
||||
body.extend_from_slice(&[0x01, 0x00]);
|
||||
body.extend_from_slice(&(ext.len() as u16).to_be_bytes());
|
||||
body.extend_from_slice(&ext);
|
||||
|
||||
let mut handshake = Vec::new();
|
||||
handshake.push(0x01);
|
||||
let body_len = (body.len() as u32).to_be_bytes();
|
||||
handshake.extend_from_slice(&body_len[1..4]);
|
||||
handshake.extend_from_slice(&body);
|
||||
|
||||
let mut h = Vec::new();
|
||||
h.push(0x16);
|
||||
h.extend_from_slice(&[0x03, 0x03]);
|
||||
h.extend_from_slice(&(handshake.len() as u16).to_be_bytes());
|
||||
h.extend_from_slice(&handshake);
|
||||
|
||||
// Duplicate SNI extensions are ambiguous and must fail closed.
|
||||
assert!(extract_sni_from_client_hello(&h).is_none());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn extract_alpn_with_malformed_list_rejected() {
|
||||
let mut alpn_payload = Vec::new();
|
||||
alpn_payload.extend_from_slice(&0x0005u16.to_be_bytes()); // Total len 5
|
||||
alpn_payload.push(10); // Labeled len 10 (OVERFLOWS total 5)
|
||||
alpn_payload.extend_from_slice(b"h2");
|
||||
|
||||
let mut ext = Vec::new();
|
||||
ext.extend_from_slice(&0x0010u16.to_be_bytes()); // Type: ALPN (16)
|
||||
ext.extend_from_slice(&(alpn_payload.len() as u16).to_be_bytes());
|
||||
ext.extend_from_slice(&alpn_payload);
|
||||
|
||||
let mut h = vec![
|
||||
0x16, 0x03, 0x03, 0x00, 0x40, 0x01, 0x00, 0x00, 0x3C, 0x03, 0x03,
|
||||
];
|
||||
h.extend_from_slice(&[0u8; 32]);
|
||||
h.push(0);
|
||||
h.extend_from_slice(&[0x00, 0x02, 0x13, 0x01, 0x01, 0x00]);
|
||||
h.extend_from_slice(&(ext.len() as u16).to_be_bytes());
|
||||
h.extend_from_slice(&ext);
|
||||
|
||||
let res = extract_alpn_from_client_hello(&h);
|
||||
assert!(
|
||||
res.is_empty(),
|
||||
"Malformed ALPN list must return empty or fail"
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn extract_sni_with_huge_extension_header_rejected() {
|
||||
let mut h = vec![0x16, 0x03, 0x03, 0x00, 0x00]; // Record header
|
||||
h.push(0x01); // ClientHello
|
||||
h.extend_from_slice(&[0x00, 0xFF, 0xFF]); // Huge length (65535) - overflows record
|
||||
h.extend_from_slice(&[0x03, 0x03]);
|
||||
h.extend_from_slice(&[0u8; 32]);
|
||||
h.push(0);
|
||||
h.extend_from_slice(&[0x00, 0x02, 0x13, 0x01, 0x01, 0x00]);
|
||||
|
||||
// Extensions start
|
||||
h.extend_from_slice(&[0xFF, 0xFF]); // Total extensions: 65535 (OVERFLOWS everything)
|
||||
|
||||
assert!(extract_sni_from_client_hello(&h).is_none());
|
||||
}
|
||||
210
src/protocol/tests/tls_fuzz_security_tests.rs
Normal file
210
src/protocol/tests/tls_fuzz_security_tests.rs
Normal file
@@ -0,0 +1,210 @@
|
||||
use super::*;
|
||||
use crate::crypto::sha256_hmac;
|
||||
use std::panic::catch_unwind;
|
||||
|
||||
fn make_valid_tls_handshake_with_session_id(
|
||||
secret: &[u8],
|
||||
timestamp: u32,
|
||||
session_id: &[u8],
|
||||
) -> Vec<u8> {
|
||||
let session_id_len = session_id.len();
|
||||
assert!(session_id_len <= u8::MAX as usize);
|
||||
|
||||
let len = TLS_DIGEST_POS + TLS_DIGEST_LEN + 1 + session_id_len;
|
||||
let mut handshake = vec![0x42u8; len];
|
||||
handshake[TLS_DIGEST_POS + TLS_DIGEST_LEN] = session_id_len as u8;
|
||||
let sid_start = TLS_DIGEST_POS + TLS_DIGEST_LEN + 1;
|
||||
handshake[sid_start..sid_start + session_id_len].copy_from_slice(session_id);
|
||||
handshake[TLS_DIGEST_POS..TLS_DIGEST_POS + TLS_DIGEST_LEN].fill(0);
|
||||
|
||||
let mut digest = sha256_hmac(secret, &handshake);
|
||||
let ts = timestamp.to_le_bytes();
|
||||
for idx in 0..4 {
|
||||
digest[28 + idx] ^= ts[idx];
|
||||
}
|
||||
|
||||
handshake[TLS_DIGEST_POS..TLS_DIGEST_POS + TLS_DIGEST_LEN].copy_from_slice(&digest);
|
||||
handshake
|
||||
}
|
||||
|
||||
fn make_valid_client_hello_record(host: &str, alpn_protocols: &[&[u8]]) -> Vec<u8> {
|
||||
let mut body = Vec::new();
|
||||
body.extend_from_slice(&TLS_VERSION);
|
||||
body.extend_from_slice(&[0u8; 32]);
|
||||
body.push(0);
|
||||
body.extend_from_slice(&2u16.to_be_bytes());
|
||||
body.extend_from_slice(&[0x13, 0x01]);
|
||||
body.push(1);
|
||||
body.push(0);
|
||||
|
||||
let mut ext_blob = Vec::new();
|
||||
|
||||
let host_bytes = host.as_bytes();
|
||||
let mut sni_payload = Vec::new();
|
||||
sni_payload.extend_from_slice(&((host_bytes.len() + 3) as u16).to_be_bytes());
|
||||
sni_payload.push(0);
|
||||
sni_payload.extend_from_slice(&(host_bytes.len() as u16).to_be_bytes());
|
||||
sni_payload.extend_from_slice(host_bytes);
|
||||
ext_blob.extend_from_slice(&0x0000u16.to_be_bytes());
|
||||
ext_blob.extend_from_slice(&(sni_payload.len() as u16).to_be_bytes());
|
||||
ext_blob.extend_from_slice(&sni_payload);
|
||||
|
||||
if !alpn_protocols.is_empty() {
|
||||
let mut alpn_list = Vec::new();
|
||||
for proto in alpn_protocols {
|
||||
alpn_list.push(proto.len() as u8);
|
||||
alpn_list.extend_from_slice(proto);
|
||||
}
|
||||
let mut alpn_data = Vec::new();
|
||||
alpn_data.extend_from_slice(&(alpn_list.len() as u16).to_be_bytes());
|
||||
alpn_data.extend_from_slice(&alpn_list);
|
||||
|
||||
ext_blob.extend_from_slice(&0x0010u16.to_be_bytes());
|
||||
ext_blob.extend_from_slice(&(alpn_data.len() as u16).to_be_bytes());
|
||||
ext_blob.extend_from_slice(&alpn_data);
|
||||
}
|
||||
|
||||
body.extend_from_slice(&(ext_blob.len() as u16).to_be_bytes());
|
||||
body.extend_from_slice(&ext_blob);
|
||||
|
||||
let mut handshake = Vec::new();
|
||||
handshake.push(0x01);
|
||||
let body_len = (body.len() as u32).to_be_bytes();
|
||||
handshake.extend_from_slice(&body_len[1..4]);
|
||||
handshake.extend_from_slice(&body);
|
||||
|
||||
let mut record = Vec::new();
|
||||
record.push(TLS_RECORD_HANDSHAKE);
|
||||
record.extend_from_slice(&[0x03, 0x01]);
|
||||
record.extend_from_slice(&(handshake.len() as u16).to_be_bytes());
|
||||
record.extend_from_slice(&handshake);
|
||||
record
|
||||
}
|
||||
|
||||
#[test]
fn client_hello_fuzz_corpus_never_panics_or_accepts_corruption() {
    let valid = make_valid_client_hello_record("example.com", &[b"h2", b"http/1.1"]);

    // Sanity: the well-formed record parses to the expected SNI and ALPN list.
    assert_eq!(
        extract_sni_from_client_hello(&valid).as_deref(),
        Some("example.com")
    );
    assert_eq!(
        extract_alpn_from_client_hello(&valid),
        vec![b"h2".to_vec(), b"http/1.1".to_vec()]
    );
    assert!(
        extract_sni_from_client_hello(&make_valid_client_hello_record("127.0.0.1", &[])).is_none(),
        "literal IP hostnames must be rejected"
    );

    // Corrupted / truncated variants; item 0 is the empty input.
    let corpus = vec![
        Vec::new(),
        vec![0x16, 0x03, 0x03],
        valid[..9].to_vec(),
        valid[..valid.len() - 1].to_vec(),
        {
            // Record type changed from handshake to alert.
            let mut v = valid.clone();
            v[0] = 0x15;
            v
        },
        {
            // Handshake message type changed to server_hello.
            let mut v = valid.clone();
            v[5] = 0x02;
            v
        },
        {
            // Record length field corrupted.
            let mut v = valid.clone();
            v[3] ^= 0x7f;
            v
        },
    ];

    for (idx, input) in corpus.iter().enumerate() {
        // Neither parser may panic on any corpus entry.
        assert!(catch_unwind(|| extract_sni_from_client_hello(input)).is_ok());
        assert!(catch_unwind(|| extract_alpn_from_client_hello(input)).is_ok());

        // The fail-closed assertions apply only to the corrupted entries.
        if idx == 0 {
            continue;
        }

        assert!(
            extract_sni_from_client_hello(input).is_none(),
            "corpus item {idx} must fail closed for SNI"
        );
        assert!(
            extract_alpn_from_client_hello(input).is_empty(),
            "corpus item {idx} must fail closed for ALPN"
        );
    }
}
|
||||
|
||||
#[test]
fn tls_handshake_fuzz_corpus_never_panics_and_rejects_digest_mutations() {
    let secret = b"tls_fuzz_security_secret";
    let now: i64 = 1_700_000_000;
    let base = make_valid_tls_handshake_with_session_id(secret, now as u32, &[0x42; 32]);
    let secrets = vec![("fuzz-user".to_string(), secret.to_vec())];

    // Sanity: the untouched handshake validates at `now`.
    assert!(validate_tls_handshake_at_time(&base, &secrets, false, now).is_some());

    // Produce a copy of `base` with one edit applied.
    let variant = |edit: &dyn Fn(&mut Vec<u8>)| {
        let mut bytes = base.clone();
        edit(&mut bytes);
        bytes
    };

    let mut corpus = vec![
        // Truncated inside the digest region.
        variant(&|b| b.truncate(TLS_DIGEST_POS + 16)),
        // Single bit flipped inside the digest.
        variant(&|b| b[TLS_DIGEST_POS + 7] ^= 0x80),
        // Session-id length byte pushed past the 32-byte maximum.
        variant(&|b| b[TLS_DIGEST_POS + TLS_DIGEST_LEN] = 33),
        // Timestamp just beyond the allowed past skew.
        variant(&|b| {
            b[TLS_DIGEST_POS + 28..TLS_DIGEST_POS + 32]
                .copy_from_slice(&((now - i64::from(TIME_SKEW_MAX) - 1) as u32).to_le_bytes())
        }),
        // Timestamp just beyond the allowed future skew.
        variant(&|b| {
            b[TLS_DIGEST_POS + 28..TLS_DIGEST_POS + 32]
                .copy_from_slice(&((now - TIME_SKEW_MIN + 1) as u32).to_le_bytes())
        }),
    ];

    // 32 pseudo-random variants, each with two XOR corruptions somewhere in
    // the digest region (deterministic 64-bit LCG keeps the corpus stable).
    let mut seed = 0xA5A5_5A5A_F00D_BAAD_u64;
    for _ in 0..32 {
        let mut mutated = base.clone();
        for _ in 0..2 {
            seed = seed
                .wrapping_mul(2862933555777941757)
                .wrapping_add(3037000493);
            let pos = TLS_DIGEST_POS + (seed as usize % TLS_DIGEST_LEN);
            mutated[pos] ^= ((seed >> 17) as u8).wrapping_add(1);
        }
        corpus.push(mutated);
    }

    for (idx, handshake) in corpus.iter().enumerate() {
        let outcome =
            catch_unwind(|| validate_tls_handshake_at_time(handshake, &secrets, false, now));
        assert!(outcome.is_ok(), "corpus item {idx} must not panic");
        assert!(
            outcome.unwrap().is_none(),
            "corpus item {idx} must fail closed"
        );
    }
}
|
||||
|
||||
#[test]
fn tls_boot_time_acceptance_is_capped_by_replay_window() {
    let secret = b"tls_boot_time_cap_secret";
    let secrets = vec![("boot-user".to_string(), secret.to_vec())];
    // Handshake stamped with a boot-time timestamp (~1s after epoch).
    let handshake = make_valid_tls_handshake_with_session_id(secret, 1u32, &[0x42; 32]);

    // Validate the same handshake under a given replay window.
    let accepted = |window| {
        validate_tls_handshake_with_replay_window(&handshake, &secrets, false, window).is_some()
    };

    assert!(
        accepted(300),
        "boot-time timestamp should be accepted while replay window permits it"
    );
    assert!(
        !accepted(0),
        "boot-time timestamp must be rejected when replay window disables the bypass"
    );
}
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user