mirror of https://github.com/telemt/telemt.git (synced 2026-04-14 17:14:09 +03:00)

Compare commits: 783 commits
**`.github/codeql/codeql-config.yml`** (vendored, new file, 19 lines)

```yaml
name: "Rust without tests"

disable-default-queries: false

queries:
  - uses: security-extended
  - uses: security-and-quality
  - uses: ./.github/codeql/queries

query-filters:
  - exclude:
      id:
        - rust/unwrap-on-option
        - rust/unwrap-on-result
        - rust/expect-used

analysis:
  dataflow:
    default-precision: high
```
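For context, the three excluded query IDs flag patterns like the following, which this config deliberately tolerates during active development (illustrative snippet, not from the telemt sources):

```rust
fn demo(opt: Option<u32>, res: Result<u32, String>) -> u32 {
    // rust/unwrap-on-option would flag this call.
    let a = opt.unwrap();
    // rust/unwrap-on-result would flag this one.
    let b = res.unwrap();
    // rust/expect-used would flag this one.
    let c = Some(1u32).expect("always present");
    a + b + c
}

fn main() {
    assert_eq!(demo(Some(1), Ok(2)), 4);
}
```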
**`.github/codeql/queries/common/ProductionOnly.qll`** (vendored, new file, 20 lines)

```ql
import rust

predicate isTestOnly(Item i) {
  exists(ConditionalCompilation cc |
    cc.getItem() = i and
    cc.getCfg().toString() = "test"
  )
}

predicate hasTestAttribute(Item i) {
  exists(Attribute a |
    a.getItem() = i and
    a.getName() = "test"
  )
}

predicate isProductionCode(Item i) {
  not isTestOnly(i) and
  not hasTestAttribute(i)
}
```
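For reference, a minimal Rust sketch of the code shapes these predicates are meant to match (illustrative code, not from the telemt sources): `isTestOnly` covers items gated behind `#[cfg(test)]`, `hasTestAttribute` covers `#[test]` functions, and everything else counts as production code.

```rust
// Matched by isTestOnly: the whole module exists only under cfg(test).
#[cfg(test)]
mod tests {
    use super::*;

    // Matched by hasTestAttribute: a #[test] function.
    #[test]
    fn frame_roundtrip() {
        assert_eq!(frame_len(), 4);
    }
}

// Matched by neither predicate, so isProductionCode holds and the
// security-extended / security-and-quality queries still apply here.
pub fn frame_len() -> usize {
    4
}
```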
**`.github/codeql/queries/qlpack.yml`** (vendored, new file, 4 lines)

```yaml
name: rust-production-only
version: 0.0.1
dependencies:
  codeql/rust-all: "*"
```
**`.github/workflows/codeql.yml`** (vendored, new file, 45 lines)

```yaml
name: "CodeQL Advanced"

on:
  push:
    branches: [ "*" ]
  pull_request:
    branches: [ "*" ]
  schedule:
    - cron: '0 0 * * 0'

jobs:
  analyze:
    name: Analyze (${{ matrix.language }})
    runs-on: ${{ (matrix.language == 'swift' && 'macos-latest') || 'ubuntu-latest' }}

    permissions:
      security-events: write
      packages: read
      actions: read
      contents: read

    strategy:
      fail-fast: false
      matrix:
        include:
          - language: actions
            build-mode: none
          - language: rust
            build-mode: none

    steps:
      - name: Checkout repository
        uses: actions/checkout@v4

      - name: Initialize CodeQL
        uses: github/codeql-action/init@v4
        with:
          languages: ${{ matrix.language }}
          build-mode: ${{ matrix.build-mode }}
          config-file: .github/codeql/codeql-config.yml

      - name: Perform CodeQL Analysis
        uses: github/codeql-action/analyze@v4
        with:
          category: "/language:${{ matrix.language }}"
```
**`.github/workflows/release.yml`** (vendored, new file, 139 lines)

```yaml
name: Release

on:
  push:
    tags:
      - '[0-9]+.[0-9]+.[0-9]+'
  workflow_dispatch:

permissions:
  contents: read
  packages: write

env:
  CARGO_TERM_COLOR: always

jobs:
  build:
    name: Build ${{ matrix.target }}
    runs-on: ubuntu-latest
    permissions:
      contents: read

    strategy:
      fail-fast: false
      matrix:
        include:
          - target: x86_64-unknown-linux-gnu
            artifact_name: telemt
            asset_name: telemt-x86_64-linux-gnu
          - target: aarch64-unknown-linux-gnu
            artifact_name: telemt
            asset_name: telemt-aarch64-linux-gnu
          - target: x86_64-unknown-linux-musl
            artifact_name: telemt
            asset_name: telemt-x86_64-linux-musl
          - target: aarch64-unknown-linux-musl
            artifact_name: telemt
            asset_name: telemt-aarch64-linux-musl

    steps:
      - uses: actions/checkout@v4

      - uses: dtolnay/rust-toolchain@v1
        with:
          toolchain: stable
          targets: ${{ matrix.target }}

      - name: Install cross-compilation tools
        run: |
          sudo apt-get update
          sudo apt-get install -y gcc-aarch64-linux-gnu

      - uses: actions/cache@v4
        with:
          path: |
            ~/.cargo/registry
            ~/.cargo/git
            target
          key: ${{ runner.os }}-${{ matrix.target }}-cargo-${{ hashFiles('**/Cargo.lock') }}
          restore-keys: |
            ${{ runner.os }}-${{ matrix.target }}-cargo-

      - name: Install cross
        run: cargo install cross --git https://github.com/cross-rs/cross

      - name: Build Release
        env:
          RUSTFLAGS: ${{ contains(matrix.target, 'musl') && '-C target-feature=+crt-static' || '' }}
        run: cross build --release --target ${{ matrix.target }}

      - name: Package binary
        run: |
          cd target/${{ matrix.target }}/release
          tar -czvf ${{ matrix.asset_name }}.tar.gz ${{ matrix.artifact_name }}
          sha256sum ${{ matrix.asset_name }}.tar.gz > ${{ matrix.asset_name }}.sha256

      - uses: actions/upload-artifact@v4
        with:
          name: ${{ matrix.asset_name }}
          path: |
            target/${{ matrix.target }}/release/${{ matrix.asset_name }}.tar.gz
            target/${{ matrix.target }}/release/${{ matrix.asset_name }}.sha256

  build-docker-image:
    needs: build
    runs-on: ubuntu-latest
    permissions:
      contents: read
      packages: write

    steps:
      - uses: actions/checkout@v4

      - uses: docker/setup-qemu-action@v3
      - uses: docker/setup-buildx-action@v3

      - name: Login to GHCR
        uses: docker/login-action@v3
        with:
          registry: ghcr.io
          username: ${{ github.actor }}
          password: ${{ secrets.GITHUB_TOKEN }}

      - name: Extract version
        id: vars
        run: echo "VERSION=${GITHUB_REF#refs/tags/}" >> $GITHUB_OUTPUT

      - name: Build and push
        uses: docker/build-push-action@v6
        with:
          context: .
          push: true
          tags: |
            ghcr.io/${{ github.repository }}:${{ steps.vars.outputs.VERSION }}
            ghcr.io/${{ github.repository }}:latest

  release:
    name: Create Release
    needs: build
    runs-on: ubuntu-latest
    permissions:
      contents: write

    steps:
      - uses: actions/checkout@v4
        with:
          fetch-depth: 0

      - uses: actions/download-artifact@v4
        with:
          path: artifacts

      - name: Create Release
        uses: softprops/action-gh-release@v2
        with:
          files: artifacts/**/*
          generate_release_notes: true
          draft: false
          prerelease: ${{ contains(github.ref, '-rc') || contains(github.ref, '-beta') || contains(github.ref, '-alpha') }}
```
**`.github/workflows/rust.yml`** (vendored, new file, 54 lines)

```yaml
name: Rust

on:
  push:
    branches: [ "*" ]
  pull_request:
    branches: [ "*" ]

env:
  CARGO_TERM_COLOR: always

jobs:
  build:
    name: Build
    runs-on: ubuntu-latest

    permissions:
      contents: read
      actions: write
      checks: write

    steps:
      - name: Checkout repository
        uses: actions/checkout@v4

      - name: Install latest stable Rust toolchain
        uses: dtolnay/rust-toolchain@stable
        with:
          components: rustfmt, clippy

      - name: Cache cargo registry & build artifacts
        uses: actions/cache@v4
        with:
          path: |
            ~/.cargo/registry
            ~/.cargo/git
            target
          key: ${{ runner.os }}-cargo-${{ hashFiles('**/Cargo.lock') }}
          restore-keys: |
            ${{ runner.os }}-cargo-

      - name: Build Release
        run: cargo build --release --verbose

      - name: Run tests
        run: cargo test --verbose

      # Clippy does not fail on warnings: telemt is under active
      # development and currently carries many warnings.
      - name: Run clippy
        run: cargo clippy -- --cap-lints warn

      - name: Check for unused dependencies
        run: cargo udeps || true
```
**`.gitignore`** (vendored, 2 lines added)

```diff
@@ -19,3 +19,5 @@ target
 # and can be added to the global gitignore or merged into this file. For a more nuclear
 # option (not recommended) you can uncomment the following to ignore the entire idea folder.
 #.idea/
+
+proxy-secret
```
**`.kilocode/rules-architect/AGENTS.md`** (new file, 58 lines)

# Architect Mode Rules for Telemt

## Architecture Overview

```mermaid
graph TB
    subgraph Entry
        Client[Clients] --> Listener[TCP/Unix Listener]
    end

    subgraph Proxy Layer
        Listener --> ClientHandler[ClientHandler]
        ClientHandler --> Handshake[Handshake Validator]
        Handshake --> |Valid| Relay[Relay Layer]
        Handshake --> |Invalid| Masking[Masking/TLS Fronting]
    end

    subgraph Transport
        Relay --> MiddleProxy[Middle-End Proxy Pool]
        Relay --> DirectRelay[Direct DC Relay]
        MiddleProxy --> TelegramDC[Telegram DCs]
        DirectRelay --> TelegramDC
    end
```

## Module Dependencies
- [`src/main.rs`](src/main.rs) - Entry point, spawns all async tasks
- [`src/config/`](src/config/) - Configuration loading with auto-migration
- [`src/error.rs`](src/error.rs) - Error types, must be used by all modules
- [`src/crypto/`](src/crypto/) - AES, SHA, random number generation
- [`src/protocol/`](src/protocol/) - MTProto constants, frame encoding, obfuscation
- [`src/stream/`](src/stream/) - Stream wrappers, buffer pool, frame codecs
- [`src/proxy/`](src/proxy/) - Client handling, handshake, relay logic
- [`src/transport/`](src/transport/) - Upstream management, middle-proxy, SOCKS support
- [`src/stats/`](src/stats/) - Statistics and replay protection
- [`src/ip_tracker.rs`](src/ip_tracker.rs) - Per-user IP tracking

## Key Architectural Constraints

### Middle-End Proxy Mode
- Requires a public IP on the interface OR 1:1 NAT with STUN probing
- Uses a separate `proxy-secret` from Telegram (NOT user secrets)
- Falls back to direct mode automatically on STUN mismatch

### TLS Fronting
- Invalid handshakes are transparently proxied to `mask_host`
- This is critical for DPI evasion - do not change this behavior
- `mask_unix_sock` and `mask_host` are mutually exclusive

### Stream Architecture
- Buffer pool is shared globally via Arc - prevents allocation storms
- Frame codecs implement tokio-util Encoder/Decoder traits (a minimal `Decoder` sketch follows this file)
- State machine in [`src/stream/state.rs`](src/stream/state.rs) manages stream transitions

### Configuration Migration
- [`ProxyConfig::load()`](src/config/mod.rs:641) mutates config in-place
- New fields must have sensible defaults
- DC203 override is auto-injected for CDN/media support
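A minimal sketch of the Encoder/Decoder pattern referenced above, using tokio-util's `Decoder` trait with a hypothetical length-prefixed framing (the codec name and frame layout are illustrative, not telemt's actual MTProto framing):

```rust
use bytes::{Buf, BytesMut};
use tokio_util::codec::Decoder;

// Hypothetical codec: a 4-byte little-endian length prefix, then the payload.
struct LengthPrefixedCodec;

impl Decoder for LengthPrefixedCodec {
    type Item = BytesMut;
    type Error = std::io::Error;

    fn decode(&mut self, src: &mut BytesMut) -> Result<Option<Self::Item>, Self::Error> {
        // Not enough bytes for the length prefix yet: ask for more.
        if src.len() < 4 {
            return Ok(None);
        }
        let mut len_bytes = [0u8; 4];
        len_bytes.copy_from_slice(&src[..4]);
        let len = u32::from_le_bytes(len_bytes) as usize;

        // Incomplete frame: reserve space and wait for the rest.
        if src.len() < 4 + len {
            src.reserve(4 + len - src.len());
            return Ok(None);
        }

        // Consume the prefix and hand back exactly one frame.
        src.advance(4);
        Ok(Some(src.split_to(len)))
    }
}
```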
**`.kilocode/rules-code/AGENTS.md`** (new file, 23 lines)

# Code Mode Rules for Telemt

## Error Handling
- Always use [`ProxyError`](src/error.rs:168) from [`src/error.rs`](src/error.rs) for proxy operations
- [`HandshakeResult<T,R,W>`](src/error.rs:292) returns streams on bad client - these MUST be returned for masking, never dropped
- Use the [`Recoverable`](src/error.rs:110) trait to check if errors are retryable

## Configuration Changes
- [`ProxyConfig::load()`](src/config/mod.rs:641) auto-mutates config - new fields should have defaults
- DC203 override is auto-injected if missing - do not remove this behavior
- When adding config fields, add migration logic in [`ProxyConfig::load()`](src/config/mod.rs:641)

## Crypto Code
- [`SecureRandom`](src/crypto/random.rs) from [`src/crypto/random.rs`](src/crypto/random.rs) must be used for all crypto operations
- Never use `rand::thread_rng()` directly - use the shared `Arc<SecureRandom>` (a sketch follows this file)

## Stream Handling
- Buffer pool [`BufferPool`](src/stream/buffer_pool.rs) is shared via Arc - always use it instead of allocating
- Frame codecs in [`src/stream/frame_codec.rs`](src/stream/frame_codec.rs) implement tokio-util's Encoder/Decoder traits

## Testing
- Tests are inline in modules using `#[cfg(test)]`
- Use `cargo test --lib <module_name>` to run tests for specific modules
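A minimal sketch of the shared-RNG rule above. The real `SecureRandom` lives in src/crypto/random.rs and its API is not shown here, so the `fill_bytes` method name and the stub body are assumptions for illustration only:

```rust
use std::sync::Arc;

// Stand-in for the project's SecureRandom; the real type is CSPRNG-backed.
struct SecureRandom;

impl SecureRandom {
    // Assumed method name; the real implementation fills from a CSPRNG.
    fn fill_bytes(&self, buf: &mut [u8]) {
        buf.fill(0);
    }
}

// Correct pattern: crypto consumers borrow the shared handle instead of
// reaching for rand::thread_rng() themselves.
fn generate_nonce(rng: &Arc<SecureRandom>) -> [u8; 16] {
    let mut nonce = [0u8; 16];
    rng.fill_bytes(&mut nonce);
    nonce
}

fn main() {
    let rng = Arc::new(SecureRandom);
    let _nonce = generate_nonce(&rng);
}
```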
**`.kilocode/rules-debug/AGENTS.md`** (new file, 27 lines)

# Debug Mode Rules for Telemt

## Logging
- The `RUST_LOG` environment variable takes absolute priority over all config log levels
- Log levels: `trace`, `debug`, `info`, `warn`, `error`
- Use `RUST_LOG=debug cargo run` for detailed operational logs
- Use `RUST_LOG=trace cargo run` for full protocol-level debugging

## Middle-End Proxy Debugging
- Set the `ME_DIAG=1` environment variable for high-precision cryptography diagnostics
- STUN probe results are logged at startup - check for a mismatch between the local and reflected IP
- If Middle-End fails, check that `proxy_secret_path` points to a valid file from https://core.telegram.org/getProxySecret

## Connection Issues
- DC connectivity is logged at startup with RTT measurements
- If DC ping fails, check `dc_overrides` for custom addresses
- Use `prefer_ipv6=false` in config if IPv6 is unreliable

## TLS Fronting Issues
- Invalid handshakes are proxied to `mask_host` - check that this host is reachable
- `mask_unix_sock` and `mask_host` are mutually exclusive - only one can be set
- If `mask_unix_sock` is set, the socket must exist before connections arrive

## Common Errors
- `ReplayAttack` - the client replayed a handshake nonce, a potential attack
- `TimeSkew` - the client clock is off; can be disabled with `ignore_time_skew=true`
- `TgHandshakeTimeout` - the upstream DC connection failed, check the network
**`AGENTS.md`** (new file, 410 lines)

## System Prompt — Production Rust Codebase: Modification and Architecture Guidelines

You are a senior Rust Engineer and principal Rust Architect acting as a strict code reviewer and implementation partner.
Your responses are precise, minimal, and architecturally sound. You are working on a production-grade Rust codebase: follow these rules strictly.

---

### 0. Priority Resolution — Scope Control

This section resolves conflicts between code quality enforcement and scope limitation.

When editing or extending existing code, you MUST audit the affected files and fix:

- Comment style violations (missing, non-English, decorative, trailing).
- Missing or incorrect documentation on public items.
- Comment placement issues (trailing comments → move above the code).

These are **coordinated changes** — they are always in scope.

The following changes are FORBIDDEN without explicit user approval:

- Renaming types, traits, functions, modules, or variables.
- Altering business logic, control flow, or data transformations.
- Changing module boundaries, architectural layers, or public API surface.
- Adding or removing functions, structs, enums, or trait implementations.
- Fixing compiler warnings or removing unused code.

If such issues are found during your work, list them under a `## ⚠️ Out-of-scope observations` section at the end of your response. Include file path, context, and a brief description. Do not apply these changes.

The user can override this behavior with explicit commands:

- `"Do not modify existing code"` — touch only what was requested, skip coordinated fixes.
- `"Make minimal changes"` — no coordinated fixes, narrowest possible diff.
- `"Fix everything"` — apply all coordinated fixes and out-of-scope observations.

### Core Rule

The codebase must never enter an invalid intermediate state.
No response may leave the repository in a condition that requires follow-up fixes.

---

### 1. Comments and Documentation

- All comments MUST be written in English.
- Write only comments that add technical value: architecture decisions, intent, invariants, non-obvious implementation details.
- Place all comments on separate lines above the relevant code.
- Use `///` doc-comments for public items. Use `//` for internal clarifications.

Correct example:

```rust
// Handles MTProto client authentication and establishes encrypted session state.
fn handle_authenticated_client(...) { ... }
```

Incorrect examples:

```rust
let x = 5; // set x to 5
```

```rust
// This function does stuff
fn do_stuff() { ... }
```

---

### 2. File Size and Module Structure

- Files MUST NOT exceed 350–550 lines.
- If a file exceeds this limit, split it into submodules organized by responsibility (e.g., protocol, transport, state, handlers).
- Parent modules MUST declare and describe their submodules.
- Maintain clear architectural boundaries between modules.

Correct example:

```rust
// Client connection handling logic.
// Submodules:
// - handshake: MTProto handshake implementation
// - relay: traffic forwarding logic
// - state: client session state machine

pub mod handshake;
pub mod relay;
pub mod state;
```

Git discipline:

- Use local git for versioning and diffs.
- Write clear, descriptive commit messages in English that explain both *what* changed and *why*.

---

### 3. Formatting

- Preserve the existing formatting style of the project exactly as-is.
- Reformat code only when explicitly instructed to do so.
- Do not run `cargo fmt` unless explicitly instructed.

---

### 4. Change Safety and Validation

- If anything is unclear, STOP and ask specific, targeted questions before proceeding.
- List exactly what is ambiguous and offer possible interpretations for the user to choose from.
- Prefer clarification over assumptions. Do not guess intent, behavior, or missing requirements.
- Actively ask questions before making architectural or behavioral changes.

---

### 5. Warnings and Unused Code

- Leave all warnings, unused variables, functions, imports, and dead code untouched unless explicitly instructed to modify them.
- These may be intentional or part of work-in-progress code.
- `todo!()` and `unimplemented!()` are permitted and should not be removed or replaced unless explicitly instructed.

---

### 6. Architectural Integrity

- Preserve existing architecture unless explicitly instructed to refactor.
- Do not introduce hidden behavioral changes.
- Do not introduce implicit refactors.
- Keep changes minimal, isolated, and intentional.

---

### 7. When Modifying Code

You MUST:

- Maintain architectural consistency with the existing codebase.
- Document non-obvious logic with comments that describe *why*, not *what*.
- Limit changes strictly to the requested scope (plus coordinated fixes per Section 0).
- Keep all existing symbol names unless renaming is explicitly requested.
- Preserve global formatting as-is.
- Leave the codebase in a self-contained, compilable, runnable state after every modification.

You MUST NOT:

- Use placeholders: no `// ... rest of code`, no `// implement here`, no `/* TODO */` stubs that replace existing working code. Write the full, working implementation. If the implementation is unclear, ask first.
- Refactor code outside the requested scope.
- Make speculative improvements.
- Spawn multiple agents for EDITING.
- Produce partial changes.
- Introduce references to entities that are not yet implemented.
- Leave TODO placeholders in production paths.

Note: `todo!()` and `unimplemented!()` are allowed as idiomatic Rust markers for genuinely unfinished code paths.

Every change must:
- compile,
- pass type checks,
- have no broken imports,
- preserve invariants,
- not rely on future patches.

If the task requires multiple phases:
- either implement all required phases,
- or explicitly refuse and explain missing dependencies.

---

### 8. Decision Process for Complex Changes

When facing a non-trivial modification, follow this sequence:

1. **Clarify**: Restate the task in one sentence to confirm understanding.
2. **Assess impact**: Identify which modules, types, and invariants are affected.
3. **Propose**: Describe the intended change before implementing it.
4. **Implement**: Make the minimal, isolated change.
5. **Verify**: Explain why the change preserves existing behavior and architectural integrity.

---

### 9. Context Awareness

- When provided with partial code, assume the rest of the codebase exists and functions correctly unless stated otherwise.
- Reference existing types, functions, and module structures by their actual names as shown in the provided code.
- When the provided context is insufficient to make a safe change, request the missing context explicitly.
- Spawn multiple agents for SEARCHING information, code, functions.

---

### 10. Response Format

#### Language Policy

- Code, comments, commit messages, and documentation: **English** ONLY.
- Reasoning and explanations in the response text: the language of the prompt.

#### Response Structure

Your response MUST consist of two sections:

**Section 1: `## Reasoning`**

- What needs to be done and why.
- Which files and modules are affected.
- Architectural decisions and their rationale.
- Potential risks or side effects.

**Section 2: `## Changes`**

- For each modified or created file: the filename on a separate line in backticks, followed by the code block.
- For files **under 200 lines**: return the full file with all changes applied.
- For files **over 200 lines**: return only the changed functions/blocks with at least 3 lines of surrounding context above and below. If the user requests the full file, provide it.
- New files: full file content.
- End with a suggested git commit message in English.

#### Reporting Out-of-Scope Issues

If during modification you discover issues outside the requested scope (potential bugs, unsafe code, architectural concerns, missing error handling, unused imports, dead code):

- Do not fix them silently.
- List them under `## ⚠️ Out-of-scope observations` at the end of your response.
- Include: file path, line/function context, brief description of the issue, and severity estimate.

#### Splitting Protocol

If the response exceeds the output limit:

1. End the current part with: **SPLIT: PART N — CONTINUE? (remaining: file_list)**
2. List the files that will be provided in subsequent parts.
3. Wait for user confirmation before continuing.
4. No single file may be split across parts.

## 11. Anti-LLM Degeneration Safeguards (Principal-Paranoid, Visionary)

This section exists to prevent common LLM failure modes: scope creep, semantic drift, cargo-cult refactors, performance regressions, contract breakage, and hidden behavior changes.

### 11.1 Non-Negotiable Invariants

- **No semantic drift:** Do not reinterpret requirements, rename concepts, or change meaning of existing terms.
- **No "helpful refactors":** Any refactor not explicitly requested is forbidden.
- **No architectural drift:** Do not introduce new layers, patterns, abstractions, or "clean architecture" migrations unless requested.
- **No dependency drift:** Do not add crates, features, or versions unless explicitly requested.
- **No behavior drift:** If a change could alter runtime behavior, you MUST call it out explicitly in `## Reasoning` and justify it.

### 11.2 Minimal Surface Area Rule

- Touch the smallest number of files possible.
- Prefer local changes over cross-cutting edits.
- Do not "align style" across a file/module—only adjust the modified region.
- Do not reorder items, imports, or code unless required for correctness.

### 11.3 No Implicit Contract Changes

Contracts include:
- public APIs, trait bounds, visibility, error types, timeouts/retries, logging semantics, metrics semantics,
- protocol formats, framing, padding, keepalive cadence, state machine transitions,
- concurrency guarantees, cancellation behavior, backpressure behavior.

Rule:
- If you change a contract, you MUST update all dependents in the same patch AND document the contract delta explicitly.

### 11.4 Hot-Path Preservation (Performance Paranoia)

- Do not introduce extra allocations, cloning, or formatting in hot paths.
- Do not add logging/metrics on hot paths unless requested.
- Do not add new locks or broaden lock scope.
- Prefer `&str` / slices / borrowed data where the codebase already does so.
- Avoid `String` building for errors/logs if it changes current patterns.

If you cannot prove performance neutrality, label it as risk in `## Reasoning`.

### 11.5 Async / Concurrency Safety (Cancellation & Backpressure)

- No blocking calls inside async contexts.
- Preserve cancellation safety: do not introduce `await` between lock acquisition and critical invariants unless already present (as sketched below).
- Preserve backpressure: do not replace bounded channels with unbounded, do not remove flow control.
- Do not change task lifecycle semantics (spawn patterns, join handles, shutdown order) unless requested.
- Do not introduce `tokio::spawn` / background tasks unless explicitly requested.
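A minimal sketch of the lock-then-await hazard this rule guards against (illustrative types only, not from the codebase):

```rust
use std::sync::Arc;
use std::time::Duration;
use tokio::sync::Mutex;

// Risky: the lock is held across an await point, so a slow peer or a
// cancelled future stalls every other task waiting on this mutex, and
// the guarded invariant is exposed mid-await.
async fn risky(state: Arc<Mutex<Vec<u8>>>) {
    let mut guard = state.lock().await;
    guard.push(1);
    tokio::time::sleep(Duration::from_millis(10)).await; // lock still held
    guard.push(2);
}

// Safer: finish the critical section, drop the guard, then await.
async fn safer(state: Arc<Mutex<Vec<u8>>>) {
    {
        let mut guard = state.lock().await;
        guard.push(1);
        guard.push(2);
    }
    tokio::time::sleep(Duration::from_millis(10)).await;
}
```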
### 11.6 Error Semantics Integrity

- Do not replace structured errors with generic strings.
- Do not widen/narrow error types or change error categories without explicit approval.
- Avoid introducing panics in production paths (`unwrap`, `expect`) unless the codebase already treats that path as impossible and documented.

### 11.7 "No New Abstractions" Default

Default stance:
- No new traits, generics, macros, builder patterns, type-level cleverness, or "frameworking".
- If abstraction is necessary, prefer the smallest possible local helper (private function) and justify it.

### 11.8 Negative-Diff Protection

Avoid "diff inflation" patterns:
- mass edits,
- moving code between files,
- rewrapping long lines,
- rearranging module order,
- renaming for aesthetics.

If a diff becomes large, STOP and ask before proceeding.

### 11.9 Consistency with Existing Style (But Not Style Refactors)

- Follow existing conventions of the touched module (naming, error style, return patterns).
- Do not enforce global "best practices" that the codebase does not already use.

### 11.10 Two-Phase Safety Gate (Plan → Patch)

For non-trivial changes:
1) Provide a micro-plan (1–5 bullets): what files, what functions, what invariants, what risks.
2) Implement exactly that plan—no extra improvements.

### 11.11 Pre-Response Checklist (Hard Gate)

Before final output, verify internally:

- No unresolved symbols / broken imports.
- No partially updated call sites.
- No new public surface changes unless requested.
- No transitional states / TODO placeholders replacing working code.
- Changes are atomic: the repository remains buildable and runnable.
- Any behavior change is explicitly stated.

If any check fails: fix it before responding.

### 11.12 Truthfulness Policy (No Hallucinated Claims)

- Do not claim "this compiles" or "tests pass" unless you actually verified with the available tooling/context.
- If verification is not possible, state: "Not executed; reasoning-based consistency check only."

### 11.13 Visionary Guardrail: Preserve Optionality

When multiple valid designs exist, prefer the one that:
- minimally constrains future evolution,
- preserves existing extension points,
- avoids locking the project into a new paradigm,
- keeps interfaces stable and implementation local.

Default to reversible changes.

### 11.14 Stop Conditions

STOP and ask targeted questions if:
- required context is missing,
- a change would cross module boundaries,
- a contract might change,
- concurrency/protocol invariants are unclear,
- the diff is growing beyond a minimal patch.

No guessing.

### 12. Invariant Preservation

You MUST explicitly preserve:
- Thread-safety guarantees (`Send` / `Sync` expectations).
- Memory safety assumptions (no hidden `unsafe` expansions).
- Lock ordering and deadlock invariants.
- State machine correctness (no new invalid transitions).
- Backward compatibility of serialized formats (if applicable).

If a change touches concurrency, networking, protocol logic, or state machines,
you MUST explain why existing invariants remain valid.

### 13. Error Handling Policy

- Do not replace structured errors with generic strings.
- Preserve existing error propagation semantics.
- Do not widen or narrow error types without approval.
- Avoid introducing panics in production paths.
- Prefer explicit error mapping over implicit conversions.

### 14. Test Safety

- Do not modify existing tests unless the task explicitly requires it.
- Do not weaken assertions.
- Preserve determinism in testable components.

### 15. Security Constraints

- Do not weaken cryptographic assumptions.
- Do not modify key derivation logic without explicit request.
- Do not change constant-time behavior.
- Do not introduce logging of secrets.
- Preserve TLS/MTProto protocol correctness.

### 16. Logging Policy

- Do not introduce excessive logging in hot paths.
- Do not log sensitive data.
- Preserve existing log levels and style.

### 17. Pre-Response Verification Checklist

Before producing the final answer, verify internally:

- The change compiles conceptually.
- No unresolved symbols exist.
- All modified call sites are updated.
- No accidental behavioral changes were introduced.
- Architectural boundaries remain intact.

### 18. Atomic Change Principle

Every patch must be **atomic and production-safe**.

* **Self-contained** — no dependency on future patches or unimplemented components.
* **Build-safe** — the project must compile successfully after the change.
* **Contract-consistent** — no partial interface or behavioral changes; all dependent code must be updated within the same patch.
* **No transitional states** — no placeholders, incomplete refactors, or temporary inconsistencies.

**Invariant:** After any single patch, the repository remains fully functional and buildable.
**`CONTRIBUTING.md`** (new file, 19 lines)

# Issues - Rules
## What it is not
- NOT Question and Answer
- NOT Helpdesk

# Pull Requests - Rules
## General
- ONLY signed and verified commits
- ONLY from your name
- DO NOT commit with `codex` or `claude` as author/committer
- PREFER the `flow` branch for development, not `main`

## AI
We are not against modern tools like AI, where you act as a principal or architect, but we consider it important that:

- you really understand what you're doing
- you understand the relationships and dependencies of the components being modified
- you understand the architecture of Telegram MTProto, MTProxy, and the Middle-End KDF at least generically
- you DO NOT commit for the sake of commits, but to help the community, core developers, and ordinary users
**`Cargo.lock`** (generated, new file, 3277 lines) — diff suppressed because it is too large.
**`Cargo.toml`** (49 changed lines)

```diff
@@ -1,16 +1,15 @@
 [package]
 name = "telemt"
-version = "1.0.0"
-edition = "2021"
-rust-version = "1.75"
+version = "3.3.11"
+edition = "2024"

 [dependencies]
 # C
 libc = "0.2"

 # Async runtime
-tokio = { version = "1.35", features = ["full", "tracing"] }
-tokio-util = { version = "0.7", features = ["codec"] }
+tokio = { version = "1.42", features = ["full", "tracing"] }
+tokio-util = { version = "0.7", features = ["full"] }
@@ -20,41 +19,57 @@ sha2 = "0.10"
 sha1 = "0.10"
 md-5 = "0.10"
 hmac = "0.12"
-crc32fast = "1.3"
+crc32fast = "1.4"
 crc32c = "0.6"
 zeroize = { version = "1.8", features = ["derive"] }

 # Network
 socket2 = { version = "0.5", features = ["all"] }
-rustls = "0.22"
 nix = { version = "0.28", default-features = false, features = ["net"] }

-# Serial
+# Serialization
 serde = { version = "1.0", features = ["derive"] }
 serde_json = "1.0"
 toml = "0.8"
 x509-parser = "0.15"

 # Utils
-bytes = "1.5"
-thiserror = "1.0"
+bytes = "1.9"
+thiserror = "2.0"
 tracing = "0.1"
 tracing-subscriber = { version = "0.3", features = ["env-filter"] }
 parking_lot = "0.12"
 dashmap = "5.5"
-lru = "0.12"
-rand = "0.8"
+lru = "0.16"
+rand = "0.9"
 chrono = { version = "0.4", features = ["serde"] }
 hex = "0.4"
-base64 = "0.21"
+base64 = "0.22"
 url = "2.5"
-regex = "1.10"
-once_cell = "1.19"
+regex = "1.11"
 crossbeam-queue = "0.3"
 num-bigint = "0.4"
 num-traits = "0.2"
 anyhow = "1.0"

 # HTTP
-reqwest = { version = "0.11", features = ["rustls-tls"], default-features = false }
+reqwest = { version = "0.12", features = ["rustls-tls"], default-features = false }
 notify = { version = "6", features = ["macos_fsevent"] }
 ipnetwork = "0.20"
 hyper = { version = "1", features = ["server", "http1"] }
 hyper-util = { version = "0.1", features = ["tokio", "server-auto"] }
 http-body-util = "0.1"
 httpdate = "1.0"
 tokio-rustls = { version = "0.26", default-features = false, features = ["tls12"] }
+rustls = { version = "0.23", default-features = false, features = ["std", "tls12", "ring"] }
 webpki-roots = "0.26"

 [dev-dependencies]
 tokio-test = "0.4"
 criterion = "0.5"
 proptest = "1.4"
 futures = "0.3"

 [[bench]]
 name = "crypto_bench"
 harness = false
```
**`Dockerfile`** (new file, 43 lines)

```dockerfile
# ==========================
# Stage 1: Build
# ==========================
FROM rust:1.88-slim-bookworm AS builder

RUN apt-get update && apt-get install -y --no-install-recommends \
    pkg-config \
    && rm -rf /var/lib/apt/lists/*

WORKDIR /build

COPY Cargo.toml Cargo.lock* ./
RUN mkdir src && echo 'fn main() {}' > src/main.rs && \
    cargo build --release 2>/dev/null || true && \
    rm -rf src

COPY . .
RUN cargo build --release && strip target/release/telemt

# ==========================
# Stage 2: Runtime
# ==========================
FROM debian:bookworm-slim

RUN apt-get update && apt-get install -y --no-install-recommends \
    ca-certificates \
    && rm -rf /var/lib/apt/lists/*

RUN useradd -r -s /usr/sbin/nologin telemt

WORKDIR /app

COPY --from=builder /build/target/release/telemt /app/telemt
COPY config.toml /app/config.toml

RUN chown -R telemt:telemt /app
USER telemt

EXPOSE 443
EXPOSE 9090

ENTRYPOINT ["/app/telemt"]
CMD ["config.toml"]
```
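The dummy `src/main.rs` build in Stage 1 is the common Docker dependency-caching pattern: compiling against an empty main first builds only the Cargo dependencies, so that layer is reused on rebuilds until Cargo.toml or Cargo.lock changes, and only the final `cargo build` recompiles the application code.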
**`LICENSING.md`** (new file, 17 lines)

# LICENSING
## Licenses for Versions

| Version | License |
|---------|------------|
| 1.0 | NO LICENSE |
| 1.1 | NO LICENSE |
| 1.2 | NO LICENSE |
| 2.0 | NO LICENSE |
| 3.0 | TELEMT UL1 |

### License Types
- **NO LICENSE** = ***ALL RIGHTS RESERVED***
- **TELEMT UL1** - a work-in-progress license for the `telemt` source code, which encourages:
  - fair use,
  - contributions,
  - distribution,
  - but requires crediting the authors
**`README.md`** (312 changed lines)

```diff
@@ -1,2 +1,310 @@
-# telemt
-MTProxy for Telegram on Rust + Tokio
```

# Telemt - MTProxy on Rust + Tokio

***Löst Probleme, bevor andere überhaupt wissen, dass sie existieren*** / ***It solves problems before others even realize they exist***

**Telemt** is a fast, secure, and feature-rich MTProxy server written in Rust: it fully implements the official Telegram proxy algorithm and adds many production-ready improvements, such as:
- [ME Pool + Reader/Writer + Registry + Refill + Adaptive Floor + Trio-State + Generation Lifecycle](https://github.com/telemt/telemt/blob/main/docs/model/MODEL.en.md)
- [Fully covered management API](https://github.com/telemt/telemt/blob/main/docs/API.md)
- Anti-replay protection on a sliding window (see the sketch after this list)
- Prometheus-format metrics
- TLS fronting and TCP splicing for masking from "prying" eyes
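A minimal sketch of the sliding-window idea behind the anti-replay feature (illustrative only; telemt's actual implementation lives in src/stats/ and almost certainly differs):

```rust
use std::collections::{HashSet, VecDeque};
use std::time::{Duration, Instant};

/// Rejects handshake nonces that were already seen inside the window.
struct ReplayWindow {
    window: Duration,
    seen: HashSet<[u8; 16]>,
    order: VecDeque<(Instant, [u8; 16])>,
}

impl ReplayWindow {
    fn new(window: Duration) -> Self {
        Self { window, seen: HashSet::new(), order: VecDeque::new() }
    }

    /// Returns false if `nonce` is a replay; otherwise records it.
    fn check_and_insert(&mut self, nonce: [u8; 16], now: Instant) -> bool {
        // Slide the window: evict entries older than the cutoff.
        while let Some(&(t, old)) = self.order.front() {
            if now.duration_since(t) <= self.window {
                break;
            }
            self.order.pop_front();
            self.seen.remove(&old);
        }
        if !self.seen.insert(nonce) {
            // Same nonce inside the window: treat as a replayed handshake.
            return false;
        }
        self.order.push_back((now, nonce));
        true
    }
}
```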
[**Telemt Chat in Telegram**](https://t.me/telemtrs)
|
||||
|
||||
## NEWS and EMERGENCY
|
||||
### ✈️ Telemt 3 is released!
|
||||
<table>
|
||||
<tr>
|
||||
<td width="50%" valign="top">
|
||||
|
||||
### 🇷🇺 RU
|
||||
|
||||
#### Релиз 3.3.5 LTS - 6 марта
|
||||
|
||||
6 марта мы выпустили Telemt **3.3.5**
|
||||
|
||||
Это [3.3.5 - первая LTS-версия telemt](https://github.com/telemt/telemt/releases/tag/3.3.5)!
|
||||
|
||||
В ней используется:
|
||||
- новый алгоритм ME NoWait для непревзойдённо быстрого восстановления пула
|
||||
- Adaptive Floor, поддерживающий количество ME Writer на оптимальном уровне
|
||||
- модель усовершенствованного доступа к KDF Fingerprint на RwLock
|
||||
- строгая привязка Middle-End к DC-ID с предсказуемым алгоритмом деградации и самовосстановления
|
||||
|
||||
Telemt Control API V1 в 3.3.5 включает:
|
||||
- несколько режимов работы в зависимости от доступных ресурсов
|
||||
- снапшот-модель для живых метрик без вмешательства в hot-path
|
||||
- минималистичный набор запросов для управления пользователями
|
||||
|
||||
Будем рады вашему фидбеку и предложениям по улучшению — особенно в части **API**, **статистики**, **UX**
|
||||
|
||||
---
|
||||
|
||||
Если у вас есть компетенции в:
|
||||
|
||||
- Асинхронных сетевых приложениях
|
||||
- Анализе трафика
|
||||
- Реверс-инжиниринге
|
||||
- Сетевых расследованиях
|
||||
|
||||
Мы открыты к архитектурным предложениям, идеям и pull requests
|
||||
</td>
|
||||
<td width="50%" valign="top">
|
||||
|
||||
### 🇬🇧 EN
|
||||
|
||||
#### Release 3.3.5 LTS - March 6
|
||||
|
||||
On March 6, we released Telemt **3.3.5**
|
||||
|
||||
This is [3.3.5 - the first LTS release of telemt](https://github.com/telemt/telemt/releases/tag/3.3.5)
|
||||
|
||||
It introduces:
|
||||
- the new ME NoWait algorithm for exceptionally fast pool recovery
|
||||
- Adaptive Floor, which maintains the number of ME Writers at an optimal level
|
||||
- an improved KDF Fingerprint access model based on RwLock
|
||||
- strict binding of Middle-End instances to DC-ID with a predictable degradation and self-recovery algorithm
|
||||
|
||||
Telemt Control API V1 in version 3.3.5 includes:
|
||||
- multiple operating modes depending on available resources
|
||||
- a snapshot-based model for live metrics without interfering with the hot path
|
||||
- a minimalistic request set for user management
|
||||
|
||||
We are looking forward to your feedback and improvement proposals — especially regarding **API**, **statistics**, **UX**
|
||||
|
||||
---
|
||||
|
||||
If you have expertise in:
|
||||
|
||||
- Asynchronous network applications
|
||||
- Traffic analysis
|
||||
- Reverse engineering
|
||||
- Network forensics
|
||||
|
||||
We welcome ideas, architectural feedback, and pull requests.
|
||||
</td>
|
||||
</tr>
|
||||
</table>
|
||||
|
||||
# Features
|
||||
💥 The configuration structure has changed since version 1.1.0.0. Update it in your environment!
|
||||
|
||||
⚓ Our implementation of **TLS-fronting** is one of the most deeply debugged, focused, and advanced available, and it is *almost* **"behaviorally consistent with the real thing"**: we are confident we have it right - [see the evidence from our validation and traces](#recognizability-for-dpi-and-crawler)
|
||||
|
||||
⚓ Our ***Middle-End Pool*** is the fastest by design in standard scenarios, compared to other implementations of connecting to the Middle-End Proxy: not dramatically, but consistently
|
||||
|
||||
- Full support for all official MTProto proxy modes:
|
||||
- Classic
|
||||
- Secure - with `dd` prefix
|
||||
- Fake TLS - with `ee` prefix + SNI fronting
|
||||
- Replay attack protection
|
||||
- Optional traffic masking: forward unrecognized connections to a real web server, e.g. GitHub 🤪
|
||||
- Configurable keepalives + timeouts + IPv6 and "Fast Mode"
|
||||
- Graceful shutdown on Ctrl+C
|
||||
- Extensive logging at `trace` and `debug` levels via the `RUST_LOG` environment variable
|
||||
|
||||
# GOTO
|
||||
- [Telemt - MTProxy on Rust + Tokio](#telemt---mtproxy-on-rust--tokio)
|
||||
- [NEWS and EMERGENCY](#news-and-emergency)
|
||||
- [✈️ Telemt 3 is released!](#️-telemt-3-is-released)
|
||||
- [🇷🇺 RU](#-ru)
|
||||
- [Релиз 3.3.5 LTS - 6 марта](#релиз-335-lts---6-марта)
|
||||
- [🇬🇧 EN](#-en)
|
||||
- [Release 3.3.5 LTS - March 6](#release-335-lts---march-6)
|
||||
- [Features](#features)
|
||||
- [GOTO](#goto)
|
||||
- [Quick Start Guide](#quick-start-guide)
|
||||
- [FAQ](#faq)
|
||||
- [Recognizability for DPI and crawler](#recognizability-for-dpi-and-crawler)
|
||||
- [Client WITH secret-key accesses the MTProxy resource:](#client-with-secret-key-accesses-the-mtproxy-resource)
|
||||
- [Client WITHOUT secret-key gets transparent access to the specified resource:](#client-without-secret-key-gets-transparent-access-to-the-specified-resource)
|
||||
- [Telegram Calls via MTProxy](#telegram-calls-via-mtproxy)
|
||||
- [How does DPI see MTProxy TLS?](#how-does-dpi-see-mtproxy-tls)
|
||||
- [Whitelist on IP](#whitelist-on-ip)
|
||||
- [Too many open files](#too-many-open-files)
|
||||
- [Build](#build)
|
||||
- [Why Rust?](#why-rust)
|
||||
- [Issues](#issues)
|
||||
- [Roadmap](#roadmap)
|
||||
|
||||
|
||||
## Quick Start Guide
|
||||
- [Quick Start Guide RU](docs/QUICK_START_GUIDE.ru.md)
|
||||
- [Quick Start Guide EN](docs/QUICK_START_GUIDE.en.md)
|
||||
|
||||
## FAQ
|
||||
|
||||
- [FAQ RU](docs/FAQ.ru.md)
|
||||
- [FAQ EN](docs/FAQ.en.md)
|
||||
|
||||
### Recognizability for DPI and crawler
|
||||
Since version 1.1.0.0, masking has been debugged to near-perfection: for all clients that do not "present" a key,
|
||||
we transparently direct traffic to the target host!
|
||||
|
||||
- We consider this a breakthrough feature, which has no stable analogues today
|
||||
- Based on this: if `telemt` is configured correctly, **TLS mode is completely identical to a real-life handshake + communication** with the specified host
|
||||
- Here is our evidence:
|
||||
- 212.220.88.77 - "dummy" host, running `telemt`
|
||||
- `petrovich.ru` - `tls` + `masking` host, in HEX: `706574726f766963682e7275`
|
||||
- **No MITM + No Fake Certificates/Crypto** = pure transparent *TCP Splice* to "best" upstream: MTProxy or tls/mask-host:
|
||||
- DPI sees legitimate HTTPS to `tls_host`, including a *valid chain of trust* and realistic entropy
|
||||
- Crawlers are completely satisfied, receiving real responses from `mask_host`
|
||||
#### Client WITH secret-key accesses the MTProxy resource:
|
||||
|
||||
<img width="360" height="439" alt="telemt" src="https://github.com/user-attachments/assets/39352afb-4a11-4ecc-9d91-9e8cfb20607d" />
|
||||
|
||||
#### Client WITHOUT secret-key gets transparent access to the specified resource:
|
||||
- with trusted certificate
|
||||
- with original handshake
|
||||
- with full request-response way
|
||||
- with low-latency overhead
|
||||
```bash
|
||||
root@debian:~/telemt# curl -v -I --resolve petrovich.ru:443:212.220.88.77 https://petrovich.ru/
|
||||
* Added petrovich.ru:443:212.220.88.77 to DNS cache
|
||||
* Hostname petrovich.ru was found in DNS cache
|
||||
* Trying 212.220.88.77:443...
|
||||
* Connected to petrovich.ru (212.220.88.77) port 443 (#0)
|
||||
* ALPN: offers h2,http/1.1
|
||||
* TLSv1.3 (OUT), TLS handshake, Client hello (1):
|
||||
* CAfile: /etc/ssl/certs/ca-certificates.crt
|
||||
* CApath: /etc/ssl/certs
|
||||
* TLSv1.3 (IN), TLS handshake, Server hello (2):
|
||||
* TLSv1.3 (IN), TLS handshake, Encrypted Extensions (8):
|
||||
* TLSv1.3 (IN), TLS handshake, Certificate (11):
|
||||
* TLSv1.3 (IN), TLS handshake, CERT verify (15):
|
||||
* TLSv1.3 (IN), TLS handshake, Finished (20):
|
||||
* TLSv1.3 (OUT), TLS change cipher, Change cipher spec (1):
|
||||
* TLSv1.3 (OUT), TLS handshake, Finished (20):
|
||||
* SSL connection using TLSv1.3 / TLS_AES_256_GCM_SHA384
|
||||
* ALPN: server did not agree on a protocol. Uses default.
|
||||
* Server certificate:
|
||||
* subject: C=RU; ST=Saint Petersburg; L=Saint Petersburg; O=STD Petrovich; CN=*.petrovich.ru
|
||||
* start date: Jan 28 11:21:01 2025 GMT
|
||||
* expire date: Mar 1 11:21:00 2026 GMT
|
||||
* subjectAltName: host "petrovich.ru" matched cert's "petrovich.ru"
|
||||
* issuer: C=BE; O=GlobalSign nv-sa; CN=GlobalSign RSA OV SSL CA 2018
|
||||
* SSL certificate verify ok.
|
||||
* using HTTP/1.x
|
||||
> HEAD / HTTP/1.1
|
||||
> Host: petrovich.ru
|
||||
> User-Agent: curl/7.88.1
|
||||
> Accept: */*
|
||||
>
|
||||
* TLSv1.3 (IN), TLS handshake, Newsession Ticket (4):
|
||||
* TLSv1.3 (IN), TLS handshake, Newsession Ticket (4):
|
||||
* old SSL session ID is stale, removing
|
||||
< HTTP/1.1 200 OK
|
||||
HTTP/1.1 200 OK
|
||||
< Server: Variti/0.9.3a
|
||||
Server: Variti/0.9.3a
|
||||
< Date: Thu, 01 Jan 2026 00:00:00 GMT
|
||||
Date: Thu, 01 Jan 2026 00:00:00 GMT
|
||||
< Access-Control-Allow-Origin: *
|
||||
Access-Control-Allow-Origin: *
|
||||
< Content-Type: text/html
|
||||
Content-Type: text/html
|
||||
< Cache-Control: no-store
|
||||
Cache-Control: no-store
|
||||
< Expires: Thu, 01 Jan 2026 00:00:00 GMT
|
||||
Expires: Thu, 01 Jan 2026 00:00:00 GMT
|
||||
< Pragma: no-cache
|
||||
Pragma: no-cache
|
||||
< Set-Cookie: ipp_uid=XXXXX/XXXXX/XXXXX==; Expires=Tue, 31 Dec 2040 23:59:59 GMT; Domain=.petrovich.ru; Path=/
|
||||
Set-Cookie: ipp_uid=XXXXX/XXXXX/XXXXX==; Expires=Tue, 31 Dec 2040 23:59:59 GMT; Domain=.petrovich.ru; Path=/
|
||||
< Content-Type: text/html
|
||||
Content-Type: text/html
|
||||
< Content-Length: 31253
|
||||
Content-Length: 31253
|
||||
< Connection: keep-alive
|
||||
Connection: keep-alive
|
||||
< Keep-Alive: timeout=60
|
||||
Keep-Alive: timeout=60
|
||||
|
||||
<
|
||||
* Connection #0 to host petrovich.ru left intact
|
||||
|
||||
```
|
||||
- We challenged ourselves, we kept trying, and we weren't just *beating the air*: now we have something to show you
|
||||
- Don't want to just take our word for it? That's great, and we respect it: you can build `telemt` yourself or download a build and check it right now
|
||||
### Telegram Calls via MTProxy
|
||||
- Telegram's architecture **does NOT allow calls via MTProxy**; calls work only via SOCKS5, which cannot be obfuscated
|
||||
### How does DPI see MTProxy TLS?
|
||||
- DPI sees MTProxy in Fake TLS (ee) mode as TLS 1.3
|
||||
- both the client and the server send the SNI you specify;
|
||||
- the ALPN looks like HTTP/1.1 or HTTP/2;
|
||||
- high entropy, which is normal for AES-encrypted traffic;
|
||||
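You can reproduce this view yourself; a minimal sketch, assuming your server is at `212.220.88.77` and fronts `petrovich.ru` as in the evidence above:

```bash
# Probe the proxy exactly the way a DPI box or scanner would
openssl s_client -connect 212.220.88.77:443 \
  -servername petrovich.ru -alpn h2,http/1.1 < /dev/null
# Expected: a TLSv1.3 handshake and the fronted domain's real
# certificate chain, with nothing anomalous to flag
```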
### Whitelist on IP
|
||||
- MTProxy cannot work when there is:
|
||||
- no IP connectivity to the target host: Russian Whitelist on Mobile Networks - "Белый список"
|
||||
- OR all TCP traffic is blocked
|
||||
- OR high entropy/encrypted traffic is blocked: content filters at universities and critical infrastructure
|
||||
- OR all TLS traffic is blocked
|
||||
- OR specified port is blocked: use 443 to make it "like real"
|
||||
- OR provided SNI is blocked: use "officially approved"/innocuous name
|
||||
- in these cases MTProxy is blocked like most protocols on the Internet;
|
||||
- these situations are observed:
|
||||
- in China behind the Great Firewall
|
||||
- in Russia on mobile networks, less in wired networks
|
||||
- in Iran during "activity"
|
||||
### Too many open files
|
||||
- On a fresh Linux install the default open file limit is low; under load `telemt` may fail with `Accept error: Too many open files`
|
||||
- **Systemd**: add `LimitNOFILE=65536` to the `[Service]` section (already included in the example above)
|
||||
- **Docker**: add `--ulimit nofile=65536:65536` to your `docker run` command, or in `docker-compose.yml`:
|
||||
```yaml
|
||||
ulimits:
|
||||
nofile:
|
||||
soft: 65536
|
||||
hard: 65536
|
||||
```
|
||||
- **System-wide** (optional): add to `/etc/security/limits.conf`:
|
||||
```
|
||||
* soft nofile 1048576
|
||||
* hard nofile 1048576
|
||||
root soft nofile 1048576
|
||||
root hard nofile 1048576
|
||||
```
|
||||
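After raising the limit, verify that the new value actually reaches the process; a quick check, assuming a systemd unit named `telemt`:

```bash
# Limit in the current shell
ulimit -n

# Limit applied to the running systemd service
systemctl show telemt -p LimitNOFILE

# Limit of the live process itself
grep 'open files' /proc/"$(pidof telemt)"/limits
```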
|
||||
|
||||
## Build
|
||||
```bash
|
||||
# Cloning repo
|
||||
git clone https://github.com/telemt/telemt
|
||||
# Changing Directory to telemt
|
||||
cd telemt
|
||||
# Starting Release Build
|
||||
cargo build --release
|
||||
# Move to /bin
|
||||
mv ./target/release/telemt /bin
|
||||
# Make executable
|
||||
chmod +x /bin/telemt
|
||||
# Let's go!
|
||||
telemt config.toml
|
||||
```
|
||||
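Before the first start, put at least one user secret into `config.toml`; the generator below is the same one the full configuration reference recommends:

```bash
# Generate a 32-hex-char secret for [access.users]
openssl rand -hex 16
```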
|
||||
## Why Rust?
|
||||
- Long-running reliability and idempotent behavior
|
||||
- Rust's deterministic resource management - RAII
|
||||
- No garbage collector
|
||||
- Memory safety and reduced attack surface
|
||||
- Tokio's asynchronous architecture
|
||||
|
||||
## Issues
|
||||
- ✅ [SOCKS5 as Upstream](https://github.com/telemt/telemt/issues/1) -> added Upstream Management
|
||||
- ✅ [iOS - Media Upload Hanging-in-Loop](https://github.com/telemt/telemt/issues/2)
|
||||
|
||||
## Roadmap
|
||||
- Public IP in links
|
||||
- Config Reload-on-fly
|
||||
- Bind to device or IP for outbound/inbound connections
|
||||
- Adtag Support per SNI / Secret
|
||||
- Fail-fast on start + Fail-soft on runtime (only WARN/ERROR)
|
||||
- Zero-copy, minimal allocs on hotpath
|
||||
- DC Healthchecks + global fallback
|
||||
- No global mutable state
|
||||
- Client isolation + Fair Bandwidth
|
||||
- Backpressure-aware IO
|
||||
- "Secret Policy" - SNI / Secret Routing :D
|
||||
- Multi-upstream Balancer and Failover
|
||||
- Strict FSM per handshake
|
||||
- Session-based anti-replay with a sliding window that does not break reconnects
|
||||
- Web Control: statistics, health state, latency, client experience...
|
||||
|
||||
34
ROADMAP.md
Normal file
@@ -0,0 +1,34 @@
|
||||
### 3.0.0 Anschluss
|
||||
- **Middle Proxy is now stable**, confirmed by a canary deploy with ~20 users
|
||||
- Ad-tag now works
|
||||
- DC=203/CDN now works over ME
|
||||
- `getProxyConfig` and `ProxySecret` are automated
|
||||
- Version numbering now follows the `3.0.0` format - without Windows-style "microfixes"
|
||||
|
||||
### 3.0.1 Kabelsammler
|
||||
- Handshake timeouts fixed
|
||||
- Connectivity logging refactored
|
||||
- Docker: tmpfs for ProxyConfig and ProxySecret
|
||||
- Public Host and Port in config
|
||||
- ME Relays Head-of-Line Blocking fixed
|
||||
- ME Ping
|
||||
|
||||
### 3.0.2 Microtrencher
|
||||
- New [network] section
|
||||
- ME Fixes
|
||||
- Small bug fixes
|
||||
|
||||
### 3.0.3 Ausrutscher
|
||||
- ME as stateful, no conn-id migration
|
||||
- No `flush()` on datapath after RpcWriter
|
||||
- High-tech IPv6 parser without regexp
|
||||
- `nat_probe = true` by default
|
||||
- Timeout for `recv()` in STUN-client
|
||||
- ConnRegistry review
|
||||
- Dualstack emergency reconnect
|
||||
|
||||
### 3.0.4 Schneeflecken
|
||||
- Only WARN and Links in Normal log
|
||||
- Consistent IP-family detection
|
||||
- Includes for config
|
||||
- `nonce_frame_hex` in log only with `DEBUG`
|
||||
697
config.full.toml
Normal file
@@ -0,0 +1,697 @@
|
||||
# ==============================================================================
|
||||
#
|
||||
# TELEMT — Advanced Rust-based Telegram MTProto Proxy
|
||||
# Full Configuration Reference
|
||||
#
|
||||
# This file is both a working config and a complete documentation.
|
||||
# Every parameter is explained. Read it top to bottom before deploying.
|
||||
#
|
||||
# Quick Start:
|
||||
# 1. Set [server].port to your desired port (443 recommended)
|
||||
# 2. Generate a secret: openssl rand -hex 16
|
||||
# 3. Put it in [access.users] under a name you choose
|
||||
# 4. Set [censorship].tls_domain to a popular unblocked HTTPS site
|
||||
# 5. Set your public IP in [general].middle_proxy_nat_ip
|
||||
# and [general.links].public_host
|
||||
# 6. Set announce IP in [[server.listeners]]
|
||||
# 7. Run Telemt. It prints a tg:// link. Send it to your users.
|
||||
#
|
||||
# Modes of Operation:
|
||||
# Direct Mode (use_middle_proxy = false)
|
||||
# Connects straight to Telegram DCs via TCP. Simple, fast, low overhead.
|
||||
# No ad_tag support. No CDN DC support (203, etc).
|
||||
#
|
||||
# Middle-Proxy Mode (use_middle_proxy = true)
|
||||
# Connects to Telegram Middle-End servers via RPC protocol.
|
||||
# Required for ad_tag monetization and CDN support.
|
||||
# Requires proxy_secret_path and a valid public IP.
|
||||
#
|
||||
# ==============================================================================
|
||||
|
||||
|
||||
# ==============================================================================
|
||||
# LEGACY TOP-LEVEL FIELDS
|
||||
# ==============================================================================
|
||||
|
||||
# Deprecated. Use [general.links].show instead.
|
||||
# Accepts "*" for all users, or an array like ["alice", "bob"].
|
||||
show_link = ["0"]
|
||||
|
||||
# Fallback Datacenter index (1-5) when a client requests an unknown DC ID.
|
||||
# DC 2 is Amsterdam (Europe), closest for most CIS users.
|
||||
# default_dc = 2
|
||||
|
||||
|
||||
# ==============================================================================
|
||||
# GENERAL SETTINGS
|
||||
# ==============================================================================
|
||||
|
||||
[general]
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# Core Protocol
|
||||
# ------------------------------------------------------------------------------
|
||||
|
||||
# Coalesce the MTProto handshake and first data payload into a single TCP packet.
|
||||
# Significantly reduces connection latency. No reason to disable.
|
||||
fast_mode = true
|
||||
|
||||
# How the proxy connects to Telegram servers.
|
||||
# false = Direct TCP to Telegram DCs (simple, low overhead)
|
||||
# true = Middle-End RPC protocol (required for ad_tag and CDN DCs)
|
||||
use_middle_proxy = true
|
||||
|
||||
# 32-char hex Ad-Tag from @MTProxybot for sponsored channel injection.
|
||||
# Only works when use_middle_proxy = true.
|
||||
# Obtain yours: message @MTProxybot on Telegram, register your proxy.
|
||||
# ad_tag = "00000000000000000000000000000000"
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# Middle-End Authentication
|
||||
# ------------------------------------------------------------------------------
|
||||
|
||||
# Path to the Telegram infrastructure AES key file.
|
||||
# Auto-downloaded from https://core.telegram.org/getProxySecret on first run.
|
||||
# This key authenticates your proxy with Middle-End servers.
|
||||
proxy_secret_path = "proxy-secret"
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# Public IP Configuration (Critical for Middle-Proxy Mode)
|
||||
# ------------------------------------------------------------------------------
|
||||
|
||||
# Your server's PUBLIC IPv4 address.
|
||||
# Middle-End servers need this for the cryptographic Key Derivation Function.
|
||||
# If your server has a direct public IP, set it here.
|
||||
# If behind NAT (AWS, Docker, etc.), this MUST be your external IP.
|
||||
# If omitted, Telemt uses STUN to auto-detect (see middle_proxy_nat_probe).
|
||||
# middle_proxy_nat_ip = "203.0.113.10"
|
||||
|
||||
# Auto-detect public IP via STUN servers defined in [network].
|
||||
# Set to false if you hardcoded middle_proxy_nat_ip above.
|
||||
# Set to true if you want automatic detection.
|
||||
middle_proxy_nat_probe = true
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# Middle-End Connection Pool
|
||||
# ------------------------------------------------------------------------------
|
||||
|
||||
# Number of persistent multiplexed RPC connections to ME servers.
|
||||
# All client traffic is routed through these "fat pipes".
|
||||
# 8 handles thousands of concurrent users comfortably.
|
||||
middle_proxy_pool_size = 8
|
||||
|
||||
# Legacy field. Connections kept initialized but idle as warm standby.
|
||||
middle_proxy_warm_standby = 16
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# Middle-End Keepalive
|
||||
# Telegram ME servers aggressively kill idle TCP connections.
|
||||
# These settings send periodic RPC_PING frames to keep pipes alive.
|
||||
# ------------------------------------------------------------------------------
|
||||
|
||||
me_keepalive_enabled = true
|
||||
|
||||
# Base interval between pings in seconds.
|
||||
me_keepalive_interval_secs = 25
|
||||
|
||||
# Random jitter added to interval to prevent all connections pinging simultaneously.
|
||||
me_keepalive_jitter_secs = 5
|
||||
|
||||
# Randomize ping payload bytes to prevent DPI from fingerprinting ping patterns.
|
||||
me_keepalive_payload_random = true
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# Client-Side Limits
|
||||
# ------------------------------------------------------------------------------
|
||||
|
||||
# Max buffered ciphertext per client (bytes) when upstream is slow.
|
||||
# Acts as backpressure to prevent memory exhaustion. 256KB is safe.
|
||||
crypto_pending_buffer = 262144
|
||||
|
||||
# Maximum single MTProto frame size from client. 16MB is protocol standard.
|
||||
max_client_frame = 16777216
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# Crypto Desynchronization Logging
|
||||
# Desync errors usually mean DPI/GFW is tampering with connections.
|
||||
# ------------------------------------------------------------------------------
|
||||
|
||||
# true = full forensics (trace ID, IP hash, hex dumps) for EVERY desync event
|
||||
# false = deduplicated logging, one entry per time window (prevents log spam)
|
||||
# Set true if you are actively debugging DPI interference.
|
||||
desync_all_full = true
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# Beobachten — Built-in Honeypot / Active Probe Tracker
|
||||
# Tracks IPs that fail handshakes or behave like TLS scanners.
|
||||
# Output file can be fed into fail2ban or iptables for auto-blocking.
|
||||
# ------------------------------------------------------------------------------
|
||||
|
||||
beobachten = true
|
||||
|
||||
# How long (minutes) to remember a suspicious IP before expiring it.
|
||||
beobachten_minutes = 30
|
||||
|
||||
# How often (seconds) to flush tracker state to disk.
|
||||
beobachten_flush_secs = 15
|
||||
|
||||
# File path for the tracker output.
|
||||
beobachten_file = "cache/beobachten.txt"
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# Hardswap — Zero-Downtime ME Pool Rotation
|
||||
# When Telegram updates ME server IPs, Hardswap creates a completely new pool,
|
||||
# waits until it is fully ready, migrates traffic, then kills the old pool.
|
||||
# Users experience zero interruption.
|
||||
# ------------------------------------------------------------------------------
|
||||
|
||||
hardswap = true
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# ME Pool Warmup Staggering
|
||||
# When creating a new pool, connections are opened one by one with delays
|
||||
# to avoid a burst of SYN packets that could trigger ISP flood protection.
|
||||
# ------------------------------------------------------------------------------
|
||||
|
||||
me_warmup_stagger_enabled = true
|
||||
|
||||
# Delay between each connection creation (milliseconds).
|
||||
me_warmup_step_delay_ms = 500
|
||||
|
||||
# Random jitter added to the delay (milliseconds).
|
||||
me_warmup_step_jitter_ms = 300
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# ME Reconnect Backoff
|
||||
# If an ME server drops the connection, Telemt retries with this strategy.
|
||||
# ------------------------------------------------------------------------------
|
||||
|
||||
# Max simultaneous reconnect attempts per DC.
|
||||
me_reconnect_max_concurrent_per_dc = 8
|
||||
|
||||
# Exponential backoff base (milliseconds).
|
||||
me_reconnect_backoff_base_ms = 500
|
||||
|
||||
# Backoff ceiling (milliseconds). Will never wait longer than this.
|
||||
me_reconnect_backoff_cap_ms = 30000
|
||||
|
||||
# Number of instant retries before switching to exponential backoff.
|
||||
me_reconnect_fast_retry_count = 12
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# NAT Mismatch Behavior
|
||||
# If STUN-detected IP differs from local interface IP (you are behind NAT).
|
||||
# false = abort ME mode (safe default)
|
||||
# true = force ME mode anyway (use if you know your NAT setup is correct)
|
||||
# ------------------------------------------------------------------------------
|
||||
|
||||
stun_iface_mismatch_ignore = false
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# Logging
|
||||
# ------------------------------------------------------------------------------
|
||||
|
||||
# File to log unknown DC requests (DC IDs outside standard 1-5).
|
||||
unknown_dc_log_path = "unknown-dc.txt"
|
||||
|
||||
# Verbosity: "debug" | "verbose" | "normal" | "silent"
|
||||
log_level = "normal"
|
||||
|
||||
# Disable ANSI color codes in log output (useful for file logging).
|
||||
disable_colors = false
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# FakeTLS Record Sizing
|
||||
# Buffer small MTProto packets into larger TLS records to mimic real HTTPS.
|
||||
# Real HTTPS servers send records close to MTU size (~1400 bytes).
|
||||
# A stream of tiny TLS records is a strong DPI signal.
|
||||
# Set to 0 to disable. Set to 1400 for realistic HTTPS emulation.
|
||||
# ------------------------------------------------------------------------------
|
||||
|
||||
fast_mode_min_tls_record = 1400
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# Periodic Updates
|
||||
# ------------------------------------------------------------------------------
|
||||
|
||||
# How often (seconds) to re-fetch ME server lists and proxy secrets
|
||||
# from core.telegram.org. Keeps your proxy in sync with Telegram infrastructure.
|
||||
update_every = 300
|
||||
|
||||
# How often (seconds) to force a Hardswap even if the ME map is unchanged.
|
||||
# Shorter intervals mean shorter-lived TCP flows, harder for DPI to profile.
|
||||
me_reinit_every_secs = 600
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# Hardswap Warmup Tuning
|
||||
# Fine-grained control over how the new pool is warmed up before traffic switch.
|
||||
# ------------------------------------------------------------------------------
|
||||
|
||||
me_hardswap_warmup_delay_min_ms = 1000
|
||||
me_hardswap_warmup_delay_max_ms = 2000
|
||||
me_hardswap_warmup_extra_passes = 3
|
||||
me_hardswap_warmup_pass_backoff_base_ms = 500
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# Config Update Debouncing
|
||||
# Telegram sometimes pushes transient/broken configs. Debouncing requires
|
||||
# N consecutive identical fetches before applying a change.
|
||||
# ------------------------------------------------------------------------------
|
||||
|
||||
# ME server list must be identical for this many fetches before applying.
|
||||
me_config_stable_snapshots = 2
|
||||
|
||||
# Minimum seconds between config applications.
|
||||
me_config_apply_cooldown_secs = 300
|
||||
|
||||
# Proxy secret must be identical for this many fetches before applying.
|
||||
proxy_secret_stable_snapshots = 2
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# Proxy Secret Rotation
|
||||
# ------------------------------------------------------------------------------
|
||||
|
||||
# Apply newly downloaded secrets at runtime without restart.
|
||||
proxy_secret_rotate_runtime = true
|
||||
|
||||
# Maximum acceptable secret length (bytes). Rejects abnormally large secrets.
|
||||
proxy_secret_len_max = 256
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# Hardswap Drain Settings
|
||||
# Controls graceful shutdown of old ME connections during pool rotation.
|
||||
# ------------------------------------------------------------------------------
|
||||
|
||||
# Seconds to keep old connections alive for in-flight data before force-closing.
|
||||
me_pool_drain_ttl_secs = 90
|
||||
|
||||
# Minimum ratio of healthy connections in new pool before draining old pool.
|
||||
# 0.8 = at least 80% of new pool must be ready.
|
||||
me_pool_min_fresh_ratio = 0.8
|
||||
|
||||
# Maximum seconds to wait for drain to complete before force-killing.
|
||||
me_reinit_drain_timeout_secs = 120
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# NTP Clock Check
|
||||
# MTProto uses timestamps. Clock drift > 30 seconds breaks handshakes.
|
||||
# Telemt checks on startup and warns if out of sync.
|
||||
# ------------------------------------------------------------------------------
|
||||
|
||||
ntp_check = true
|
||||
ntp_servers = ["pool.ntp.org"]
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# Auto-Degradation
|
||||
# If ME servers become completely unreachable (ISP blocking),
|
||||
# automatically fall back to Direct Mode so users stay connected.
|
||||
# ------------------------------------------------------------------------------
|
||||
|
||||
auto_degradation_enabled = true
|
||||
|
||||
# Number of DC groups that must be unreachable before triggering fallback.
|
||||
degradation_min_unavailable_dc_groups = 2
|
||||
|
||||
|
||||
# ==============================================================================
|
||||
# ALLOWED CLIENT PROTOCOLS
|
||||
# Only enable what you need. In censored regions, TLS-only is safest.
|
||||
# ==============================================================================
|
||||
|
||||
[general.modes]
|
||||
|
||||
# Classic MTProto. Unobfuscated length prefixes. Trivially detected by DPI.
|
||||
# No reason to enable unless you have ancient clients.
|
||||
classic = false
|
||||
|
||||
# Obfuscated MTProto with randomized padding. Better than classic, but
|
||||
# still detectable by statistical analysis of packet sizes.
|
||||
secure = false
|
||||
|
||||
# FakeTLS (ee-secrets). Wraps MTProto in TLS 1.3 framing.
|
||||
# To DPI, it looks like a normal HTTPS connection.
|
||||
# This should be the ONLY enabled mode in censored environments.
|
||||
tls = true
|
||||
|
||||
|
||||
# ==============================================================================
|
||||
# STARTUP LINK GENERATION
|
||||
# Controls what tg:// invite links are printed to console on startup.
|
||||
# ==============================================================================
|
||||
|
||||
[general.links]
|
||||
|
||||
# Which users to generate links for.
|
||||
# "*" = all users, or an array like ["alice", "bob"].
|
||||
show = "*"
|
||||
|
||||
# IP or domain to embed in the tg:// link.
|
||||
# If omitted, Telemt uses STUN to auto-detect.
|
||||
# Set this to your server's public IP or domain for reliable links.
|
||||
# public_host = "proxy.example.com"
|
||||
|
||||
# Port to embed in the tg:// link.
|
||||
# If omitted, uses [server].port.
|
||||
# public_port = 443
|
||||
|
||||
|
||||
# ==============================================================================
|
||||
# NETWORK & IP RESOLUTION
|
||||
# ==============================================================================
|
||||
|
||||
[network]
|
||||
|
||||
# Enable IPv4 for outbound connections to Telegram.
|
||||
ipv4 = true
|
||||
|
||||
# Enable IPv6 for outbound connections to Telegram.
|
||||
ipv6 = false
|
||||
|
||||
# Prefer IPv4 (4) or IPv6 (6) when both are available.
|
||||
prefer = 4
|
||||
|
||||
# Experimental: use both IPv4 and IPv6 ME servers simultaneously.
|
||||
# May improve reliability but doubles connection count.
|
||||
multipath = false
|
||||
|
||||
# STUN servers for external IP discovery.
|
||||
# Used for Middle-Proxy KDF (if nat_probe=true) and link generation.
|
||||
stun_servers = [
|
||||
"stun.l.google.com:5349",
|
||||
"stun1.l.google.com:3478",
|
||||
"stun.gmx.net:3478",
|
||||
"stun.l.google.com:19302"
|
||||
]
|
||||
|
||||
# If UDP STUN is blocked, attempt TCP-based STUN as fallback.
|
||||
stun_tcp_fallback = true
|
||||
|
||||
# If all STUN fails, use HTTP APIs to discover public IP.
|
||||
http_ip_detect_urls = [
|
||||
"https://ifconfig.me/ip",
|
||||
"https://api.ipify.org"
|
||||
]
|
||||
|
||||
# Cache discovered public IP to this file to survive restarts.
|
||||
cache_public_ip_path = "cache/public_ip.txt"
|
||||
|
||||
|
||||
# ==============================================================================
|
||||
# SERVER BINDING & METRICS
|
||||
# ==============================================================================
|
||||
|
||||
[server]
|
||||
|
||||
# TCP port to listen on.
|
||||
# 443 is recommended (looks like normal HTTPS traffic).
|
||||
port = 443
|
||||
|
||||
# IPv4 bind address. "0.0.0.0" = all interfaces.
|
||||
listen_addr_ipv4 = "0.0.0.0"
|
||||
|
||||
# IPv6 bind address. "::" = all interfaces.
|
||||
listen_addr_ipv6 = "::"
|
||||
|
||||
# Unix socket listener (for reverse proxy setups with Nginx/HAProxy).
|
||||
# listen_unix_sock = "/var/run/telemt.sock"
|
||||
# listen_unix_sock_perm = "0660"
|
||||
|
||||
# Enable PROXY protocol header parsing.
|
||||
# Set true ONLY if Telemt is behind HAProxy/Nginx that injects PROXY headers.
|
||||
# If enabled without a proxy in front, clients will fail to connect.
|
||||
proxy_protocol = false
|
||||
|
||||
# Prometheus metrics HTTP endpoint port.
|
||||
# Uncomment to enable. Access at http://your-server:9090/metrics
|
||||
# metrics_port = 9090
|
||||
|
||||
# IP ranges allowed to access the metrics endpoint.
|
||||
metrics_whitelist = [
|
||||
"127.0.0.1/32",
|
||||
"::1/128"
|
||||
]
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# Listener Overrides
|
||||
# Define explicit listeners with specific bind IPs and announce IPs.
|
||||
# The announce IP is what gets embedded in tg:// links and sent to ME servers.
|
||||
# You MUST set announce to your server's public IP for ME mode to work.
|
||||
# ------------------------------------------------------------------------------
|
||||
|
||||
# [[server.listeners]]
|
||||
# ip = "0.0.0.0"
|
||||
# announce = "203.0.113.10"
|
||||
# reuse_allow = false
|
||||
|
||||
|
||||
# ==============================================================================
|
||||
# TIMEOUTS (seconds unless noted)
|
||||
# ==============================================================================
|
||||
|
||||
[timeouts]
|
||||
|
||||
# Maximum time for client to complete FakeTLS + MTProto handshake.
|
||||
client_handshake = 15
|
||||
|
||||
# Maximum time to establish TCP connection to upstream Telegram DC.
|
||||
tg_connect = 10
|
||||
|
||||
# TCP keepalive interval for client connections.
|
||||
client_keepalive = 60
|
||||
|
||||
# Maximum client inactivity before dropping the connection.
|
||||
client_ack = 300
|
||||
|
||||
# Instant retry count for a single ME endpoint before giving up on it.
|
||||
me_one_retry = 3
|
||||
|
||||
# Timeout (milliseconds) for a single ME endpoint connection attempt.
|
||||
me_one_timeout_ms = 1500
|
||||
|
||||
|
||||
# ==============================================================================
|
||||
# ANTI-CENSORSHIP / FAKETLS / MASKING
|
||||
# This is where Telemt becomes invisible to Deep Packet Inspection.
|
||||
# ==============================================================================
|
||||
|
||||
[censorship]
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# TLS Domain Fronting
|
||||
# The SNI (Server Name Indication) your proxy presents to connecting clients.
|
||||
# Must be a popular, unblocked HTTPS website in your target country.
|
||||
# DPI sees traffic to this domain. Choose carefully.
|
||||
# Good choices: major CDNs, banks, government sites, search engines.
|
||||
# Bad choices: obscure sites, already-blocked domains.
|
||||
# ------------------------------------------------------------------------------
|
||||
|
||||
tls_domain = "www.google.com"
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# Active Probe Masking
|
||||
# When someone connects but fails the MTProto handshake (wrong secret),
|
||||
# they might be an ISP active prober testing if this is a proxy.
|
||||
#
|
||||
# mask = false: drop the connection (prober knows something is here)
|
||||
# mask = true: transparently proxy them to mask_host (prober sees a real website)
|
||||
#
|
||||
# With mask enabled, your server is indistinguishable from a real web server
|
||||
# to anyone who doesn't have the correct secret.
|
||||
# ------------------------------------------------------------------------------
|
||||
|
||||
mask = true
|
||||
|
||||
# The real web server to forward failed handshakes to.
|
||||
# If omitted, defaults to tls_domain.
|
||||
# mask_host = "www.google.com"
|
||||
|
||||
# Port on the mask host to connect to.
|
||||
mask_port = 443
|
||||
|
||||
# Inject PROXY protocol header when forwarding to mask host.
|
||||
# 0 = disabled, 1 = v1, 2 = v2. Leave disabled unless mask_host expects it.
|
||||
# mask_proxy_protocol = 0
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# TLS Certificate Emulation
|
||||
# ------------------------------------------------------------------------------
|
||||
|
||||
# Size (bytes) of the locally generated fake TLS certificate.
|
||||
# Only used when tls_emulation is disabled.
|
||||
fake_cert_len = 2048
|
||||
|
||||
# KILLER FEATURE: Real-Time TLS Emulation.
|
||||
# Telemt connects to tls_domain, fetches its actual TLS 1.3 certificate chain,
|
||||
# and exactly replicates the byte sizes of ServerHello and Certificate records.
|
||||
# Defeats DPI that uses TLS record length heuristics to detect proxies.
|
||||
# Strongly recommended in censored environments.
|
||||
tls_emulation = true
|
||||
|
||||
# Directory to cache fetched TLS certificates.
|
||||
tls_front_dir = "tlsfront"
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# ServerHello Timing
|
||||
# Real web servers take 30-150ms to respond to ClientHello due to network
|
||||
# latency and crypto processing. A proxy responding in <1ms is suspicious.
|
||||
# These settings add realistic delay to mimic genuine server behavior.
|
||||
# ------------------------------------------------------------------------------
|
||||
|
||||
# Minimum delay before sending ServerHello (milliseconds).
|
||||
server_hello_delay_min_ms = 50
|
||||
|
||||
# Maximum delay before sending ServerHello (milliseconds).
|
||||
server_hello_delay_max_ms = 150
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# TLS Session Tickets
|
||||
# Real TLS 1.3 servers send 1-2 NewSessionTicket messages after handshake.
|
||||
# A server that sends zero tickets is anomalous and may trigger DPI flags.
|
||||
# Set this to match your tls_domain's behavior (usually 2).
|
||||
# ------------------------------------------------------------------------------
|
||||
|
||||
# tls_new_session_tickets = 0
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# Full Certificate Frequency
|
||||
# When tls_emulation is enabled, this controls how often (per client IP)
|
||||
# to send the complete emulated certificate chain.
|
||||
#
|
||||
# > 0: Subsequent connections within TTL seconds get a smaller cached version.
|
||||
# Saves bandwidth but creates a detectable size difference between
|
||||
# first and repeat connections.
|
||||
#
|
||||
# = 0: Every connection gets the full certificate. More bandwidth but
|
||||
# perfectly consistent behavior, no anomalies for DPI to detect.
|
||||
# ------------------------------------------------------------------------------
|
||||
|
||||
tls_full_cert_ttl_secs = 0
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# ALPN Enforcement
|
||||
# Ensure ServerHello responds with the exact ALPN protocol the client requested.
|
||||
# Mismatched ALPN (e.g., client asks h2, server says http/1.1) is a DPI red flag.
|
||||
# ------------------------------------------------------------------------------
|
||||
|
||||
alpn_enforce = true
|
||||
|
||||
|
||||
# ==============================================================================
|
||||
# ACCESS CONTROL & USERS
|
||||
# ==============================================================================
|
||||
|
||||
[access]
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# Replay Attack Protection
|
||||
# DPI can record a legitimate user's handshake and replay it later to probe
|
||||
# whether the server is a proxy. Telemt remembers recent handshake nonces
|
||||
# and rejects duplicates.
|
||||
# ------------------------------------------------------------------------------
|
||||
|
||||
# Number of nonce slots in the replay detection buffer.
|
||||
replay_check_len = 65536
|
||||
|
||||
# How long (seconds) to remember nonces before expiring them.
|
||||
replay_window_secs = 1800
|
||||
|
||||
# Allow clients with incorrect system clocks to connect.
|
||||
# false = reject clients with significant time skew (more secure)
|
||||
# true = accept anyone regardless of clock (more permissive)
|
||||
ignore_time_skew = false
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# User Secrets
|
||||
# Each user needs a unique 32-character hex string as their secret.
|
||||
# Generate with: openssl rand -hex 16
|
||||
#
|
||||
# This secret is embedded in the tg:// link. Anyone with it can connect.
|
||||
# Format: username = "hex_secret"
|
||||
# ------------------------------------------------------------------------------
|
||||
|
||||
[access.users]
|
||||
# alice = "0123456789abcdef0123456789abcdef"
|
||||
# bob = "fedcba9876543210fedcba9876543210"
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# Per-User Connection Limits
|
||||
# Limits concurrent TCP connections per user to prevent secret sharing.
|
||||
# Uncomment and set for each user as needed.
|
||||
# ------------------------------------------------------------------------------
|
||||
|
||||
[access.user_max_tcp_conns]
|
||||
# alice = 100
|
||||
# bob = 50
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# Per-User Expiration Dates
|
||||
# Automatically revoke access after the specified date (ISO 8601 format).
|
||||
# ------------------------------------------------------------------------------
|
||||
|
||||
[access.user_expirations]
|
||||
# alice = "2025-12-31T23:59:59Z"
|
||||
# bob = "2026-06-15T00:00:00Z"
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# Per-User Data Quotas
|
||||
# Maximum total bytes transferred per user. Connection refused after limit.
|
||||
# ------------------------------------------------------------------------------
|
||||
|
||||
[access.user_data_quota]
|
||||
# alice = 107374182400
|
||||
# bob = 53687091200
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# Per-User Unique IP Limits
|
||||
# Maximum number of different IP addresses that can use this secret
|
||||
# at the same time. Highly effective against secret leaking/sharing.
|
||||
# Set to 1 for single-device, 2-3 for phone+desktop, etc.
|
||||
# ------------------------------------------------------------------------------
|
||||
|
||||
[access.user_max_unique_ips]
|
||||
# alice = 3
|
||||
# bob = 2
|
||||
|
||||
|
||||
# ==============================================================================
|
||||
# UPSTREAM ROUTING
|
||||
# Controls how Telemt connects to Telegram servers (or ME servers).
|
||||
# If omitted entirely, uses the OS default route.
|
||||
# ==============================================================================
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# Direct upstream: use the server's own network interface.
|
||||
# You can optionally bind to a specific interface or local IP.
|
||||
# ------------------------------------------------------------------------------
|
||||
|
||||
# [[upstreams]]
|
||||
# type = "direct"
|
||||
# interface = "eth0"
|
||||
# bind_addresses = ["192.0.2.10"]
|
||||
# weight = 1
|
||||
# enabled = true
|
||||
# scopes = "*"
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# SOCKS5 upstream: route Telegram traffic through a SOCKS5 proxy.
|
||||
# Useful if your server's IP is blocked from reaching Telegram DCs.
|
||||
# ------------------------------------------------------------------------------
|
||||
|
||||
# [[upstreams]]
|
||||
# type = "socks5"
|
||||
# address = "198.51.100.30:1080"
|
||||
# username = "proxy-user"
|
||||
# password = "proxy-pass"
|
||||
# weight = 1
|
||||
# enabled = true
|
||||
|
||||
|
||||
# ==============================================================================
|
||||
# DATACENTER OVERRIDES
|
||||
# Force specific DC IDs to route to specific IP:Port combinations.
|
||||
# DC 203 (CDN) is auto-injected by Telemt if not specified here.
|
||||
# ==============================================================================
|
||||
|
||||
# [dc_overrides]
|
||||
# "201" = "149.154.175.50:443"
|
||||
# "202" = ["149.154.167.51:443", "149.154.175.100:443"]
|
||||
62
config.toml
@@ -1,13 +1,57 @@
|
||||
port = 443
|
||||
### Telemt Based Config.toml
|
||||
# We believe that these settings are sufficient for most scenarios
|
||||
# where cutting-edge methods and parameters or special solutions are not needed
|
||||
|
||||
[users]
|
||||
user1 = "00000000000000000000000000000000"
|
||||
# === General Settings ===
|
||||
[general]
|
||||
use_middle_proxy = true
|
||||
# Global ad_tag fallback when user has no per-user tag in [access.user_ad_tags]
|
||||
# ad_tag = "00000000000000000000000000000000"
|
||||
# Per-user ad_tag in [access.user_ad_tags] (32 hex from @MTProxybot)
|
||||
|
||||
[modes]
|
||||
classic = true
|
||||
secure = true
|
||||
# === Log Level ===
|
||||
# Log level: debug | verbose | normal | silent
|
||||
# Can be overridden with --silent or --log-level CLI flags
|
||||
# RUST_LOG env var takes absolute priority over all of these
|
||||
log_level = "normal"
|
||||
|
||||
[general.modes]
|
||||
classic = false
|
||||
secure = false
|
||||
tls = true
|
||||
|
||||
tls_domain = "www.github.com"
|
||||
fast_mode = true
|
||||
prefer_ipv6 = false
|
||||
[general.links]
|
||||
show = "*"
|
||||
# show = ["alice", "bob"] # Only show links for alice and bob
|
||||
# show = "*" # Show links for all users
|
||||
# public_host = "proxy.example.com" # Host (IP or domain) for tg:// links
|
||||
# public_port = 443 # Port for tg:// links (default: server.port)
|
||||
|
||||
# === Server Binding ===
|
||||
[server]
|
||||
port = 443
|
||||
# proxy_protocol = false # Enable if behind HAProxy/nginx with PROXY protocol
|
||||
# metrics_port = 9090
|
||||
# metrics_whitelist = ["127.0.0.1", "::1", "0.0.0.0/0"]
|
||||
|
||||
[server.api]
|
||||
enabled = true
|
||||
listen = "0.0.0.0:9091"
|
||||
whitelist = ["127.0.0.0/8"]
|
||||
minimal_runtime_enabled = false
|
||||
minimal_runtime_cache_ttl_ms = 1000
|
||||
|
||||
# Listen on multiple interfaces/IPs - IPv4
|
||||
[[server.listeners]]
|
||||
ip = "0.0.0.0"
|
||||
|
||||
# === Anti-Censorship & Masking ===
|
||||
[censorship]
|
||||
tls_domain = "petrovich.ru"
|
||||
mask = true
|
||||
tls_emulation = true # Fetch real cert lengths and emulate TLS records
|
||||
tls_front_dir = "tlsfront" # Cache directory for TLS emulation
|
||||
|
||||
[access.users]
|
||||
# format: "username" = "32_hex_chars_secret"
|
||||
hello = "00000000000000000000000000000000"
|
||||
|
||||
30
docker-compose.yml
Normal file
@@ -0,0 +1,30 @@
|
||||
services:
|
||||
telemt:
|
||||
image: ghcr.io/telemt/telemt:latest
|
||||
build: .
|
||||
container_name: telemt
|
||||
restart: unless-stopped
|
||||
ports:
|
||||
- "443:443"
|
||||
- "127.0.0.1:9090:9090"
|
||||
# Allow caching 'proxy-secret' in read-only container
|
||||
working_dir: /run/telemt
|
||||
volumes:
|
||||
- ./config.toml:/run/telemt/config.toml:ro
|
||||
tmpfs:
|
||||
- /run/telemt:rw,mode=1777,size=1m
|
||||
environment:
|
||||
- RUST_LOG=info
|
||||
# Uncomment this line if you want to use host network for IPv6, but bridge is default and usually better
|
||||
# network_mode: host
|
||||
cap_drop:
|
||||
- ALL
|
||||
cap_add:
|
||||
- NET_BIND_SERVICE # allow binding to port 443
|
||||
read_only: true
|
||||
security_opt:
|
||||
- no-new-privileges:true
|
||||
ulimits:
|
||||
nofile:
|
||||
soft: 65536
|
||||
hard: 65536
|
||||
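With this compose file next to your `config.toml`, startup is a single command; a minimal sketch:

```bash
# Start in the background and follow the logs
docker compose up -d
docker compose logs -f telemt
```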
673
docs/API.md
Normal file
@@ -0,0 +1,673 @@
|
||||
# Telemt Control API
|
||||
|
||||
## Purpose
|
||||
Control-plane HTTP API for runtime visibility and user/config management.
|
||||
Data-plane MTProto traffic is out of scope.
|
||||
|
||||
## Runtime Configuration
|
||||
API runtime is configured in `[server.api]`.
|
||||
|
||||
| Field | Type | Default | Description |
|
||||
| --- | --- | --- | --- |
|
||||
| `enabled` | `bool` | `false` | Enables REST API listener. |
|
||||
| `listen` | `string` (`IP:PORT`) | `127.0.0.1:9091` | API bind address. |
|
||||
| `whitelist` | `CIDR[]` | `127.0.0.1/32, ::1/128` | Source IP allowlist. Empty list means allow all. |
|
||||
| `auth_header` | `string` | `""` | Exact value for `Authorization` header. Empty disables header auth. |
|
||||
| `request_body_limit_bytes` | `usize` | `65536` | Maximum request body size. Must be `> 0`. |
|
||||
| `minimal_runtime_enabled` | `bool` | `false` | Enables runtime snapshot endpoints requiring ME pool read-lock aggregation. |
|
||||
| `minimal_runtime_cache_ttl_ms` | `u64` | `1000` | Cache TTL for minimal snapshots. `0` disables cache; valid range is `[0, 60000]`. |
|
||||
| `runtime_edge_enabled` | `bool` | `false` | Enables runtime edge endpoints with cached aggregation payloads. |
|
||||
| `runtime_edge_cache_ttl_ms` | `u64` | `1000` | Cache TTL for runtime edge summary payloads. `0` disables cache. |
|
||||
| `runtime_edge_top_n` | `usize` | `10` | Top-N rows for runtime edge leaderboard payloads. |
|
||||
| `runtime_edge_events_capacity` | `usize` | `256` | Ring-buffer size for `/v1/runtime/events/recent`. |
|
||||
| `read_only` | `bool` | `false` | Disables mutating endpoints. |
|
||||
|
||||
`server.admin_api` is accepted as an alias for backward compatibility.
|
||||
|
||||
Runtime validation for API config:
|
||||
- `server.api.listen` must be a valid `IP:PORT`.
|
||||
- `server.api.request_body_limit_bytes` must be `> 0`.
|
||||
- `server.api.minimal_runtime_cache_ttl_ms` must be within `[0, 60000]`.
|
||||
- `server.api.runtime_edge_cache_ttl_ms` must be within `[0, 60000]`.
|
||||
- `server.api.runtime_edge_top_n` must be within `[1, 1000]`.
|
||||
- `server.api.runtime_edge_events_capacity` must be within `[16, 4096]`.
|
||||
|
||||
## Protocol Contract
|
||||
|
||||
| Item | Value |
|
||||
| --- | --- |
|
||||
| Transport | HTTP/1.1 |
|
||||
| Content type | `application/json; charset=utf-8` |
|
||||
| Prefix | `/v1` |
|
||||
| Optimistic concurrency | `If-Match: <revision>` on mutating requests (optional) |
|
||||
| Revision format | SHA-256 hex of current `config.toml` content |
|
||||
|
||||
### Success Envelope
|
||||
```json
|
||||
{
|
||||
"ok": true,
|
||||
"data": {},
|
||||
"revision": "sha256-hex"
|
||||
}
|
||||
```
|
||||
|
||||
### Error Envelope
|
||||
```json
|
||||
{
|
||||
"ok": false,
|
||||
"error": {
|
||||
"code": "machine_code",
|
||||
"message": "human-readable"
|
||||
},
|
||||
"request_id": 1
|
||||
}
|
||||
```
|
||||
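A quick way to see both envelopes live; a sketch assuming the default `127.0.0.1:9091` listener and an `auth_header` configured as `Bearer secret-token`:

```bash
# Success envelope from the health endpoint
curl -s -H 'Authorization: Bearer secret-token' \
  http://127.0.0.1:9091/v1/health

# Error envelope from an unknown route (404 not_found)
curl -s -H 'Authorization: Bearer secret-token' \
  http://127.0.0.1:9091/v1/nope
```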
|
||||
## Request Processing Order
|
||||
|
||||
Requests are processed in this order:
|
||||
1. `api_enabled` gate (`503 api_disabled` if disabled).
|
||||
2. Source IP whitelist gate (`403 forbidden`).
|
||||
3. `Authorization` header gate when configured (`401 unauthorized`).
|
||||
4. Route and method matching (`404 not_found` or `405 method_not_allowed`).
|
||||
5. `read_only` gate for mutating routes (`403 read_only`).
|
||||
6. Request body read/limit/JSON decode (`413 payload_too_large`, `400 bad_request`).
|
||||
7. Business validation and config write path.
|
||||
|
||||
Notes:
|
||||
- Whitelist is evaluated against the direct TCP peer IP (`SocketAddr::ip`), without `X-Forwarded-For` support.
|
||||
- `Authorization` check is exact string equality against configured `auth_header`.
|
||||
|
||||
## Endpoint Matrix
|
||||
|
||||
| Method | Path | Body | Success | `data` contract |
|
||||
| --- | --- | --- | --- | --- |
|
||||
| `GET` | `/v1/health` | none | `200` | `HealthData` |
|
||||
| `GET` | `/v1/system/info` | none | `200` | `SystemInfoData` |
|
||||
| `GET` | `/v1/runtime/gates` | none | `200` | `RuntimeGatesData` |
|
||||
| `GET` | `/v1/limits/effective` | none | `200` | `EffectiveLimitsData` |
|
||||
| `GET` | `/v1/security/posture` | none | `200` | `SecurityPostureData` |
|
||||
| `GET` | `/v1/security/whitelist` | none | `200` | `SecurityWhitelistData` |
|
||||
| `GET` | `/v1/stats/summary` | none | `200` | `SummaryData` |
|
||||
| `GET` | `/v1/stats/zero/all` | none | `200` | `ZeroAllData` |
|
||||
| `GET` | `/v1/stats/upstreams` | none | `200` | `UpstreamsData` |
|
||||
| `GET` | `/v1/stats/minimal/all` | none | `200` | `MinimalAllData` |
|
||||
| `GET` | `/v1/stats/me-writers` | none | `200` | `MeWritersData` |
|
||||
| `GET` | `/v1/stats/dcs` | none | `200` | `DcStatusData` |
|
||||
| `GET` | `/v1/runtime/me_pool_state` | none | `200` | `RuntimeMePoolStateData` |
|
||||
| `GET` | `/v1/runtime/me_quality` | none | `200` | `RuntimeMeQualityData` |
|
||||
| `GET` | `/v1/runtime/upstream_quality` | none | `200` | `RuntimeUpstreamQualityData` |
|
||||
| `GET` | `/v1/runtime/nat_stun` | none | `200` | `RuntimeNatStunData` |
|
||||
| `GET` | `/v1/runtime/connections/summary` | none | `200` | `RuntimeEdgeConnectionsSummaryData` |
|
||||
| `GET` | `/v1/runtime/events/recent` | none | `200` | `RuntimeEdgeEventsData` |
|
||||
| `GET` | `/v1/stats/users` | none | `200` | `UserInfo[]` |
|
||||
| `GET` | `/v1/users` | none | `200` | `UserInfo[]` |
|
||||
| `POST` | `/v1/users` | `CreateUserRequest` | `201` | `CreateUserResponse` |
|
||||
| `GET` | `/v1/users/{username}` | none | `200` | `UserInfo` |
|
||||
| `PATCH` | `/v1/users/{username}` | `PatchUserRequest` | `200` | `UserInfo` |
|
||||
| `DELETE` | `/v1/users/{username}` | none | `200` | `string` (deleted username) |
|
||||
| `POST` | `/v1/users/{username}/rotate-secret` | `RotateSecretRequest` or empty body | `404` | `ErrorResponse` (`not_found`, current runtime behavior) |
|
||||
|
||||
## Common Error Codes
|
||||
|
||||
| HTTP | `error.code` | Trigger |
|
||||
| --- | --- | --- |
|
||||
| `400` | `bad_request` | Invalid JSON, validation failures, malformed request body. |
|
||||
| `401` | `unauthorized` | Missing/invalid `Authorization` when `auth_header` is configured. |
|
||||
| `403` | `forbidden` | Source IP is not allowed by whitelist. |
|
||||
| `403` | `read_only` | Mutating endpoint called while `read_only=true`. |
|
||||
| `404` | `not_found` | Unknown route, unknown user, or unsupported sub-route (including current `rotate-secret` route). |
|
||||
| `405` | `method_not_allowed` | Unsupported method for `/v1/users/{username}` route shape. |
|
||||
| `409` | `revision_conflict` | `If-Match` revision mismatch. |
|
||||
| `409` | `user_exists` | User already exists on create. |
|
||||
| `409` | `last_user_forbidden` | Attempt to delete last configured user. |
|
||||
| `413` | `payload_too_large` | Body exceeds `request_body_limit_bytes`. |
|
||||
| `500` | `internal_error` | Internal error (I/O, serialization, config load/save). |
|
||||
| `503` | `api_disabled` | API disabled in config. |
|
||||
|
||||
## Routing and Method Edge Cases
|
||||
|
||||
| Case | Behavior |
|
||||
| --- | --- |
|
||||
| Path matching | Exact match on `req.uri().path()`. Query string does not affect route matching. |
|
||||
| Trailing slash | Not normalized. Example: `/v1/users/` is `404`. |
|
||||
| Username route with extra slash | `/v1/users/{username}/...` is not treated as user route and returns `404`. |
|
||||
| `PUT /v1/users/{username}` | `405 method_not_allowed`. |
|
||||
| `POST /v1/users/{username}` | `404 not_found`. |
|
||||
| `POST /v1/users/{username}/rotate-secret` | `404 not_found` in the current release due to a route matcher limitation. |
|
||||
|
||||
## Body and JSON Semantics

- The request body is only read for mutating routes that define a body contract.
- The body size limit is enforced during the streaming read (`413 payload_too_large`).
- An invalid transport body frame returns `400 bad_request` (`Invalid request body`).
- Invalid JSON returns `400 bad_request` (`Invalid JSON body`).
- `Content-Type` is not required for JSON parsing.
- Unknown JSON fields are ignored by deserialization.
- `PATCH` updates only provided fields and does not support explicit clearing of optional fields.
- `If-Match` supports both quoted and unquoted values; surrounding whitespace is trimmed (see the example below).

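A minimal sketch of optimistic concurrency with `If-Match`, assuming the API listens on `127.0.0.1:9091` (the commented default in this repo's quick-start config) and that `jq` is installed; the user `hello` and the field value are placeholders:

```bash
# Read the current revision from any successful response, then send it back.
rev=$(curl -s http://127.0.0.1:9091/v1/users/hello | jq -r '.revision')

# Quoted and unquoted If-Match values are both accepted.
curl -s -X PATCH "http://127.0.0.1:9091/v1/users/hello" \
  -H "If-Match: \"$rev\"" \
  -d '{"max_tcp_conns": 10}'
```

A stale revision yields `409 revision_conflict` instead of applying the update.
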
## Request Contracts

### `CreateUserRequest`

| Field | Type | Required | Description |
| --- | --- | --- | --- |
| `username` | `string` | yes | `[A-Za-z0-9_.-]`, length `1..64`. |
| `secret` | `string` | no | Exactly 32 hex chars. If missing, generated automatically. |
| `user_ad_tag` | `string` | no | Exactly 32 hex chars. |
| `max_tcp_conns` | `usize` | no | Per-user concurrent TCP limit. |
| `expiration_rfc3339` | `string` | no | RFC3339 expiration timestamp. |
| `data_quota_bytes` | `u64` | no | Per-user traffic quota. |
| `max_unique_ips` | `usize` | no | Per-user unique source IP limit. |

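A hedged create example built from the table above; the username, quota values, and listen address are placeholders, and `secret` may be omitted entirely to have one generated server-side:

```bash
# Returns 201 with a CreateUserResponse containing the effective secret.
curl -s -X POST http://127.0.0.1:9091/v1/users \
  -d '{
    "username": "alice",
    "max_tcp_conns": 20,
    "data_quota_bytes": 10737418240
  }'
```
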
### `PatchUserRequest`

| Field | Type | Required | Description |
| --- | --- | --- | --- |
| `secret` | `string` | no | Exactly 32 hex chars. |
| `user_ad_tag` | `string` | no | Exactly 32 hex chars. |
| `max_tcp_conns` | `usize` | no | Per-user concurrent TCP limit. |
| `expiration_rfc3339` | `string` | no | RFC3339 expiration timestamp. |
| `data_quota_bytes` | `u64` | no | Per-user traffic quota. |
| `max_unique_ips` | `usize` | no | Per-user unique source IP limit. |

### `RotateSecretRequest`

| Field | Type | Required | Description |
| --- | --- | --- | --- |
| `secret` | `string` | no | Exactly 32 hex chars. If missing, generated automatically. |

Note: the request contract is defined, but the corresponding route currently returns `404` (see routing edge cases).

## Response Data Contracts

### `HealthData`

| Field | Type | Description |
| --- | --- | --- |
| `status` | `string` | Always `"ok"`. |
| `read_only` | `bool` | Mirrors current API `read_only` mode. |

### `SummaryData`

| Field | Type | Description |
| --- | --- | --- |
| `uptime_seconds` | `f64` | Process uptime in seconds. |
| `connections_total` | `u64` | Total accepted client connections. |
| `connections_bad_total` | `u64` | Failed/invalid client connections. |
| `handshake_timeouts_total` | `u64` | Handshake timeout count. |
| `configured_users` | `usize` | Number of configured users in config. |

### `SystemInfoData`

| Field | Type | Description |
| --- | --- | --- |
| `version` | `string` | Binary version (`CARGO_PKG_VERSION`). |
| `target_arch` | `string` | Target architecture (`std::env::consts::ARCH`). |
| `target_os` | `string` | Target OS (`std::env::consts::OS`). |
| `build_profile` | `string` | Build profile (`PROFILE` env when available). |
| `git_commit` | `string?` | Optional commit hash from build env metadata. |
| `build_time_utc` | `string?` | Optional build timestamp from build env metadata. |
| `rustc_version` | `string?` | Optional compiler version from build env metadata. |
| `process_started_at_epoch_secs` | `u64` | Process start time as Unix epoch seconds. |
| `uptime_seconds` | `f64` | Process uptime in seconds. |
| `config_path` | `string` | Active config file path used by runtime. |
| `config_hash` | `string` | SHA-256 hash of current config content (same value as envelope `revision`). |
| `config_reload_count` | `u64` | Number of successfully observed config updates since process start. |
| `last_config_reload_epoch_secs` | `u64?` | Unix epoch seconds of the latest observed config reload; null/absent before the first reload. |

### `RuntimeGatesData`

| Field | Type | Description |
| --- | --- | --- |
| `accepting_new_connections` | `bool` | Current admission-gate state for new listener accepts. |
| `conditional_cast_enabled` | `bool` | Whether conditional ME admission logic is enabled (`general.use_middle_proxy`). |
| `me_runtime_ready` | `bool` | Current ME runtime readiness status used for conditional gate decisions. |
| `me2dc_fallback_enabled` | `bool` | Whether ME -> direct fallback is enabled. |
| `use_middle_proxy` | `bool` | Current transport mode preference. |

### `EffectiveLimitsData`

| Field | Type | Description |
| --- | --- | --- |
| `update_every_secs` | `u64` | Effective unified updater interval. |
| `me_reinit_every_secs` | `u64` | Effective ME periodic reinit interval. |
| `me_pool_force_close_secs` | `u64` | Effective stale-writer force-close timeout. |
| `timeouts` | `EffectiveTimeoutLimits` | Effective timeout policy snapshot. |
| `upstream` | `EffectiveUpstreamLimits` | Effective upstream connect/retry limits. |
| `middle_proxy` | `EffectiveMiddleProxyLimits` | Effective ME pool/floor/reconnect limits. |
| `user_ip_policy` | `EffectiveUserIpPolicyLimits` | Effective unique-IP policy mode/window. |

#### `EffectiveTimeoutLimits`

| Field | Type | Description |
| --- | --- | --- |
| `client_handshake_secs` | `u64` | Client handshake timeout. |
| `tg_connect_secs` | `u64` | Upstream Telegram connect timeout. |
| `client_keepalive_secs` | `u64` | Client keepalive interval. |
| `client_ack_secs` | `u64` | ACK timeout. |
| `me_one_retry` | `u8` | Fast retry count for single-endpoint ME DC. |
| `me_one_timeout_ms` | `u64` | Fast retry timeout per attempt for single-endpoint ME DC. |

#### `EffectiveUpstreamLimits`

| Field | Type | Description |
| --- | --- | --- |
| `connect_retry_attempts` | `u32` | Upstream connect retry attempts. |
| `connect_retry_backoff_ms` | `u64` | Upstream retry backoff delay. |
| `connect_budget_ms` | `u64` | Total connect wall-clock budget across retries. |
| `unhealthy_fail_threshold` | `u32` | Consecutive fail threshold for unhealthy marking. |
| `connect_failfast_hard_errors` | `bool` | Whether hard errors skip additional retries. |

#### `EffectiveMiddleProxyLimits`

| Field | Type | Description |
| --- | --- | --- |
| `floor_mode` | `string` | Effective floor mode (`static` or `adaptive`). |
| `adaptive_floor_idle_secs` | `u64` | Adaptive floor idle threshold. |
| `adaptive_floor_min_writers_single_endpoint` | `u8` | Adaptive floor minimum for single-endpoint DCs. |
| `adaptive_floor_recover_grace_secs` | `u64` | Adaptive floor recovery grace period. |
| `reconnect_max_concurrent_per_dc` | `u32` | Max concurrent reconnects per DC. |
| `reconnect_backoff_base_ms` | `u64` | Reconnect base backoff. |
| `reconnect_backoff_cap_ms` | `u64` | Reconnect backoff cap. |
| `reconnect_fast_retry_count` | `u32` | Number of fast retries before the standard backoff strategy. |
| `me2dc_fallback` | `bool` | Effective ME -> direct fallback flag. |

#### `EffectiveUserIpPolicyLimits`

| Field | Type | Description |
| --- | --- | --- |
| `mode` | `string` | Unique-IP policy mode (`active_window`, `time_window`, `combined`). |
| `window_secs` | `u64` | Time window length used by the unique-IP policy. |

### `SecurityPostureData`

| Field | Type | Description |
| --- | --- | --- |
| `api_read_only` | `bool` | Current API read-only state. |
| `api_whitelist_enabled` | `bool` | Whether whitelist filtering is active. |
| `api_whitelist_entries` | `usize` | Number of configured whitelist CIDRs. |
| `api_auth_header_enabled` | `bool` | Whether `Authorization` header validation is active. |
| `proxy_protocol_enabled` | `bool` | Global PROXY protocol accept setting. |
| `log_level` | `string` | Effective log level (`debug`, `verbose`, `normal`, `silent`). |
| `telemetry_core_enabled` | `bool` | Core telemetry toggle. |
| `telemetry_user_enabled` | `bool` | Per-user telemetry toggle. |
| `telemetry_me_level` | `string` | ME telemetry level (`silent`, `normal`, `debug`). |

### `SecurityWhitelistData`

| Field | Type | Description |
| --- | --- | --- |
| `generated_at_epoch_secs` | `u64` | Snapshot generation timestamp. |
| `enabled` | `bool` | `true` when the whitelist has at least one CIDR entry. |
| `entries_total` | `usize` | Number of whitelist CIDR entries. |
| `entries` | `string[]` | Whitelist CIDR entries as strings. |

### Runtime Min Endpoints

- `/v1/runtime/me_pool_state`: generations, hardswap state, writer contour/health counts, refill in-flight snapshot.
- `/v1/runtime/me_quality`: ME error/drift/reconnect counters and per-DC RTT coverage snapshot.
- `/v1/runtime/upstream_quality`: upstream runtime policy, connect counters, health summary, and per-upstream DC latency/IP preference.
- `/v1/runtime/nat_stun`: NAT/STUN runtime flags, server lists, reflection cache state, and remaining backoff.

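For example, fetching one of these snapshots (the listen address is the commented quick-start default and is only an assumption here):

```bash
curl -s http://127.0.0.1:9091/v1/runtime/me_pool_state | jq
```
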
### Runtime Edge Endpoints

- `/v1/runtime/connections/summary`: cached connection totals (`total/me/direct`), active users, and top-N users by connections/traffic.
- `/v1/runtime/events/recent?limit=N`: bounded control-plane ring-buffer events (`limit` clamped to `[1, 1000]`); see the example below.
- If `server.api.runtime_edge_enabled=false`, runtime edge endpoints return `enabled=false` with `reason=feature_disabled`.

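A sketch of querying the events buffer; the listen address is an assumption:

```bash
# limit is clamped to [1, 1000]
curl -s 'http://127.0.0.1:9091/v1/runtime/events/recent?limit=50' | jq
```

With `runtime_edge_enabled=false`, the payload carries the documented disabled marker (other fields omitted here for brevity):

```json
{ "enabled": false, "reason": "feature_disabled" }
```
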
### `ZeroAllData`

| Field | Type | Description |
| --- | --- | --- |
| `generated_at_epoch_secs` | `u64` | Snapshot time (Unix epoch seconds). |
| `core` | `ZeroCoreData` | Core counters and telemetry policy snapshot. |
| `upstream` | `ZeroUpstreamData` | Upstream connect counters/histogram buckets. |
| `middle_proxy` | `ZeroMiddleProxyData` | ME protocol/health counters. |
| `pool` | `ZeroPoolData` | ME pool lifecycle counters. |
| `desync` | `ZeroDesyncData` | Frame desync counters. |

#### `ZeroCoreData`

| Field | Type | Description |
| --- | --- | --- |
| `uptime_seconds` | `f64` | Process uptime. |
| `connections_total` | `u64` | Total accepted connections. |
| `connections_bad_total` | `u64` | Failed/invalid connections. |
| `handshake_timeouts_total` | `u64` | Handshake timeouts. |
| `configured_users` | `usize` | Configured user count. |
| `telemetry_core_enabled` | `bool` | Core telemetry toggle. |
| `telemetry_user_enabled` | `bool` | User telemetry toggle. |
| `telemetry_me_level` | `string` | ME telemetry level (`off`, `normal`, `verbose`). |

#### `ZeroUpstreamData`

| Field | Type | Description |
| --- | --- | --- |
| `connect_attempt_total` | `u64` | Total upstream connect attempts. |
| `connect_success_total` | `u64` | Successful upstream connects. |
| `connect_fail_total` | `u64` | Failed upstream connects. |
| `connect_failfast_hard_error_total` | `u64` | Fail-fast hard errors. |
| `connect_attempts_bucket_1` | `u64` | Connect attempts resolved in 1 try. |
| `connect_attempts_bucket_2` | `u64` | Connect attempts resolved in 2 tries. |
| `connect_attempts_bucket_3_4` | `u64` | Connect attempts resolved in 3-4 tries. |
| `connect_attempts_bucket_gt_4` | `u64` | Connect attempts requiring more than 4 tries. |
| `connect_duration_success_bucket_le_100ms` | `u64` | Successful connects <=100 ms. |
| `connect_duration_success_bucket_101_500ms` | `u64` | Successful connects 101-500 ms. |
| `connect_duration_success_bucket_501_1000ms` | `u64` | Successful connects 501-1000 ms. |
| `connect_duration_success_bucket_gt_1000ms` | `u64` | Successful connects >1000 ms. |
| `connect_duration_fail_bucket_le_100ms` | `u64` | Failed connects <=100 ms. |
| `connect_duration_fail_bucket_101_500ms` | `u64` | Failed connects 101-500 ms. |
| `connect_duration_fail_bucket_501_1000ms` | `u64` | Failed connects 501-1000 ms. |
| `connect_duration_fail_bucket_gt_1000ms` | `u64` | Failed connects >1000 ms. |

### `UpstreamsData`

| Field | Type | Description |
| --- | --- | --- |
| `enabled` | `bool` | Runtime upstream snapshot availability according to API config. |
| `reason` | `string?` | `feature_disabled` or `source_unavailable` when the runtime snapshot is unavailable. |
| `generated_at_epoch_secs` | `u64` | Snapshot generation time. |
| `zero` | `ZeroUpstreamData` | Always-available zero-cost upstream counters block. |
| `summary` | `UpstreamSummaryData?` | Runtime upstream aggregate view, null when unavailable. |
| `upstreams` | `UpstreamStatus[]?` | Per-upstream runtime status rows, null when unavailable. |

#### `UpstreamSummaryData`

| Field | Type | Description |
| --- | --- | --- |
| `configured_total` | `usize` | Total configured upstream entries. |
| `healthy_total` | `usize` | Upstreams currently marked healthy. |
| `unhealthy_total` | `usize` | Upstreams currently marked unhealthy. |
| `direct_total` | `usize` | Number of direct upstream entries. |
| `socks4_total` | `usize` | Number of SOCKS4 upstream entries. |
| `socks5_total` | `usize` | Number of SOCKS5 upstream entries. |

#### `UpstreamStatus`

| Field | Type | Description |
| --- | --- | --- |
| `upstream_id` | `usize` | Runtime upstream index. |
| `route_kind` | `string` | Upstream route kind: `direct`, `socks4`, `socks5`. |
| `address` | `string` | Upstream address (`direct` for direct route kind). Authentication fields are intentionally omitted. |
| `weight` | `u16` | Selection weight. |
| `scopes` | `string` | Configured scope selector string. |
| `healthy` | `bool` | Current health flag. |
| `fails` | `u32` | Consecutive fail counter. |
| `last_check_age_secs` | `u64` | Seconds since the last health-check update. |
| `effective_latency_ms` | `f64?` | Effective upstream latency used by the selector. |
| `dc` | `UpstreamDcStatus[]` | Per-DC latency/IP preference snapshot. |

#### `UpstreamDcStatus`

| Field | Type | Description |
| --- | --- | --- |
| `dc` | `i16` | Telegram DC id. |
| `latency_ema_ms` | `f64?` | Per-DC latency EMA value. |
| `ip_preference` | `string` | Per-DC IP family preference: `unknown`, `prefer_v4`, `prefer_v6`, `both_work`, `unavailable`. |

#### `ZeroMiddleProxyData`

| Field | Type | Description |
| --- | --- | --- |
| `keepalive_sent_total` | `u64` | ME keepalive packets sent. |
| `keepalive_failed_total` | `u64` | ME keepalive send failures. |
| `keepalive_pong_total` | `u64` | Keepalive pong responses received. |
| `keepalive_timeout_total` | `u64` | Keepalive timeout events. |
| `rpc_proxy_req_signal_sent_total` | `u64` | RPC proxy activity signals sent. |
| `rpc_proxy_req_signal_failed_total` | `u64` | RPC proxy activity signal failures. |
| `rpc_proxy_req_signal_skipped_no_meta_total` | `u64` | Signals skipped due to missing metadata. |
| `rpc_proxy_req_signal_response_total` | `u64` | RPC proxy signal responses received. |
| `rpc_proxy_req_signal_close_sent_total` | `u64` | RPC proxy close signals sent. |
| `reconnect_attempt_total` | `u64` | ME reconnect attempts. |
| `reconnect_success_total` | `u64` | Successful reconnects. |
| `handshake_reject_total` | `u64` | ME handshake rejects. |
| `handshake_error_codes` | `ZeroCodeCount[]` | Handshake rejects grouped by code. |
| `reader_eof_total` | `u64` | ME reader EOF events. |
| `idle_close_by_peer_total` | `u64` | Idle closes initiated by the peer. |
| `route_drop_no_conn_total` | `u64` | Route drops due to a missing bound connection. |
| `route_drop_channel_closed_total` | `u64` | Route drops due to a closed channel. |
| `route_drop_queue_full_total` | `u64` | Route drops due to a full queue (total). |
| `route_drop_queue_full_base_total` | `u64` | Route drops in base queue mode. |
| `route_drop_queue_full_high_total` | `u64` | Route drops in high queue mode. |
| `socks_kdf_strict_reject_total` | `u64` | SOCKS KDF strict rejects. |
| `socks_kdf_compat_fallback_total` | `u64` | SOCKS KDF compat fallbacks. |
| `endpoint_quarantine_total` | `u64` | Endpoint quarantine activations. |
| `kdf_drift_total` | `u64` | KDF drift detections. |
| `kdf_port_only_drift_total` | `u64` | KDF port-only drift detections. |
| `hardswap_pending_reuse_total` | `u64` | Pending hardswap reused events. |
| `hardswap_pending_ttl_expired_total` | `u64` | Pending hardswap TTL expiry events. |
| `single_endpoint_outage_enter_total` | `u64` | Entered single-endpoint outage mode. |
| `single_endpoint_outage_exit_total` | `u64` | Exited single-endpoint outage mode. |
| `single_endpoint_outage_reconnect_attempt_total` | `u64` | Reconnect attempts in outage mode. |
| `single_endpoint_outage_reconnect_success_total` | `u64` | Reconnect successes in outage mode. |
| `single_endpoint_quarantine_bypass_total` | `u64` | Quarantine bypasses in outage mode. |
| `single_endpoint_shadow_rotate_total` | `u64` | Shadow writer rotations. |
| `single_endpoint_shadow_rotate_skipped_quarantine_total` | `u64` | Shadow rotations skipped because of quarantine. |
| `floor_mode_switch_total` | `u64` | Total floor mode switches. |
| `floor_mode_switch_static_to_adaptive_total` | `u64` | Static -> adaptive switches. |
| `floor_mode_switch_adaptive_to_static_total` | `u64` | Adaptive -> static switches. |

#### `ZeroCodeCount`

| Field | Type | Description |
| --- | --- | --- |
| `code` | `i32` | Handshake error code. |
| `total` | `u64` | Events with this code. |

#### `ZeroPoolData`

| Field | Type | Description |
| --- | --- | --- |
| `pool_swap_total` | `u64` | Pool swap count. |
| `pool_drain_active` | `u64` | Currently active draining pools. |
| `pool_force_close_total` | `u64` | Forced pool closes by timeout. |
| `pool_stale_pick_total` | `u64` | Stale writer picks for binding. |
| `writer_removed_total` | `u64` | Writer removals total. |
| `writer_removed_unexpected_total` | `u64` | Unexpected writer removals. |
| `refill_triggered_total` | `u64` | Refill triggers. |
| `refill_skipped_inflight_total` | `u64` | Refill skipped because a refill is already in flight. |
| `refill_failed_total` | `u64` | Refill failures. |
| `writer_restored_same_endpoint_total` | `u64` | Restores on the same endpoint. |
| `writer_restored_fallback_total` | `u64` | Restores on a fallback endpoint. |

#### `ZeroDesyncData`

| Field | Type | Description |
| --- | --- | --- |
| `secure_padding_invalid_total` | `u64` | Invalid secure padding events. |
| `desync_total` | `u64` | Desync events total. |
| `desync_full_logged_total` | `u64` | Fully logged desync events. |
| `desync_suppressed_total` | `u64` | Suppressed desync logs. |
| `desync_frames_bucket_0` | `u64` | Desync frames bucket 0. |
| `desync_frames_bucket_1_2` | `u64` | Desync frames bucket 1-2. |
| `desync_frames_bucket_3_10` | `u64` | Desync frames bucket 3-10. |
| `desync_frames_bucket_gt_10` | `u64` | Desync frames bucket >10. |

### `MinimalAllData`

| Field | Type | Description |
| --- | --- | --- |
| `enabled` | `bool` | Whether minimal runtime snapshots are enabled by config. |
| `reason` | `string?` | `feature_disabled` or `source_unavailable` when applicable. |
| `generated_at_epoch_secs` | `u64` | Snapshot generation time. |
| `data` | `MinimalAllPayload?` | Null when disabled; fallback payload when the source is unavailable. |

#### `MinimalAllPayload`

| Field | Type | Description |
| --- | --- | --- |
| `me_writers` | `MeWritersData` | ME writer status block. |
| `dcs` | `DcStatusData` | DC aggregate status block. |
| `me_runtime` | `MinimalMeRuntimeData?` | Runtime ME control snapshot. |
| `network_path` | `MinimalDcPathData[]` | Active IP path selection per DC. |

#### `MinimalMeRuntimeData`

| Field | Type | Description |
| --- | --- | --- |
| `active_generation` | `u64` | Active pool generation. |
| `warm_generation` | `u64` | Warm pool generation. |
| `pending_hardswap_generation` | `u64` | Pending hardswap generation. |
| `pending_hardswap_age_secs` | `u64?` | Pending hardswap age in seconds. |
| `hardswap_enabled` | `bool` | Hardswap mode toggle. |
| `floor_mode` | `string` | Writer floor mode. |
| `adaptive_floor_idle_secs` | `u64` | Idle threshold for the adaptive floor. |
| `adaptive_floor_min_writers_single_endpoint` | `u8` | Minimum writers for a single-endpoint DC in adaptive mode. |
| `adaptive_floor_recover_grace_secs` | `u64` | Grace period for floor recovery. |
| `me_keepalive_enabled` | `bool` | ME keepalive toggle. |
| `me_keepalive_interval_secs` | `u64` | Keepalive period. |
| `me_keepalive_jitter_secs` | `u64` | Keepalive jitter. |
| `me_keepalive_payload_random` | `bool` | Randomized keepalive payload toggle. |
| `rpc_proxy_req_every_secs` | `u64` | Period for the RPC proxy request signal. |
| `me_reconnect_max_concurrent_per_dc` | `u32` | Reconnect concurrency per DC. |
| `me_reconnect_backoff_base_ms` | `u64` | Base reconnect backoff. |
| `me_reconnect_backoff_cap_ms` | `u64` | Max reconnect backoff. |
| `me_reconnect_fast_retry_count` | `u32` | Fast retry attempts before normal backoff. |
| `me_pool_drain_ttl_secs` | `u64` | Pool drain TTL. |
| `me_pool_force_close_secs` | `u64` | Hard close timeout for draining writers. |
| `me_pool_min_fresh_ratio` | `f32` | Minimum fresh ratio before swap. |
| `me_bind_stale_mode` | `string` | Stale writer bind policy. |
| `me_bind_stale_ttl_secs` | `u64` | Stale writer TTL. |
| `me_single_endpoint_shadow_writers` | `u8` | Shadow writers for single-endpoint DCs. |
| `me_single_endpoint_outage_mode_enabled` | `bool` | Outage mode toggle for single-endpoint DCs. |
| `me_single_endpoint_outage_disable_quarantine` | `bool` | Quarantine behavior in outage mode. |
| `me_single_endpoint_outage_backoff_min_ms` | `u64` | Outage mode min reconnect backoff. |
| `me_single_endpoint_outage_backoff_max_ms` | `u64` | Outage mode max reconnect backoff. |
| `me_single_endpoint_shadow_rotate_every_secs` | `u64` | Shadow rotation interval. |
| `me_deterministic_writer_sort` | `bool` | Deterministic writer ordering toggle. |
| `me_socks_kdf_policy` | `string` | Current SOCKS KDF policy mode. |
| `quarantined_endpoints_total` | `usize` | Total quarantined endpoints. |
| `quarantined_endpoints` | `MinimalQuarantineData[]` | Quarantine details. |

#### `MinimalQuarantineData`

| Field | Type | Description |
| --- | --- | --- |
| `endpoint` | `string` | Endpoint (`ip:port`). |
| `remaining_ms` | `u64` | Remaining quarantine duration. |

#### `MinimalDcPathData`

| Field | Type | Description |
| --- | --- | --- |
| `dc` | `i16` | Telegram DC identifier. |
| `ip_preference` | `string?` | Runtime IP family preference. |
| `selected_addr_v4` | `string?` | Selected IPv4 endpoint for this DC. |
| `selected_addr_v6` | `string?` | Selected IPv6 endpoint for this DC. |

### `MeWritersData`

| Field | Type | Description |
| --- | --- | --- |
| `middle_proxy_enabled` | `bool` | `false` when minimal runtime is disabled or the source is unavailable. |
| `reason` | `string?` | `feature_disabled` or `source_unavailable` when not fully available. |
| `generated_at_epoch_secs` | `u64` | Snapshot generation time. |
| `summary` | `MeWritersSummary` | Coverage/availability summary. |
| `writers` | `MeWriterStatus[]` | Per-writer statuses. |

#### `MeWritersSummary`

| Field | Type | Description |
| --- | --- | --- |
| `configured_dc_groups` | `usize` | Number of configured DC groups. |
| `configured_endpoints` | `usize` | Total configured ME endpoints. |
| `available_endpoints` | `usize` | Endpoints currently available. |
| `available_pct` | `f64` | `available_endpoints / configured_endpoints * 100`. |
| `required_writers` | `usize` | Required writers based on the current floor policy. |
| `alive_writers` | `usize` | Writers currently alive. |
| `coverage_pct` | `f64` | `alive_writers / required_writers * 100`. |

#### `MeWriterStatus`

| Field | Type | Description |
| --- | --- | --- |
| `writer_id` | `u64` | Runtime writer identifier. |
| `dc` | `i16?` | DC id if mapped. |
| `endpoint` | `string` | Endpoint (`ip:port`). |
| `generation` | `u64` | Pool generation owning this writer. |
| `state` | `string` | Writer state (`warm`, `active`, `draining`). |
| `draining` | `bool` | Draining flag. |
| `degraded` | `bool` | Degraded flag. |
| `bound_clients` | `usize` | Number of currently bound clients. |
| `idle_for_secs` | `u64?` | Idle age in seconds if idle. |
| `rtt_ema_ms` | `f64?` | RTT exponential moving average. |

### `DcStatusData`

| Field | Type | Description |
| --- | --- | --- |
| `middle_proxy_enabled` | `bool` | `false` when minimal runtime is disabled or the source is unavailable. |
| `reason` | `string?` | `feature_disabled` or `source_unavailable` when not fully available. |
| `generated_at_epoch_secs` | `u64` | Snapshot generation time. |
| `dcs` | `DcStatus[]` | Per-DC status rows. |

#### `DcStatus`

| Field | Type | Description |
| --- | --- | --- |
| `dc` | `i16` | Telegram DC id. |
| `endpoints` | `string[]` | Endpoints in this DC (`ip:port`). |
| `available_endpoints` | `usize` | Endpoints currently available in this DC. |
| `available_pct` | `f64` | `available_endpoints / endpoints_total * 100`. |
| `required_writers` | `usize` | Required writer count for this DC. |
| `alive_writers` | `usize` | Alive writers in this DC. |
| `coverage_pct` | `f64` | `alive_writers / required_writers * 100`. |
| `rtt_ms` | `f64?` | Aggregated RTT for the DC. |
| `load` | `usize` | Active client sessions bound to this DC. |

### `UserInfo`

| Field | Type | Description |
| --- | --- | --- |
| `username` | `string` | Username. |
| `user_ad_tag` | `string?` | Optional ad tag (32 hex chars). |
| `max_tcp_conns` | `usize?` | Optional max concurrent TCP limit. |
| `expiration_rfc3339` | `string?` | Optional expiration timestamp. |
| `data_quota_bytes` | `u64?` | Optional data quota. |
| `max_unique_ips` | `usize?` | Optional unique IP limit. |
| `current_connections` | `u64` | Current live connections. |
| `active_unique_ips` | `usize` | Current active unique source IPs. |
| `total_octets` | `u64` | Total traffic octets for this user. |
| `links` | `UserLinks` | Active connection links derived from the current config. |

#### `UserLinks`

| Field | Type | Description |
| --- | --- | --- |
| `classic` | `string[]` | Active `tg://proxy` links for classic mode. |
| `secure` | `string[]` | Active `tg://proxy` links for secure/DD mode. |
| `tls` | `string[]` | Active `tg://proxy` links for EE-TLS mode (one per host + TLS domain pair). |

Link generation uses the active config and enabled modes (an illustrative link shape is sketched after this list):

- `[general.links].public_host/public_port` take priority.
- If `public_host` is not set, startup-detected public IPs are used (`IPv4`, `IPv6`, or both when available).
- Fallback host sources: listener `announce`, `announce_ip`, explicit listener `ip`.
- Legacy fallback: `listen_addr_ipv4` and `listen_addr_ipv6` when routable.
- Startup-detected IPs are fixed for the process lifetime and refreshed on restart.
- User rows are sorted by `username` in ascending lexical order.

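The shape below follows the common `tg://proxy` scheme used by MTProto proxies; the host, port, and secret are placeholders. In many implementations the secure/DD mode prefixes the hex secret with `dd` and fake-TLS with `ee` plus the hex-encoded domain, but whether telemt encodes its links exactly that way should be verified against real `/v1/users` output rather than assumed from this sketch:

```
tg://proxy?server=proxy.example.com&port=443&secret=<hex-secret>
```
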
### `CreateUserResponse`

| Field | Type | Description |
| --- | --- | --- |
| `user` | `UserInfo` | Created or updated user view. |
| `secret` | `string` | Effective user secret. |

## Mutation Semantics

| Endpoint | Notes |
| --- | --- |
| `POST /v1/users` | Creates the user and validates the resulting config before the atomic save. |
| `PATCH /v1/users/{username}` | Partial update of provided fields only. Missing fields remain unchanged. |
| `POST /v1/users/{username}/rotate-secret` | Currently returns `404` in the runtime route matcher; the request schema is reserved for the intended behavior. |
| `DELETE /v1/users/{username}` | Deletes the user and related optional settings. Deleting the last user is blocked. See the example below. |

All mutating endpoints:

- Respect `read_only` mode.
- Accept optional `If-Match` for optimistic concurrency.
- Return the new `revision` after a successful write.
- Use a process-local mutation lock plus an atomic write (`tmp + rename`) for config persistence.

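A minimal delete sketch, using the same assumed listen address and a placeholder username:

```bash
# Returns the deleted username; deleting the last configured user
# is rejected with 409 last_user_forbidden.
curl -s -X DELETE http://127.0.0.1:9091/v1/users/alice
```
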
## Runtime State Matrix

| Endpoint | `minimal_runtime_enabled=false` | `minimal_runtime_enabled=true` + source unavailable | `minimal_runtime_enabled=true` + source available |
| --- | --- | --- | --- |
| `/v1/stats/minimal/all` | `enabled=false`, `reason=feature_disabled`, `data=null` | `enabled=true`, `reason=source_unavailable`, fallback `data` with disabled ME blocks | `enabled=true`, `reason` omitted, full payload |
| `/v1/stats/me-writers` | `middle_proxy_enabled=false`, `reason=feature_disabled` | `middle_proxy_enabled=false`, `reason=source_unavailable` | `middle_proxy_enabled=true`, runtime snapshot |
| `/v1/stats/dcs` | `middle_proxy_enabled=false`, `reason=feature_disabled` | `middle_proxy_enabled=false`, `reason=source_unavailable` | `middle_proxy_enabled=true`, runtime snapshot |
| `/v1/stats/upstreams` | `enabled=false`, `reason=feature_disabled`, `summary/upstreams` omitted, `zero` still present | `enabled=true`, `reason=source_unavailable`, `summary/upstreams` omitted, `zero` present | `enabled=true`, `reason` omitted, `summary/upstreams` present, `zero` present |

`source_unavailable` conditions:

- ME endpoints: the ME pool is absent (for example direct-only mode or failed ME initialization).
- Upstreams endpoint: the non-blocking upstream snapshot lock is unavailable at request time.

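For example, with `minimal_runtime_enabled=false`, `/v1/stats/minimal/all` reduces to the disabled shape from the first matrix column (snapshot timestamp and envelope fields omitted here for brevity):

```json
{
  "enabled": false,
  "reason": "feature_disabled",
  "data": null
}
```
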
## Serialization Rules

- Success responses always include `revision`.
- Error responses never include `revision`; they include `request_id`.
- Optional fields with `skip_serializing_if` are omitted when absent.
- Nullable payload fields may still be `null` where the contract uses `?` (for example `UserInfo` option fields).
- For `/v1/stats/upstreams`, authentication details of SOCKS upstreams are intentionally omitted.

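A sketch of a successful envelope, using `HealthData` as the payload. Only `revision` (documented above) and the `status`/`read_only` fields are grounded; the `data` key name and exact nesting are assumptions:

```json
{
  "data": {
    "status": "ok",
    "read_only": false
  },
  "revision": "<sha256-of-current-config>"
}
```
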
## Operational Notes

| Topic | Details |
| --- | --- |
| API startup | The API listener is spawned only when `[server.api].enabled=true`. |
| `listen` port `0` | API spawn is skipped when the parsed listen port is `0` (treated as a disabled bind target). |
| Bind failure | A failed API bind logs a warning and the API task exits (no auto-retry loop). |
| ME runtime status endpoints | `/v1/stats/me-writers`, `/v1/stats/dcs`, `/v1/stats/minimal/all` require `[server.api].minimal_runtime_enabled=true`; otherwise they return a disabled payload with `reason=feature_disabled`. |
| Upstream runtime endpoint | `/v1/stats/upstreams` always returns `zero`, but the runtime fields (`summary`, `upstreams`) require `[server.api].minimal_runtime_enabled=true`. |
| Restart requirements | `server.api` changes are restart-required for predictable behavior. |
| Hot-reload nuance | A pure `server.api`-only config change may not propagate through the watcher broadcast; a mixed change (with hot fields) may propagate API flags while still warning that a restart is required. |
| Runtime apply path | Successful writes are picked up by the existing config watcher/hot-reload path. |
| Exposure | Built-in TLS/mTLS is not provided. Use a loopback bind plus a reverse proxy if needed. |
| Pagination | The user list currently has no pagination/filtering. |
| Serialization side effect | Config comments and manual formatting are not preserved on write. |

## Known Limitations (Current Release)

- `POST /v1/users/{username}/rotate-secret` is currently unreachable in the route matcher and returns `404`.
- API runtime controls under `server.api` are documented as restart-required; hot-reload behavior for these fields is not strictly uniform across all change combinations.

docs/FAQ.en.md (new file, 112 lines)

## How to set up the "proxy sponsor" channel and statistics via the @MTProxybot bot

1. Go to the @MTProxybot bot.
2. Enter the command `/newproxy`.
3. Send the server IP and port. For example: `1.2.3.4:443`.
4. Open the config: `nano /etc/telemt.toml`.
5. Copy the user secret from the `[access.users]` section and send it to the bot.
6. Copy the tag received from the bot. For example `1234567890abcdef1234567890abcdef`.
> [!WARNING]
> The link provided by the bot will not work. Do not copy or use it!
7. Uncomment the `ad_tag` parameter and enter the tag received from the bot.
8. Uncomment/add the parameter `use_middle_proxy = true`.

Config example:
```toml
[general]
ad_tag = "1234567890abcdef1234567890abcdef"
use_middle_proxy = true
```
9. Save the config: Ctrl+S -> Ctrl+X.
10. Restart telemt: `systemctl restart telemt`.
11. In the bot, send the command `/myproxies` and select the added server.
12. Click the "Set promotion" button.
13. Send a **public link** to the channel. Private channels cannot be added!
14. Wait approximately 1 hour for the information to update on Telegram servers.
> [!WARNING]
> You will not see the "proxy sponsor" if you are already subscribed to the channel.

**You can also set up different channels for different users.**
```toml
[access.user_ad_tags]
hello = "ad_tag"
hello2 = "ad_tag2"
```

## How many people can use 1 link

By default, 1 link can be used by any number of people.
You can limit the number of IPs using the proxy:
```toml
[access.user_max_unique_ips]
hello = 1
```
This parameter limits how many unique IPs can use 1 link simultaneously. If one user disconnects, another can connect. Multiple users can also share the same IP.

## How to create multiple different links

1. Generate the required number of secrets: `openssl rand -hex 16`
2. Open the config: `nano /etc/telemt.toml`
3. Add new users:
```toml
[access.users]
user1 = "00000000000000000000000000000001"
user2 = "00000000000000000000000000000002"
user3 = "00000000000000000000000000000003"
```
4. Save the config: Ctrl+S -> Ctrl+X. You don't need to restart telemt.
5. Get the links via `journalctl -u telemt -n -g "links" --no-pager -o cat | tac`

## How to view metrics

1. Open the config: `nano /etc/telemt.toml`
2. Add the following parameters:
```toml
[server]
metrics_port = 9090
metrics_whitelist = ["127.0.0.1/32", "::1/128", "0.0.0.0/0"]
```
3. Save the config: Ctrl+S -> Ctrl+X.
4. Metrics are available at `SERVER_IP:9090/metrics` (see the example below).
> [!WARNING]
> `"0.0.0.0/0"` in `metrics_whitelist` opens access from any IP. Replace it with your own IP, for example `"1.2.3.4"`.

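A quick check from an allowed host (`SERVER_IP` is a placeholder for your server address):

```bash
curl -s http://SERVER_IP:9090/metrics | head
```
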
## Additional parameters

### Domain in the link instead of an IP
To put a domain into the links, add to the `[general.links]` section of the config file:
```toml
[general.links]
public_host = "proxy.example.com"
```

### Upstream Manager
To configure an upstream, add a `[[upstreams]]` section to the config.toml file:
#### Binding to an IP
```toml
[[upstreams]]
type = "direct"
weight = 1
enabled = true
interface = "192.168.1.100" # Change to your outgoing IP
```
#### SOCKS4/5 as Upstream
- Without authentication:
```toml
[[upstreams]]
type = "socks5"          # "socks4" or "socks5"
address = "1.2.3.4:1234" # SOCKS server address
weight = 1               # Selection weight
enabled = true
```

- With authentication:
```toml
[[upstreams]]
type = "socks5"          # "socks4" or "socks5"
address = "1.2.3.4:1234" # SOCKS server address
username = "user"        # Username for auth on the SOCKS server
password = "pass"        # Password for auth on the SOCKS server
weight = 1               # Selection weight
enabled = true
```

docs/FAQ.ru.md (new file, 112 lines)

## How to set up the "proxy sponsor" channel and statistics via the @MTProxybot bot

1. Go to the @MTProxybot bot.
2. Enter the command `/newproxy`.
3. Send the server IP and port. For example: `1.2.3.4:443`.
4. Open the config: `nano /etc/telemt.toml`.
5. Copy the user secret from the `[access.users]` section and send it to the bot.
6. Copy the tag received from the bot. For example `1234567890abcdef1234567890abcdef`.
> [!WARNING]
> The link the bot gives out will not work. Do not copy or use it!
7. Uncomment the `ad_tag` parameter and enter the tag received from the bot.
8. Uncomment/add the parameter `use_middle_proxy = true`.

Config example:
```toml
[general]
ad_tag = "1234567890abcdef1234567890abcdef"
use_middle_proxy = true
```
9. Save the config: Ctrl+S -> Ctrl+X.
10. Restart telemt: `systemctl restart telemt`.
11. In the bot, send the command `/myproxies` and select the added server.
12. Click the "Set promotion" button.
13. Send a **public link** to the channel. A private channel cannot be added!
14. Wait approximately 1 hour for the information to update on Telegram servers.
> [!WARNING]
> The "proxy sponsor" will not be shown to you if you are already subscribed to the channel.

**You can also set up different channels for different users.**
```toml
[access.user_ad_tags]
hello = "ad_tag"
hello2 = "ad_tag2"
```

## How many people can use 1 link

By default, 1 link can be used by any number of people.
You can limit the number of IPs using the proxy:
```toml
[access.user_max_unique_ips]
hello = 1
```
This parameter limits how many unique IPs can use 1 link simultaneously. If one user disconnects, a second one can connect. Several users can also sit behind the same IP.

## How to create multiple different links

1. Generate the required number of secrets: `openssl rand -hex 16`
2. Open the config: `nano /etc/telemt.toml`
3. Add new users:
```toml
[access.users]
user1 = "00000000000000000000000000000001"
user2 = "00000000000000000000000000000002"
user3 = "00000000000000000000000000000003"
```
4. Save the config: Ctrl+S -> Ctrl+X. There is no need to restart telemt.
5. Get the links via `journalctl -u telemt -n -g "links" --no-pager -o cat | tac`

## How to view metrics

1. Open the config: `nano /etc/telemt.toml`
2. Add the following parameters:
```toml
[server]
metrics_port = 9090
metrics_whitelist = ["127.0.0.1/32", "::1/128", "0.0.0.0/0"]
```
3. Save the config: Ctrl+S -> Ctrl+X.
4. Metrics are available at `SERVER_IP:9090/metrics`.
> [!WARNING]
> `"0.0.0.0/0"` in `metrics_whitelist` opens access from any IP. Replace it with your own IP, for example `"1.2.3.4"`.

## Additional parameters

### Domain in the link instead of an IP
To put a domain into the links, add to the `[general.links]` section of the config file:
```toml
[general.links]
public_host = "proxy.example.com"
```

### Upstream Manager
To configure an upstream, add a `[[upstreams]]` section to the config.toml file:
#### Binding to an IP
```toml
[[upstreams]]
type = "direct"
weight = 1
enabled = true
interface = "192.168.1.100" # Change to your outgoing IP
```
#### SOCKS4/5 as Upstream
- Without authentication:
```toml
[[upstreams]]
type = "socks5"          # "socks4" or "socks5"
address = "1.2.3.4:1234" # SOCKS server address
weight = 1               # Selection weight
enabled = true
```

- With authentication:
```toml
[[upstreams]]
type = "socks5"          # "socks4" or "socks5"
address = "1.2.3.4:1234" # SOCKS server address
username = "user"        # Username for auth on the SOCKS server
password = "pass"        # Password for auth on the SOCKS server
weight = 1               # Selection weight
enabled = true
```

docs/MIDDLE-END-KDF.de.md (new file, 40 lines)

# Middle-End Proxy

## KDF Addressing — Implementation FAQ

### Does the C reference implementation require both the external IP address and the port for the KDF?

Yes.

In the C reference implementation, **both the IP address and the port are included in the KDF**, on both sides of the connection.

In `aes_create_keys()`, the KDF input contains:

- `server_ip + client_port`
- `client_ip + server_port`
- as well as the secret / nonces

For IPv6:

- the IPv4 fields are set to 0
- the IPv6 addresses are added

The **ports remain part of the KDF**.

> If the external IP or port differs from the expected values (e.g. due to NAT, SOCKS, or a proxy), different keys are derived and the handshake fails.

---

### Can the port be excluded from the KDF (e.g. by setting port = 0)?

**No!**

The C reference implementation offers **no way to ignore the port**:
- `client_port` and `server_port` are a fixed part of the KDF
- Real socket ports are always passed:
  - `c->our_port`
  - `c->remote_port`

If a port has the value `0`, it is still incorporated into the KDF as `0`.

There is no "port-ignore" logic.

docs/MIDDLE-END-KDF.en.md (new file, 41 lines)

# Middle-End Proxy

## KDF Addressing — Implementation FAQ

### Does the C reference implementation require both the external IP address and port for the KDF?

**Yes!**

In the C reference implementation, **both the IP address and the port are included in the KDF input** from both sides of the connection.

Inside `aes_create_keys()`, the KDF input explicitly contains:

- `server_ip + client_port`
- `client_ip + server_port`
- followed by the shared secret / nonces

For IPv6:

- IPv4 fields are zeroed
- IPv6 addresses are inserted

However, **client_port and server_port remain part of the KDF regardless of IP version**.

> If the externally observed IP or port (e.g. due to NAT, SOCKS, or proxy traversal) differs from what the peer expects, the derived keys will not match and the handshake will fail.

---

### Can the port be excluded from the KDF (e.g. by using port = 0)?

**No!**

The C implementation provides **no mechanism to ignore the port**:

- `client_port` and `server_port` are explicitly included in the KDF input
- Real socket ports are always passed:
  - `c->our_port`
  - `c->remote_port`

If a port is `0`, it is still incorporated into the KDF as `0`.

There is **no conditional logic to exclude ports**.

docs/MIDDLE-END-KDF.ru.md (new file, 41 lines)

# Middle-End Proxy

## KDF Addressing — Implementation FAQ

### Does the C reference implementation require the external IP and port for the KDF?

**Yes.**

In the C reference, **both the IP address and the port take part in the KDF**, on both sides of the connection.

In `aes_create_keys()`, the KDF input includes:

- `server_ip + client_port`
- `client_ip + server_port`
- followed by the secret / nonces

For IPv6:

- the IPv4 fields are zero-filled
- the IPv6 addresses are added

However, **the client_port and server_port still take part in the KDF**.

> If the external IP or port (for example, due to NAT, SOCKS, or a proxy) does not match what the other side expects, the keys diverge and the handshake breaks.

---

### Can the port be excluded from the KDF (for example, by setting port = 0)?

**No.**

The C reference has **no mechanism for disabling the port**.

- `client_port` and `server_port` are explicitly included in the KDF
- The real socket ports are passed:
  - `c->our_port`
  - `c->remote_port`

If a port equals `0`, it will still end up in the KDF as `0`.

There is no separate "ignore the port" logic.

docs/QUICK_START_GUIDE.en.md (new file, 165 lines)

# Telemt via Systemd

## Installation

This software targets Debian-based distributions: besides Debian itself, this includes Ubuntu, Mint, Kali, MX, and many other Linux distributions.

**1. Download**
```bash
wget -qO- "https://github.com/telemt/telemt/releases/latest/download/telemt-$(uname -m)-linux-$(ldd --version 2>&1 | grep -iq musl && echo musl || echo gnu).tar.gz" | tar -xz
```
**2. Move it to the bin folder**
```bash
mv telemt /bin
```
**3. Make the file executable**
```bash
chmod +x /bin/telemt
```

## How to use?

**This guide assumes that you:**
- are logged in as root or have run `su -` / `sudo su`
- already have the `telemt` executable in the `/bin` folder (see the **[Installation](#installation)** section)

---

**0. Check the port and generate secrets**

The port you plan to use must be absent from the output of:
```bash
netstat -lnp
```

Generate 16 random bytes (32 hex characters) with OpenSSL or another tool:
```bash
openssl rand -hex 16
```
OR
```bash
xxd -l 16 -p /dev/urandom
```
OR
```bash
python3 -c 'import os; print(os.urandom(16).hex())'
```
Save the result somewhere. You will need it later!

---

**1. Place your config at /etc/telemt.toml**

Open nano
```bash
nano /etc/telemt.toml
```
and paste your config:

```toml
# === General Settings ===
[general]
# ad_tag = "00000000000000000000000000000000"
use_middle_proxy = false

[general.modes]
classic = false
secure = false
tls = true

[server.api]
enabled = true
# listen = "127.0.0.1:9091"
# whitelist = ["127.0.0.1/32"]
# read_only = true

# === Anti-Censorship & Masking ===
[censorship]
tls_domain = "petrovich.ru"

[access.users]
# format: "username" = "32_hex_chars_secret"
hello = "00000000000000000000000000000000"
```

then Ctrl+S -> Ctrl+X to save.

> [!WARNING]
> Replace the value of the `hello` parameter with the value you obtained in step 0.
> Replace the value of the `tls_domain` parameter with another website.

---

**2. Create a service at /etc/systemd/system/telemt.service**

Open nano
```bash
nano /etc/systemd/system/telemt.service
```

and paste this systemd unit:
```ini
[Unit]
Description=Telemt
After=network.target

[Service]
Type=simple
WorkingDirectory=/bin
ExecStart=/bin/telemt /etc/telemt.toml
Restart=on-failure
LimitNOFILE=65536

[Install]
WantedBy=multi-user.target
```
then Ctrl+S -> Ctrl+X to save.


**3.** To start it, enter the command `systemctl start telemt`

**4.** To get status information, enter `systemctl status telemt`

**5.** For automatic startup at system boot, enter `systemctl enable telemt`

**6.** To get the link(s), enter
```bash
curl -s http://127.0.0.1:9091/v1/users | jq
```

> Any number of people can use one link.

---

# Telemt via Docker Compose

**1. Edit `config.toml` in the repo root (at least: port, user secrets, tls_domain)**
**2. Start the container:**
```bash
docker compose up -d --build
```
**3. Check the logs:**
```bash
docker compose logs -f telemt
```
**4. Stop:**
```bash
docker compose down
```
> [!NOTE]
> - `docker-compose.yml` maps `./config.toml` to `/app/config.toml` (read-only)
> - By default it publishes `443:443` and runs with dropped capabilities (only `NET_BIND_SERVICE` is added)
> - If you really need host networking (usually only for some IPv6 setups), uncomment `network_mode: host`

**Run without Compose**
```bash
docker build -t telemt:local .
docker run --name telemt --restart unless-stopped \
  -p 443:443 \
  -e RUST_LOG=info \
  -v "$PWD/config.toml:/app/config.toml:ro" \
  --read-only \
  --cap-drop ALL --cap-add NET_BIND_SERVICE \
  --ulimit nofile=65536:65536 \
  telemt:local
```

docs/QUICK_START_GUIDE.ru.md (new file, 167 lines)

# Telemt через Systemd
|
||||
|
||||
## Установка
|
||||
|
||||
Это программное обеспечение разработано для ОС на базе Debian: помимо Debian, это Ubuntu, Mint, Kali, MX и многие другие Linux
|
||||
|
||||
**1. Скачать**
|
||||
```bash
|
||||
wget -qO- "https://github.com/telemt/telemt/releases/latest/download/telemt-$(uname -m)-linux-$(ldd --version 2>&1 | grep -iq musl && echo musl || echo gnu).tar.gz" | tar -xz
|
||||
```
|
||||
**2. Переместить в папку Bin**
|
||||
```bash
|
||||
mv telemt /bin
|
||||
```
|
||||
**3. Сделать файл исполняемым**
|
||||
```bash
|
||||
chmod +x /bin/telemt
|
||||
```
|
||||
|
||||
## Как правильно использовать?
|
||||
|
||||
**Эта инструкция "предполагает", что вы:**
|
||||
- Авторизовались как пользователь root или выполнил `su -` / `sudo su`
|
||||
- У вас уже есть исполняемый файл "telemt" в папке /bin. Читайте раздел **[Установка](#установка)**
|
||||
|
||||
---
|
||||
|
||||
**0. Проверьте порт и сгенерируйте секреты**
|
||||
|
||||
Порт, который вы выбрали для использования, должен отсутствовать в списке:
|
||||
```bash
|
||||
netstat -lnp
|
||||
```
|
||||
|
||||
Сгенерируйте 16 bytes/32 символа в шестнадцатеричном формате с помощью OpenSSL или другим способом:
|
||||
```bash
|
||||
openssl rand -hex 16
|
||||
```
|
||||
ИЛИ
|
||||
```bash
|
||||
xxd -l 16 -p /dev/urandom
|
||||
```
|
||||
ИЛИ
|
||||
```bash
|
||||
python3 -c 'import os; print(os.urandom(16).hex())'
|
||||
```
|
||||
Полученный результат сохраняем где-нибудь. Он понадобиться вам дальше!
|
||||
|
||||
---
|
||||
|
||||
**1. Поместите свою конфигурацию в файл /etc/telemt.toml**
|
||||
|
||||
Открываем nano
|
||||
```bash
|
||||
nano /etc/telemt.toml
|
||||
```
|
||||
Вставьте свою конфигурацию
|
||||
|
||||
```toml
|
||||
# === General Settings ===
|
||||
[general]
|
||||
# ad_tag = "00000000000000000000000000000000"
|
||||
use_middle_proxy = false
|
||||
|
||||
[general.modes]
|
||||
classic = false
|
||||
secure = false
|
||||
tls = true
|
||||
|
||||
[server.api]
|
||||
enabled = true
|
||||
# listen = "127.0.0.1:9091"
|
||||
# whitelist = ["127.0.0.1/32"]
|
||||
# read_only = true
|
||||
|
||||
# === Anti-Censorship & Masking ===
|
||||
[censorship]
|
||||
tls_domain = "petrovich.ru"
|
||||
|
||||
[access.users]
|
||||
# format: "username" = "32_hex_chars_secret"
|
||||
hello = "00000000000000000000000000000000"
|
||||
```
|
||||
|
||||
Затем нажмите Ctrl+S -> Ctrl+X, чтобы сохранить
|
||||
|
||||
> [!WARNING]
|
||||
> Замените значение параметра hello на значение, которое вы получили в пункте 0.
|
||||
> Так же замените значение параметра tls_domain на другой сайт.
|
||||
|
||||
---
|
||||
|
||||
**2. Создайте службу в /etc/systemd/system/telemt.service**
|
||||
|
||||
Открываем nano
|
||||
```bash
|
||||
nano /etc/systemd/system/telemt.service
|
||||
```
|
||||
|
||||
Вставьте этот модуль Systemd
|
||||
```bash
|
||||
[Unit]
|
||||
Description=Telemt
|
||||
After=network.target
|
||||
|
||||
[Service]
|
||||
Type=simple
|
||||
WorkingDirectory=/bin
|
||||
ExecStart=/bin/telemt /etc/telemt.toml
|
||||
Restart=on-failure
|
||||
LimitNOFILE=65536
|
||||
|
||||
[Install]
|
||||
WantedBy=multi-user.target
|
||||
```
|
||||
Затем нажмите Ctrl+S -> Ctrl+X, чтобы сохранить
|
||||
|
||||
|
||||
**3.** Для запуска введите команду `systemctl start telemt`
|
||||
|
||||
**4.** Для получения информации о статусе введите `systemctl status telemt`
|
||||
|
||||
**5.** Для автоматического запуска при запуске системы в введите `systemctl enable telemt`
|
||||
|
||||
**6.** Для получения ссылки/ссылок введите
|
||||
```bash
|
||||
curl -s http://127.0.0.1:9091/v1/users | jq
|
||||
```
|
||||
> Одной ссылкой может пользоваться сколько угодно человек.
|
||||
|
||||
> [!WARNING]
|
||||
> Рабочую ссылку может выдать только команда из 6 пункта. Не пытайтесь делать ее самостоятельно или копировать откуда-либо если вы не уверены в том, что делаете!
|
||||
|
||||
---
|
||||
|
||||
# Telemt через Docker Compose
|
||||
|
||||
**1. Edit `config.toml` in the repository root (at minimum: the port, user secrets, and tls_domain)**

**2. Start the container:**

```bash
docker compose up -d --build
```

**3. Check the logs:**

```bash
docker compose logs -f telemt
```

**4. Stop the container:**

```bash
docker compose down
```

> [!NOTE]
> - In `docker-compose.yml`, `./config.toml` is mounted read-only at `/app/config.toml`
> - By default, port 443:443 is published and the container runs with dropped privileges (only `NET_BIND_SERVICE` is added)
> - If you really need host networking (usually only for certain IPv6 setups), uncomment `network_mode: host`

**Running with plain Docker (without Compose)**

```bash
docker build -t telemt:local .
docker run --name telemt --restart unless-stopped \
  -p 443:443 \
  -e RUST_LOG=info \
  -v "$PWD/config.toml:/app/config.toml:ro" \
  --read-only \
  --cap-drop ALL --cap-add NET_BIND_SERVICE \
  --ulimit nofile=65536:65536 \
  telemt:local
```
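
To confirm the container came up, a quick check with standard Docker commands (nothing telemt-specific):

```bash
# The container should report "running"; the logs should show the listener starting.
docker inspect --format '{{.State.Status}}' telemt
docker logs --tail 20 telemt
```
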

219 docs/TUNING.de.md Normal file
@@ -0,0 +1,219 @@

# Telemt Tuning Guide: Middle-End and Upstreams

This document describes the current runtime behavior for Middle-End (ME) and upstream routing based on:
- `src/config/types.rs`
- `src/config/defaults.rs`
- `src/config/load.rs`
- `src/transport/upstream.rs`

Defaults below are code defaults (used when a key is omitted), not necessarily values from `config.full.toml` examples.

## Middle-End Parameters

### 1) Core ME mode, NAT, and STUN

| Parameter | Type | Default | Constraints / validation | Runtime effect | Example |
|---|---|---:|---|---|---|
| `general.use_middle_proxy` | `bool` | `true` | none | Enables ME transport mode. If `false`, Direct mode is used. | `use_middle_proxy = true` |
| `general.proxy_secret_path` | `Option<String>` | `"proxy-secret"` | path may be `null` | Path to the Telegram infrastructure `proxy-secret` file. | `proxy_secret_path = "proxy-secret"` |
| `general.middle_proxy_nat_ip` | `Option<IpAddr>` | `null` | valid IP when set | Manual public NAT IP override for ME address material. | `middle_proxy_nat_ip = "203.0.113.10"` |
| `general.middle_proxy_nat_probe` | `bool` | `true` | auto-forced to `true` when `use_middle_proxy=true` | Enables ME NAT probing. | `middle_proxy_nat_probe = true` |
| `general.stun_nat_probe_concurrency` | `usize` | `8` | must be `> 0` | Max parallel STUN probes during NAT discovery. | `stun_nat_probe_concurrency = 16` |
| `network.stun_use` | `bool` | `true` | none | Global STUN switch. If `false`, STUN probing is disabled. | `stun_use = true` |
| `network.stun_servers` | `Vec<String>` | built-in public pool | deduplicated + empty values removed | Primary STUN server list for NAT/public endpoint discovery. | `stun_servers = ["stun1.l.google.com:19302"]` |
| `network.stun_tcp_fallback` | `bool` | `true` | none | Enables TCP fallback path when UDP STUN is blocked. | `stun_tcp_fallback = true` |
| `network.http_ip_detect_urls` | `Vec<String>` | `ifconfig.me` + `api.ipify.org` | none | HTTP fallback for public IPv4 detection if STUN is unavailable. | `http_ip_detect_urls = ["https://api.ipify.org"]` |
| `general.stun_iface_mismatch_ignore` | `bool` | `false` | none | Reserved flag in the current revision (not consumed by the runtime path). | `stun_iface_mismatch_ignore = false` |
| `timeouts.me_one_retry` | `u8` | `12` | none | Fast reconnect attempts for single-endpoint DC cases. | `me_one_retry = 6` |
| `timeouts.me_one_timeout_ms` | `u64` | `1200` | none | Timeout per quick single-endpoint attempt (ms). | `me_one_timeout_ms = 1500` |

### 2) Pool size, keepalive, and reconnect policy

| Parameter | Type | Default | Constraints / validation | Runtime effect | Example |
|---|---|---:|---|---|---|
| `general.middle_proxy_pool_size` | `usize` | `8` | none | Target active ME writer pool size. | `middle_proxy_pool_size = 12` |
| `general.middle_proxy_warm_standby` | `usize` | `16` | none | Reserved compatibility field in the current revision (no active runtime consumer). | `middle_proxy_warm_standby = 16` |
| `general.me_keepalive_enabled` | `bool` | `true` | none | Enables periodic ME keepalive/ping traffic. | `me_keepalive_enabled = true` |
| `general.me_keepalive_interval_secs` | `u64` | `25` | none | Base keepalive interval (seconds). | `me_keepalive_interval_secs = 20` |
| `general.me_keepalive_jitter_secs` | `u64` | `5` | none | Keepalive jitter to avoid synchronization bursts. | `me_keepalive_jitter_secs = 3` |
| `general.me_keepalive_payload_random` | `bool` | `true` | none | Randomizes keepalive payload bytes. | `me_keepalive_payload_random = true` |
| `general.me_warmup_stagger_enabled` | `bool` | `true` | none | Staggers extra ME warmup dials to avoid spikes. | `me_warmup_stagger_enabled = true` |
| `general.me_warmup_step_delay_ms` | `u64` | `500` | none | Base delay between warmup dial steps (ms). | `me_warmup_step_delay_ms = 300` |
| `general.me_warmup_step_jitter_ms` | `u64` | `300` | none | Additional random delay for warmup steps (ms). | `me_warmup_step_jitter_ms = 200` |
| `general.me_reconnect_max_concurrent_per_dc` | `u32` | `8` | none | Limits concurrent reconnect workers per DC. | `me_reconnect_max_concurrent_per_dc = 12` |
| `general.me_reconnect_backoff_base_ms` | `u64` | `500` | none | Initial reconnect backoff (ms). | `me_reconnect_backoff_base_ms = 250` |
| `general.me_reconnect_backoff_cap_ms` | `u64` | `30000` | none | Maximum reconnect backoff (ms). | `me_reconnect_backoff_cap_ms = 10000` |
| `general.me_reconnect_fast_retry_count` | `u32` | `16` | none | Immediate retry budget before long backoff behavior. | `me_reconnect_fast_retry_count = 8` |

### 3) Reinit/hardswap, secret rotation, and degradation

| Parameter | Type | Default | Constraints / validation | Runtime effect | Example |
|---|---|---:|---|---|---|
| `general.hardswap` | `bool` | `true` | none | Enables generation-based ME hardswap strategy. | `hardswap = true` |
| `general.me_reinit_every_secs` | `u64` | `900` | must be `> 0` | Periodic ME reinit interval. | `me_reinit_every_secs = 600` |
| `general.me_hardswap_warmup_delay_min_ms` | `u64` | `1000` | must be `<= me_hardswap_warmup_delay_max_ms` | Lower bound for hardswap warmup dial spacing. | `me_hardswap_warmup_delay_min_ms = 500` |
| `general.me_hardswap_warmup_delay_max_ms` | `u64` | `2000` | must be `> 0` | Upper bound for hardswap warmup dial spacing. | `me_hardswap_warmup_delay_max_ms = 1200` |
| `general.me_hardswap_warmup_extra_passes` | `u8` | `3` | must be within `[0,10]` | Additional warmup passes after the base pass. | `me_hardswap_warmup_extra_passes = 2` |
| `general.me_hardswap_warmup_pass_backoff_base_ms` | `u64` | `500` | must be `> 0` | Base backoff between extra warmup passes. | `me_hardswap_warmup_pass_backoff_base_ms = 400` |
| `general.me_config_stable_snapshots` | `u8` | `2` | must be `> 0` | Number of identical ME config snapshots required before apply. | `me_config_stable_snapshots = 3` |
| `general.me_config_apply_cooldown_secs` | `u64` | `300` | none | Cooldown between applied ME map updates. | `me_config_apply_cooldown_secs = 120` |
| `general.proxy_secret_stable_snapshots` | `u8` | `2` | must be `> 0` | Number of identical proxy-secret snapshots required before rotation. | `proxy_secret_stable_snapshots = 3` |
| `general.proxy_secret_rotate_runtime` | `bool` | `true` | none | Enables runtime proxy-secret rotation. | `proxy_secret_rotate_runtime = true` |
| `general.proxy_secret_len_max` | `usize` | `256` | must be within `[32,4096]` | Upper limit for accepted proxy-secret length. | `proxy_secret_len_max = 512` |
| `general.update_every` | `Option<u64>` | `300` | if set: must be `> 0`; if `null`: legacy min fallback | Unified refresh interval for ME config + secret updater. | `update_every = 300` |
| `general.me_pool_drain_ttl_secs` | `u64` | `90` | none | Time window where stale writers remain fallback-eligible. | `me_pool_drain_ttl_secs = 120` |
| `general.me_pool_min_fresh_ratio` | `f32` | `0.8` | must be within `[0.0,1.0]` | Coverage threshold before the stale generation can be drained. | `me_pool_min_fresh_ratio = 0.9` |
| `general.me_reinit_drain_timeout_secs` | `u64` | `120` | `0` means no force-close; if `>0 && < TTL` it is bumped to TTL | Force-close timeout for draining stale writers. | `me_reinit_drain_timeout_secs = 0` |
| `general.auto_degradation_enabled` | `bool` | `true` | none | Reserved compatibility flag in the current revision (no active runtime consumer). | `auto_degradation_enabled = true` |
| `general.degradation_min_unavailable_dc_groups` | `u8` | `2` | none | Reserved compatibility threshold in the current revision (no active runtime consumer). | `degradation_min_unavailable_dc_groups = 2` |

## Deprecated / Legacy Parameters

| Parameter | Status | Replacement | Current behavior | Migration recommendation |
|---|---|---|---|---|
| `general.middle_proxy_nat_stun` | Deprecated | `network.stun_servers` | Merged into `network.stun_servers` only when `network.stun_servers` is not explicitly set. | Move the value into `network.stun_servers` and remove the legacy key. |
| `general.middle_proxy_nat_stun_servers` | Deprecated | `network.stun_servers` | Merged into `network.stun_servers` only when `network.stun_servers` is not explicitly set. | Move the values into `network.stun_servers` and remove the legacy key. |
| `general.proxy_secret_auto_reload_secs` | Deprecated | `general.update_every` | Used only when `update_every = null` (legacy fallback path). | Set `general.update_every` explicitly and remove the legacy key. |
| `general.proxy_config_auto_reload_secs` | Deprecated | `general.update_every` | Used only when `update_every = null` (legacy fallback path). | Set `general.update_every` explicitly and remove the legacy key. |

## How Upstreams Are Configured

### Upstream schema

| Field | Applies to | Type | Required | Default | Meaning |
|---|---|---|---|---|---|
| `[[upstreams]].type` | all upstreams | `"direct" \| "socks4" \| "socks5"` | yes | n/a | Upstream transport type. |
| `[[upstreams]].weight` | all upstreams | `u16` | no | `1` | Base weight for weighted-random selection. |
| `[[upstreams]].enabled` | all upstreams | `bool` | no | `true` | Disabled entries are ignored at startup. |
| `[[upstreams]].scopes` | all upstreams | `String` | no | `""` | Comma-separated scope tags for request-level routing. |
| `interface` | `direct` | `Option<String>` | no | `null` | Interface name (e.g. `eth0`) or literal local IP for bind selection. |
| `bind_addresses` | `direct` | `Option<Vec<IpAddr>>` | no | `null` | Explicit local source IP candidates (strict priority over `interface`). |
| `address` | `socks4` | `String` | yes | n/a | SOCKS4 server endpoint (`ip:port` or `host:port`). |
| `interface` | `socks4` | `Option<String>` | no | `null` | Used only when `address` is given as `ip:port`. |
| `user_id` | `socks4` | `Option<String>` | no | `null` | SOCKS4 user ID for the CONNECT request. |
| `address` | `socks5` | `String` | yes | n/a | SOCKS5 server endpoint (`ip:port` or `host:port`). |
| `interface` | `socks5` | `Option<String>` | no | `null` | Used only when `address` is given as `ip:port`. |
| `username` | `socks5` | `Option<String>` | no | `null` | SOCKS5 username auth. |
| `password` | `socks5` | `Option<String>` | no | `null` | SOCKS5 password auth. |

### Runtime rules (important)

1. If `[[upstreams]]` is omitted, the loader injects one default `direct` upstream.
2. Scope filtering is exact-token based:
   - when a request scope is set -> only entries whose `scopes` contains that exact token;
   - when no request scope is set -> only entries with empty `scopes`.
3. Healthy upstreams are selected by weighted random using `weight * latency_factor`.
4. If no healthy upstream exists in the filtered set, random selection is used among the filtered entries.
5. `direct` bind resolution order:
   - `bind_addresses` candidates (same IP family as the target) first;
   - if `interface` is an interface name and `bind_addresses` is set, each candidate IP is validated against addresses currently assigned to that interface;
   - invalid candidates are dropped with `WARN`;
   - if no valid candidate remains, the connection falls back to unbound direct connect (`bind_ip=None`);
   - if there is no `bind_addresses` candidate, `interface` is used (literal IP or resolved interface primary IP).
6. For `socks4/socks5` with a hostname `address`, interface binding is not supported and is ignored with a warning.
7. Runtime DNS overrides are used for upstream hostname resolution.
8. In ME mode, the selected upstream is also used for the ME TCP dial path.
9. In ME mode for a `direct` upstream with bind/interface, STUN reflection logic is bind-aware for KDF source material.
10. In ME mode for a SOCKS upstream, SOCKS `BND.ADDR/BND.PORT` is used for KDF when it is valid, public, and of the same IP family.

## Upstream Configuration Examples

### Example 1: Minimal direct upstream

```toml
[[upstreams]]
type = "direct"
weight = 1
enabled = true
```

### Example 2: Direct with interface + explicit bind addresses

```toml
[[upstreams]]
type = "direct"
interface = "eth0"
bind_addresses = ["192.168.1.100", "192.168.1.101"]
weight = 3
enabled = true
```

### Example 3: SOCKS5 upstream with authentication

```toml
[[upstreams]]
type = "socks5"
address = "198.51.100.30:1080"
username = "proxy-user"
password = "proxy-pass"
weight = 2
enabled = true
```

### Example 4: Mixed upstreams with scopes

```toml
[[upstreams]]
type = "direct"
weight = 5
enabled = true
scopes = ""

[[upstreams]]
type = "socks5"
address = "203.0.113.40:1080"
username = "edge"
password = "edgepass"
weight = 3
enabled = true
scopes = "premium,me"
```

### Example 5: ME-focused tuning profile

```toml
[general]
use_middle_proxy = true
proxy_secret_path = "proxy-secret"
middle_proxy_nat_probe = true
stun_nat_probe_concurrency = 16
middle_proxy_pool_size = 12
me_keepalive_enabled = true
me_keepalive_interval_secs = 20
me_keepalive_jitter_secs = 4
me_reconnect_max_concurrent_per_dc = 12
me_reconnect_backoff_base_ms = 300
me_reconnect_backoff_cap_ms = 10000
me_reconnect_fast_retry_count = 10
hardswap = true
me_reinit_every_secs = 600
me_hardswap_warmup_delay_min_ms = 500
me_hardswap_warmup_delay_max_ms = 1200
me_hardswap_warmup_extra_passes = 2
me_hardswap_warmup_pass_backoff_base_ms = 400
me_config_stable_snapshots = 3
me_config_apply_cooldown_secs = 120
proxy_secret_stable_snapshots = 3
proxy_secret_rotate_runtime = true
proxy_secret_len_max = 512
update_every = 300
me_pool_drain_ttl_secs = 120
me_pool_min_fresh_ratio = 0.9
me_reinit_drain_timeout_secs = 180

[timeouts]
me_one_retry = 8
me_one_timeout_ms = 1200

[network]
stun_use = true
stun_tcp_fallback = true
stun_servers = [
  "stun1.l.google.com:19302",
  "stun2.l.google.com:19302"
]
http_ip_detect_urls = [
  "https://api.ipify.org",
  "https://ifconfig.me/ip"
]
```

219 docs/TUNING.en.md Normal file
@@ -0,0 +1,219 @@

# Telemt Tuning Guide: Middle-End and Upstreams

This document describes the current runtime behavior for Middle-End (ME) and upstream routing based on:
- `src/config/types.rs`
- `src/config/defaults.rs`
- `src/config/load.rs`
- `src/transport/upstream.rs`

Defaults below are code defaults (used when a key is omitted), not necessarily values from `config.full.toml` examples.

## Middle-End Parameters

### 1) Core ME mode, NAT, and STUN

| Parameter | Type | Default | Constraints / validation | Runtime effect | Example |
|---|---|---:|---|---|---|
| `general.use_middle_proxy` | `bool` | `true` | none | Enables ME transport mode. If `false`, Direct mode is used. | `use_middle_proxy = true` |
| `general.proxy_secret_path` | `Option<String>` | `"proxy-secret"` | path may be `null` | Path to the Telegram infrastructure `proxy-secret` file. | `proxy_secret_path = "proxy-secret"` |
| `general.middle_proxy_nat_ip` | `Option<IpAddr>` | `null` | valid IP when set | Manual public NAT IP override for ME address material. | `middle_proxy_nat_ip = "203.0.113.10"` |
| `general.middle_proxy_nat_probe` | `bool` | `true` | auto-forced to `true` when `use_middle_proxy=true` | Enables ME NAT probing. | `middle_proxy_nat_probe = true` |
| `general.stun_nat_probe_concurrency` | `usize` | `8` | must be `> 0` | Max parallel STUN probes during NAT discovery. | `stun_nat_probe_concurrency = 16` |
| `network.stun_use` | `bool` | `true` | none | Global STUN switch. If `false`, STUN probing is disabled. | `stun_use = true` |
| `network.stun_servers` | `Vec<String>` | built-in public pool | deduplicated + empty values removed | Primary STUN server list for NAT/public endpoint discovery. | `stun_servers = ["stun1.l.google.com:19302"]` |
| `network.stun_tcp_fallback` | `bool` | `true` | none | Enables TCP fallback path when UDP STUN is blocked. | `stun_tcp_fallback = true` |
| `network.http_ip_detect_urls` | `Vec<String>` | `ifconfig.me` + `api.ipify.org` | none | HTTP fallback for public IPv4 detection if STUN is unavailable. | `http_ip_detect_urls = ["https://api.ipify.org"]` |
| `general.stun_iface_mismatch_ignore` | `bool` | `false` | none | Reserved flag in the current revision (not consumed by the runtime path). | `stun_iface_mismatch_ignore = false` |
| `timeouts.me_one_retry` | `u8` | `12` | none | Fast reconnect attempts for single-endpoint DC cases. | `me_one_retry = 6` |
| `timeouts.me_one_timeout_ms` | `u64` | `1200` | none | Timeout per quick single-endpoint attempt (ms). | `me_one_timeout_ms = 1500` |
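
Whether the fallbacks above can actually work from a given host is easy to probe by hand; a rough sketch (endpoints taken from the defaults above; `/dev/tcp` is a bash feature, and not every public STUN server accepts TCP, so a failed probe is not conclusive):

```bash
# TCP reachability of a STUN server (relevant when stun_tcp_fallback = true).
timeout 3 bash -c 'exec 3<>/dev/tcp/stun1.l.google.com/19302' \
  && echo "STUN TCP reachable" || echo "STUN TCP blocked"

# HTTP fallback used for public IPv4 detection.
curl -s https://api.ipify.org && echo
```
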
### 2) Pool size, keepalive, and reconnect policy

| Parameter | Type | Default | Constraints / validation | Runtime effect | Example |
|---|---|---:|---|---|---|
| `general.middle_proxy_pool_size` | `usize` | `8` | none | Target active ME writer pool size. | `middle_proxy_pool_size = 12` |
| `general.middle_proxy_warm_standby` | `usize` | `16` | none | Reserved compatibility field in the current revision (no active runtime consumer). | `middle_proxy_warm_standby = 16` |
| `general.me_keepalive_enabled` | `bool` | `true` | none | Enables periodic ME keepalive/ping traffic. | `me_keepalive_enabled = true` |
| `general.me_keepalive_interval_secs` | `u64` | `25` | none | Base keepalive interval (seconds). | `me_keepalive_interval_secs = 20` |
| `general.me_keepalive_jitter_secs` | `u64` | `5` | none | Keepalive jitter to avoid synchronization bursts. | `me_keepalive_jitter_secs = 3` |
| `general.me_keepalive_payload_random` | `bool` | `true` | none | Randomizes keepalive payload bytes. | `me_keepalive_payload_random = true` |
| `general.me_warmup_stagger_enabled` | `bool` | `true` | none | Staggers extra ME warmup dials to avoid spikes. | `me_warmup_stagger_enabled = true` |
| `general.me_warmup_step_delay_ms` | `u64` | `500` | none | Base delay between warmup dial steps (ms). | `me_warmup_step_delay_ms = 300` |
| `general.me_warmup_step_jitter_ms` | `u64` | `300` | none | Additional random delay for warmup steps (ms). | `me_warmup_step_jitter_ms = 200` |
| `general.me_reconnect_max_concurrent_per_dc` | `u32` | `8` | none | Limits concurrent reconnect workers per DC in health recovery. | `me_reconnect_max_concurrent_per_dc = 12` |
| `general.me_reconnect_backoff_base_ms` | `u64` | `500` | none | Initial reconnect backoff (ms). | `me_reconnect_backoff_base_ms = 250` |
| `general.me_reconnect_backoff_cap_ms` | `u64` | `30000` | none | Maximum reconnect backoff (ms). | `me_reconnect_backoff_cap_ms = 10000` |
| `general.me_reconnect_fast_retry_count` | `u32` | `16` | none | Immediate retry budget before long backoff behavior. | `me_reconnect_fast_retry_count = 8` |

### 3) Reinit/hardswap, secret rotation, and degradation

| Parameter | Type | Default | Constraints / validation | Runtime effect | Example |
|---|---|---:|---|---|---|
| `general.hardswap` | `bool` | `true` | none | Enables generation-based ME hardswap strategy. | `hardswap = true` |
| `general.me_reinit_every_secs` | `u64` | `900` | must be `> 0` | Periodic ME reinit interval. | `me_reinit_every_secs = 600` |
| `general.me_hardswap_warmup_delay_min_ms` | `u64` | `1000` | must be `<= me_hardswap_warmup_delay_max_ms` | Lower bound for hardswap warmup dial spacing. | `me_hardswap_warmup_delay_min_ms = 500` |
| `general.me_hardswap_warmup_delay_max_ms` | `u64` | `2000` | must be `> 0` | Upper bound for hardswap warmup dial spacing. | `me_hardswap_warmup_delay_max_ms = 1200` |
| `general.me_hardswap_warmup_extra_passes` | `u8` | `3` | must be within `[0,10]` | Additional warmup passes after the base pass. | `me_hardswap_warmup_extra_passes = 2` |
| `general.me_hardswap_warmup_pass_backoff_base_ms` | `u64` | `500` | must be `> 0` | Base backoff between extra warmup passes. | `me_hardswap_warmup_pass_backoff_base_ms = 400` |
| `general.me_config_stable_snapshots` | `u8` | `2` | must be `> 0` | Number of identical ME config snapshots required before apply. | `me_config_stable_snapshots = 3` |
| `general.me_config_apply_cooldown_secs` | `u64` | `300` | none | Cooldown between applied ME map updates. | `me_config_apply_cooldown_secs = 120` |
| `general.proxy_secret_stable_snapshots` | `u8` | `2` | must be `> 0` | Number of identical proxy-secret snapshots required before rotation. | `proxy_secret_stable_snapshots = 3` |
| `general.proxy_secret_rotate_runtime` | `bool` | `true` | none | Enables runtime proxy-secret rotation. | `proxy_secret_rotate_runtime = true` |
| `general.proxy_secret_len_max` | `usize` | `256` | must be within `[32,4096]` | Upper limit for accepted proxy-secret length. | `proxy_secret_len_max = 512` |
| `general.update_every` | `Option<u64>` | `300` | if set: must be `> 0`; if `null`: legacy min fallback | Unified refresh interval for ME config + secret updater. | `update_every = 300` |
| `general.me_pool_drain_ttl_secs` | `u64` | `90` | none | Time window where stale writers remain fallback-eligible. | `me_pool_drain_ttl_secs = 120` |
| `general.me_pool_min_fresh_ratio` | `f32` | `0.8` | must be within `[0.0,1.0]` | Coverage threshold before the stale generation can be drained. | `me_pool_min_fresh_ratio = 0.9` |
| `general.me_reinit_drain_timeout_secs` | `u64` | `120` | `0` means no force-close; if `>0 && < TTL` it is bumped to TTL | Force-close timeout for draining stale writers. | `me_reinit_drain_timeout_secs = 0` |
| `general.auto_degradation_enabled` | `bool` | `true` | none | Reserved compatibility flag in the current revision (no active runtime consumer). | `auto_degradation_enabled = true` |
| `general.degradation_min_unavailable_dc_groups` | `u8` | `2` | none | Reserved compatibility threshold in the current revision (no active runtime consumer). | `degradation_min_unavailable_dc_groups = 2` |

## Deprecated / Legacy Parameters

| Parameter | Status | Replacement | Current behavior | Migration recommendation |
|---|---|---|---|---|
| `general.middle_proxy_nat_stun` | Deprecated | `network.stun_servers` | Merged into `network.stun_servers` only when `network.stun_servers` is not explicitly set. | Move the value into `network.stun_servers` and remove the legacy key. |
| `general.middle_proxy_nat_stun_servers` | Deprecated | `network.stun_servers` | Merged into `network.stun_servers` only when `network.stun_servers` is not explicitly set. | Move the values into `network.stun_servers` and remove the legacy key. |
| `general.proxy_secret_auto_reload_secs` | Deprecated | `general.update_every` | Used only when `update_every = null` (legacy fallback path). | Set `general.update_every` explicitly and remove the legacy key. |
| `general.proxy_config_auto_reload_secs` | Deprecated | `general.update_every` | Used only when `update_every = null` (legacy fallback path). | Set `general.update_every` explicitly and remove the legacy key. |

## How Upstreams Are Configured

### Upstream schema

| Field | Applies to | Type | Required | Default | Meaning |
|---|---|---|---|---|---|
| `[[upstreams]].type` | all upstreams | `"direct" \| "socks4" \| "socks5"` | yes | n/a | Upstream transport type. |
| `[[upstreams]].weight` | all upstreams | `u16` | no | `1` | Base weight for weighted-random selection. |
| `[[upstreams]].enabled` | all upstreams | `bool` | no | `true` | Disabled entries are ignored at startup. |
| `[[upstreams]].scopes` | all upstreams | `String` | no | `""` | Comma-separated scope tags for request-level routing. |
| `interface` | `direct` | `Option<String>` | no | `null` | Interface name (e.g. `eth0`) or literal local IP for bind selection. |
| `bind_addresses` | `direct` | `Option<Vec<IpAddr>>` | no | `null` | Explicit local source IP candidates (strict priority over `interface`). |
| `address` | `socks4` | `String` | yes | n/a | SOCKS4 server endpoint (`ip:port` or `host:port`). |
| `interface` | `socks4` | `Option<String>` | no | `null` | Used only for the SOCKS server `ip:port` dial path. |
| `user_id` | `socks4` | `Option<String>` | no | `null` | SOCKS4 user ID for the CONNECT request. |
| `address` | `socks5` | `String` | yes | n/a | SOCKS5 server endpoint (`ip:port` or `host:port`). |
| `interface` | `socks5` | `Option<String>` | no | `null` | Used only for the SOCKS server `ip:port` dial path. |
| `username` | `socks5` | `Option<String>` | no | `null` | SOCKS5 username auth. |
| `password` | `socks5` | `Option<String>` | no | `null` | SOCKS5 password auth. |

### Runtime rules (important)

1. If `[[upstreams]]` is omitted, the loader injects one default `direct` upstream.
2. Scope filtering is exact-token based:
   - when a request scope is set -> only entries whose `scopes` contains that exact token;
   - when no request scope is set -> only entries with empty `scopes`.
3. Healthy upstreams are selected by weighted random using `weight * latency_factor`.
4. If no healthy upstream exists in the filtered set, random selection is used among the filtered entries.
5. `direct` bind resolution order:
   - `bind_addresses` candidates (same IP family as the target) first;
   - if `interface` is an interface name and `bind_addresses` is set, each candidate IP is validated against addresses currently assigned to that interface;
   - invalid candidates are dropped with `WARN`;
   - if no valid candidate remains, the connection falls back to unbound direct connect (`bind_ip=None`);
   - if there is no `bind_addresses` candidate, `interface` is used (literal IP or resolved interface primary IP).
6. For `socks4/socks5` with a hostname `address`, interface binding is not supported and is ignored with a warning.
7. Runtime DNS overrides are used for upstream hostname resolution.
8. In ME mode, the selected upstream is also used for the ME TCP dial path.
9. In ME mode for a `direct` upstream with bind/interface, STUN reflection logic is bind-aware for KDF source material.
10. In ME mode for a SOCKS upstream, SOCKS `BND.ADDR/BND.PORT` is used for KDF when it is valid, public, and of the same IP family.
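
Before adding a SOCKS upstream to the config, it is worth confirming it accepts connections at all; a quick sketch using curl's built-in SOCKS5 proxy support (address and credentials are the illustrative ones from Example 3 below):

```bash
# Route a test request through the SOCKS5 upstream and print the egress IP.
curl -s -x socks5://proxy-user:proxy-pass@198.51.100.30:1080 https://api.ipify.org && echo
```
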
## Upstream Configuration Examples

### Example 1: Minimal direct upstream

```toml
[[upstreams]]
type = "direct"
weight = 1
enabled = true
```

### Example 2: Direct with interface + explicit bind addresses

```toml
[[upstreams]]
type = "direct"
interface = "eth0"
bind_addresses = ["192.168.1.100", "192.168.1.101"]
weight = 3
enabled = true
```

### Example 3: SOCKS5 upstream with authentication

```toml
[[upstreams]]
type = "socks5"
address = "198.51.100.30:1080"
username = "proxy-user"
password = "proxy-pass"
weight = 2
enabled = true
```

### Example 4: Mixed upstreams with scopes

```toml
[[upstreams]]
type = "direct"
weight = 5
enabled = true
scopes = ""

[[upstreams]]
type = "socks5"
address = "203.0.113.40:1080"
username = "edge"
password = "edgepass"
weight = 3
enabled = true
scopes = "premium,me"
```

### Example 5: ME-focused tuning profile

```toml
[general]
use_middle_proxy = true
proxy_secret_path = "proxy-secret"
middle_proxy_nat_probe = true
stun_nat_probe_concurrency = 16
middle_proxy_pool_size = 12
me_keepalive_enabled = true
me_keepalive_interval_secs = 20
me_keepalive_jitter_secs = 4
me_reconnect_max_concurrent_per_dc = 12
me_reconnect_backoff_base_ms = 300
me_reconnect_backoff_cap_ms = 10000
me_reconnect_fast_retry_count = 10
hardswap = true
me_reinit_every_secs = 600
me_hardswap_warmup_delay_min_ms = 500
me_hardswap_warmup_delay_max_ms = 1200
me_hardswap_warmup_extra_passes = 2
me_hardswap_warmup_pass_backoff_base_ms = 400
me_config_stable_snapshots = 3
me_config_apply_cooldown_secs = 120
proxy_secret_stable_snapshots = 3
proxy_secret_rotate_runtime = true
proxy_secret_len_max = 512
update_every = 300
me_pool_drain_ttl_secs = 120
me_pool_min_fresh_ratio = 0.9
me_reinit_drain_timeout_secs = 180

[timeouts]
me_one_retry = 8
me_one_timeout_ms = 1200

[network]
stun_use = true
stun_tcp_fallback = true
stun_servers = [
  "stun1.l.google.com:19302",
  "stun2.l.google.com:19302"
]
http_ip_detect_urls = [
  "https://api.ipify.org",
  "https://ifconfig.me/ip"
]
```
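
After editing a profile like Example 5, one low-risk way to validate it is to run the binary in the foreground with verbose logging before restarting the service (paths as in the systemd unit earlier in this document):

```bash
# Load the tuned config in the foreground; Ctrl+C once startup looks clean,
# then restart the systemd unit.
RUST_LOG=debug /bin/telemt /etc/telemt.toml
```
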

219 docs/TUNING.ru.md Normal file
@@ -0,0 +1,219 @@

# Telemt Tuning Guide: Middle-End and Upstreams

This document describes the current runtime behavior for Middle-End (ME) and upstream routing based on:
- `src/config/types.rs`
- `src/config/defaults.rs`
- `src/config/load.rs`
- `src/transport/upstream.rs`

Defaults below are code defaults (used when a key is omitted), not necessarily values from `config.full.toml` examples.

## Middle-End Parameters

### 1) Core ME mode, NAT, and STUN

| Parameter | Type | Default | Constraints / validation | Runtime effect | Example |
|---|---|---:|---|---|---|
| `general.use_middle_proxy` | `bool` | `true` | none | Enables ME transport mode. If `false`, Direct mode is used. | `use_middle_proxy = true` |
| `general.proxy_secret_path` | `Option<String>` | `"proxy-secret"` | path may be `null` | Path to the Telegram infrastructure `proxy-secret` file. | `proxy_secret_path = "proxy-secret"` |
| `general.middle_proxy_nat_ip` | `Option<IpAddr>` | `null` | valid IP when set | Manual public NAT IP override for ME address material. | `middle_proxy_nat_ip = "203.0.113.10"` |
| `general.middle_proxy_nat_probe` | `bool` | `true` | auto-forced to `true` when `use_middle_proxy=true` | Enables ME NAT probing. | `middle_proxy_nat_probe = true` |
| `general.stun_nat_probe_concurrency` | `usize` | `8` | must be `> 0` | Max parallel STUN probes during NAT discovery. | `stun_nat_probe_concurrency = 16` |
| `network.stun_use` | `bool` | `true` | none | Global STUN switch. If `false`, STUN probing is disabled. | `stun_use = true` |
| `network.stun_servers` | `Vec<String>` | built-in public pool | deduplicated + empty values removed | Primary STUN server list for NAT/public endpoint discovery. | `stun_servers = ["stun1.l.google.com:19302"]` |
| `network.stun_tcp_fallback` | `bool` | `true` | none | Enables TCP fallback path when UDP STUN is blocked. | `stun_tcp_fallback = true` |
| `network.http_ip_detect_urls` | `Vec<String>` | `ifconfig.me` + `api.ipify.org` | none | HTTP fallback for public IPv4 detection if STUN is unavailable. | `http_ip_detect_urls = ["https://api.ipify.org"]` |
| `general.stun_iface_mismatch_ignore` | `bool` | `false` | none | Reserved flag in the current revision (not consumed by the runtime path). | `stun_iface_mismatch_ignore = false` |
| `timeouts.me_one_retry` | `u8` | `12` | none | Fast reconnect attempts for single-endpoint DC cases. | `me_one_retry = 6` |
| `timeouts.me_one_timeout_ms` | `u64` | `1200` | none | Timeout per quick single-endpoint attempt (ms). | `me_one_timeout_ms = 1500` |

### 2) Pool size, keepalive, and reconnect policy

| Parameter | Type | Default | Constraints / validation | Runtime effect | Example |
|---|---|---:|---|---|---|
| `general.middle_proxy_pool_size` | `usize` | `8` | none | Target active ME writer pool size. | `middle_proxy_pool_size = 12` |
| `general.middle_proxy_warm_standby` | `usize` | `16` | none | Reserved compatibility field in the current revision (no active runtime consumer). | `middle_proxy_warm_standby = 16` |
| `general.me_keepalive_enabled` | `bool` | `true` | none | Enables periodic ME keepalive/ping traffic. | `me_keepalive_enabled = true` |
| `general.me_keepalive_interval_secs` | `u64` | `25` | none | Base keepalive interval (seconds). | `me_keepalive_interval_secs = 20` |
| `general.me_keepalive_jitter_secs` | `u64` | `5` | none | Keepalive jitter to avoid synchronization bursts. | `me_keepalive_jitter_secs = 3` |
| `general.me_keepalive_payload_random` | `bool` | `true` | none | Randomizes keepalive payload bytes. | `me_keepalive_payload_random = true` |
| `general.me_warmup_stagger_enabled` | `bool` | `true` | none | Staggers extra ME warmup dials to avoid spikes. | `me_warmup_stagger_enabled = true` |
| `general.me_warmup_step_delay_ms` | `u64` | `500` | none | Base delay between warmup dial steps (ms). | `me_warmup_step_delay_ms = 300` |
| `general.me_warmup_step_jitter_ms` | `u64` | `300` | none | Additional random delay for warmup steps (ms). | `me_warmup_step_jitter_ms = 200` |
| `general.me_reconnect_max_concurrent_per_dc` | `u32` | `8` | none | Limits concurrent reconnect workers per DC. | `me_reconnect_max_concurrent_per_dc = 12` |
| `general.me_reconnect_backoff_base_ms` | `u64` | `500` | none | Initial reconnect backoff (ms). | `me_reconnect_backoff_base_ms = 250` |
| `general.me_reconnect_backoff_cap_ms` | `u64` | `30000` | none | Maximum reconnect backoff (ms). | `me_reconnect_backoff_cap_ms = 10000` |
| `general.me_reconnect_fast_retry_count` | `u32` | `16` | none | Immediate retry budget before long backoff behavior. | `me_reconnect_fast_retry_count = 8` |

### 3) Reinit/hardswap, secret rotation, and degradation

| Parameter | Type | Default | Constraints / validation | Runtime effect | Example |
|---|---|---:|---|---|---|
| `general.hardswap` | `bool` | `true` | none | Enables generation-based ME hardswap strategy. | `hardswap = true` |
| `general.me_reinit_every_secs` | `u64` | `900` | must be `> 0` | Periodic ME reinit interval. | `me_reinit_every_secs = 600` |
| `general.me_hardswap_warmup_delay_min_ms` | `u64` | `1000` | must be `<= me_hardswap_warmup_delay_max_ms` | Lower bound for hardswap warmup dial spacing. | `me_hardswap_warmup_delay_min_ms = 500` |
| `general.me_hardswap_warmup_delay_max_ms` | `u64` | `2000` | must be `> 0` | Upper bound for hardswap warmup dial spacing. | `me_hardswap_warmup_delay_max_ms = 1200` |
| `general.me_hardswap_warmup_extra_passes` | `u8` | `3` | must be within `[0,10]` | Additional warmup passes after the base pass. | `me_hardswap_warmup_extra_passes = 2` |
| `general.me_hardswap_warmup_pass_backoff_base_ms` | `u64` | `500` | must be `> 0` | Base backoff between extra warmup passes. | `me_hardswap_warmup_pass_backoff_base_ms = 400` |
| `general.me_config_stable_snapshots` | `u8` | `2` | must be `> 0` | Number of identical ME config snapshots required before apply. | `me_config_stable_snapshots = 3` |
| `general.me_config_apply_cooldown_secs` | `u64` | `300` | none | Cooldown between applied ME map updates. | `me_config_apply_cooldown_secs = 120` |
| `general.proxy_secret_stable_snapshots` | `u8` | `2` | must be `> 0` | Number of identical proxy-secret snapshots required before rotation. | `proxy_secret_stable_snapshots = 3` |
| `general.proxy_secret_rotate_runtime` | `bool` | `true` | none | Enables runtime proxy-secret rotation. | `proxy_secret_rotate_runtime = true` |
| `general.proxy_secret_len_max` | `usize` | `256` | must be within `[32,4096]` | Upper limit for accepted proxy-secret length. | `proxy_secret_len_max = 512` |
| `general.update_every` | `Option<u64>` | `300` | if set: must be `> 0`; if `null`: legacy min fallback | Unified refresh interval for ME config + secret updater. | `update_every = 300` |
| `general.me_pool_drain_ttl_secs` | `u64` | `90` | none | Time window where stale writers remain fallback-eligible. | `me_pool_drain_ttl_secs = 120` |
| `general.me_pool_min_fresh_ratio` | `f32` | `0.8` | must be within `[0.0,1.0]` | Coverage threshold before the stale generation can be drained. | `me_pool_min_fresh_ratio = 0.9` |
| `general.me_reinit_drain_timeout_secs` | `u64` | `120` | `0` means no force-close; if `>0 && < TTL` it is bumped to TTL | Force-close timeout for draining stale writers. | `me_reinit_drain_timeout_secs = 0` |
| `general.auto_degradation_enabled` | `bool` | `true` | none | Reserved compatibility flag in the current revision (no active runtime consumer). | `auto_degradation_enabled = true` |
| `general.degradation_min_unavailable_dc_groups` | `u8` | `2` | none | Reserved compatibility threshold in the current revision (no active runtime consumer). | `degradation_min_unavailable_dc_groups = 2` |

## Deprecated / Legacy Parameters

| Parameter | Status | Replacement | Current behavior | Migration recommendation |
|---|---|---|---|---|
| `general.middle_proxy_nat_stun` | Deprecated | `network.stun_servers` | Merged into `network.stun_servers` only when `network.stun_servers` is not explicitly set. | Move the value into `network.stun_servers` and remove the legacy key. |
| `general.middle_proxy_nat_stun_servers` | Deprecated | `network.stun_servers` | Merged into `network.stun_servers` only when `network.stun_servers` is not explicitly set. | Move the values into `network.stun_servers` and remove the legacy key. |
| `general.proxy_secret_auto_reload_secs` | Deprecated | `general.update_every` | Used only when `update_every = null` (legacy fallback path). | Set `general.update_every` explicitly and remove the legacy key. |
| `general.proxy_config_auto_reload_secs` | Deprecated | `general.update_every` | Used only when `update_every = null` (legacy fallback path). | Set `general.update_every` explicitly and remove the legacy key. |

## How Upstreams Are Configured

### Upstream schema

| Field | Applies to | Type | Required | Default | Meaning |
|---|---|---|---|---|---|
| `[[upstreams]].type` | all upstreams | `"direct" \| "socks4" \| "socks5"` | yes | n/a | Upstream transport type. |
| `[[upstreams]].weight` | all upstreams | `u16` | no | `1` | Base weight for weighted-random selection. |
| `[[upstreams]].enabled` | all upstreams | `bool` | no | `true` | Disabled entries are ignored at startup. |
| `[[upstreams]].scopes` | all upstreams | `String` | no | `""` | Comma-separated scope tags for request-level routing. |
| `interface` | `direct` | `Option<String>` | no | `null` | Interface name (e.g. `eth0`) or literal local IP for bind selection. |
| `bind_addresses` | `direct` | `Option<Vec<IpAddr>>` | no | `null` | Explicit local source IP candidates (strict priority over `interface`). |
| `address` | `socks4` | `String` | yes | n/a | SOCKS4 server endpoint (`ip:port` or `host:port`). |
| `interface` | `socks4` | `Option<String>` | no | `null` | Used only when `address` is given as `ip:port`. |
| `user_id` | `socks4` | `Option<String>` | no | `null` | SOCKS4 user ID for the CONNECT request. |
| `address` | `socks5` | `String` | yes | n/a | SOCKS5 server endpoint (`ip:port` or `host:port`). |
| `interface` | `socks5` | `Option<String>` | no | `null` | Used only when `address` is given as `ip:port`. |
| `username` | `socks5` | `Option<String>` | no | `null` | SOCKS5 username auth. |
| `password` | `socks5` | `Option<String>` | no | `null` | SOCKS5 password auth. |

### Runtime rules

1. If `[[upstreams]]` is omitted, the loader injects one default `direct` upstream.
2. Scope filtering is exact-token based:
   - when a request scope is set -> only entries whose `scopes` contains that exact token;
   - when no request scope is set -> only entries with empty `scopes`.
3. Healthy upstreams are selected by weighted random using `weight * latency_factor`.
4. If no healthy upstream exists in the filtered set, random selection is used among the filtered entries.
5. `direct` bind resolution order:
   - `bind_addresses` candidates (same IP family as the target) first;
   - if `interface` (a name) and `bind_addresses` are both set, each candidate IP is validated against addresses currently assigned to that interface;
   - invalid candidates are dropped with `WARN`;
   - if no valid candidate remains, the connection falls back to unbound direct connect (`bind_ip=None`);
   - if no `bind_addresses` candidate fits, `interface` is used (literal IP or resolved interface primary IP).
6. For `socks4/socks5` with a hostname `address`, interface binding is not supported and is ignored with a warning.
7. Runtime DNS overrides are used for upstream hostname resolution.
8. In ME mode, the selected upstream is also used for the ME TCP dial path.
9. In ME mode for a `direct` upstream with bind/interface, STUN reflection logic is bind-aware for KDF source material.
10. In ME mode for a SOCKS upstream, SOCKS `BND.ADDR/BND.PORT` is used for KDF when it is valid, public, and of the same IP family.

## Upstream Configuration Examples

### Example 1: Minimal direct upstream

```toml
[[upstreams]]
type = "direct"
weight = 1
enabled = true
```

### Example 2: Direct with interface + explicit bind IPs

```toml
[[upstreams]]
type = "direct"
interface = "eth0"
bind_addresses = ["192.168.1.100", "192.168.1.101"]
weight = 3
enabled = true
```

### Example 3: SOCKS5 upstream with authentication

```toml
[[upstreams]]
type = "socks5"
address = "198.51.100.30:1080"
username = "proxy-user"
password = "proxy-pass"
weight = 2
enabled = true
```

### Example 4: Mixed upstreams with scopes

```toml
[[upstreams]]
type = "direct"
weight = 5
enabled = true
scopes = ""

[[upstreams]]
type = "socks5"
address = "203.0.113.40:1080"
username = "edge"
password = "edgepass"
weight = 3
enabled = true
scopes = "premium,me"
```

### Example 5: ME-focused tuning profile

```toml
[general]
use_middle_proxy = true
proxy_secret_path = "proxy-secret"
middle_proxy_nat_probe = true
stun_nat_probe_concurrency = 16
middle_proxy_pool_size = 12
me_keepalive_enabled = true
me_keepalive_interval_secs = 20
me_keepalive_jitter_secs = 4
me_reconnect_max_concurrent_per_dc = 12
me_reconnect_backoff_base_ms = 300
me_reconnect_backoff_cap_ms = 10000
me_reconnect_fast_retry_count = 10
hardswap = true
me_reinit_every_secs = 600
me_hardswap_warmup_delay_min_ms = 500
me_hardswap_warmup_delay_max_ms = 1200
me_hardswap_warmup_extra_passes = 2
me_hardswap_warmup_pass_backoff_base_ms = 400
me_config_stable_snapshots = 3
me_config_apply_cooldown_secs = 120
proxy_secret_stable_snapshots = 3
proxy_secret_rotate_runtime = true
proxy_secret_len_max = 512
update_every = 300
me_pool_drain_ttl_secs = 120
me_pool_min_fresh_ratio = 0.9
me_reinit_drain_timeout_secs = 180

[timeouts]
me_one_retry = 8
me_one_timeout_ms = 1200

[network]
stun_use = true
stun_tcp_fallback = true
stun_servers = [
  "stun1.l.google.com:19302",
  "stun2.l.google.com:19302"
]
http_ip_detect_urls = [
  "https://api.ipify.org",
  "https://ifconfig.me/ip"
]
```

321 docs/XRAY-SINGBOX-ROUTING.ru.md Normal file
@@ -0,0 +1,321 @@

# SNI routing in xray-core / sing-box + TLS fronting

## Terms (in the context of this case)

- **TLS fronting domain**: the domain that appears in the TLS ClientHello as the **SNI** (for example, `petrovich.ru`); it is used as a "mask" at L7 and as a routing key in the proxy router.
- **xray-core / sing-box**: a local or remote L7/TLS router (proxy) that:
  1) accepts the incoming TCP/TLS connection,
  2) reads the TLS ClientHello,
  3) extracts the SNI,
  4) picks an outbound/upstream based on the SNI,
  5) opens a new TCP connection to the target host **on its own behalf**.
- **SNI (Server Name Indication)**: the field in the TLS ClientHello where the Telegram client announces the domain name used for "masking"
- **DNS resolution on the L7 router side**: if the egress address is given as a domain (or the router decides to "follow the SNI anyway"), DNS is resolved **on the xray/sing-box side**, not on the Telegram client side

---

## Key idea: where the connection actually goes is decided not by what you told the client, but by how the L7 router interprets the SNI

The mechanics:

1) You can point the Telegram client at the **telemt IP/domain** as the "server".
2) Between the client and telemt sits xray-core/sing-box, which accepts the TCP connection, reads the TLS ClientHello, and sees **SNI=petrovich.ru**
3) The router then says: "I see this SNI, so route it to upstream/route N"
4) And it opens the outgoing connection not "to the IP the user had in mind" but **to the domain from the SNI** (or via an SNI-to-outbound mapping), resolving its IP through its own DNS cache or resolver
5) The A record of `petrovich.ru` points **not at the telemt IP**, so with L7 routing the traffic goes to the "original" site behind that domain rather than to telemt: the Telegram client naturally cannot get the expected behavior, because nobody on the other side will answer the handshake

---
## Scheme 1: "How it does NOT work"

```text
Telegram Client
 |
 | (telemt IP/domain configured)
 v
telemt instance
```

The expectation: "I pointed the client at telemt -> so the traffic ends up in telemt". **No!**

---

## Scheme 2: "How it really works with a TLS/L7 router and SNI"

```text
Telegram Client
 |
 | 1) TCP/TLS connection:
 |    - ClientHello:
 |      - SNI=petrovich.ru
 v
xray-core / sing-box / any L7 router
 |
 | 2) reads ClientHello -> extracts SNI
 | 3) picks a route by SNI
 | 4) resolves DNS for petrovich.ru
 | 5) connects to the resolved IP over TLS with that SNI
 v
"Original" site whose A record does not point at telemt
 |
 X not telemt -> the Telegram client does not connect as expected
```

---
## Why the telemt IP/domain configured in the client "does not save you"

Because in this mode xray/sing-box acts as a **TCP/TLS termination point**, effectively a TLS inspector at the ClientHello level, which means:

* The TCP session from the Telegram client ends at xray/sing-box
* A **new** TCP session is then created "on behalf of" xray/sing-box towards the upstream
* The upstream is chosen by routing rules, and in TLS scenarios the most convenient and common key is the **SNI**

In other words, "where to go next" is decided by the L7 router's logic:

* either rules of the form `if SNI == petrovich.ru -> outbound X`,
* or more "automatic" behavior: `connect to whatever host the SNI names`,
* plus the router's DNS cache and its own resolvers

---

## What exactly is extracted from the TLS ClientHello, and why that is enough

The TLS ClientHello is sent **at the start** of the TLS session and, in classic TLS without ECH, carries the SNI in plaintext.

Simplified:

```text
ClientHello:
  - supported_versions
  - cipher_suites
  - extensions:
      - server_name: petrovich.ru   <-- SNI
      - alpn: h2/http1.1/...
      - ...
```

The router does not need to decrypt the traffic or terminate TLS "as a server": it is often enough to read the first packets and parse the ClientHello to obtain the SNI and make a decision

---
## The typical SNI routing algorithm

1. Accept the incoming TCP connection.
2. Wait for the first bytes.
3. Detect the protocol:

   * if a TLS ClientHello is seen -> parse SNI/ALPN
4. Apply route rules:

   * match on `server_name` / `domain` / `tls.sni`
5. Pick an outbound:

   * direct / proxy / specific upstream / detour
6. Open the outgoing connection:

   * either to a fixed IP:port,
   * or to a domain via DNS resolution on the router side
7. Start proxying data between the inbound and outbound sides

---

## Why "the fronting domain's A record not pointing at telemt" breaks the case

### The situation

* In the ClientHello: `SNI = petrovich.ru`
* DNS: `petrovich.ru -> 203.0.113.77`, the "original" site
* telemt lives at: `198.51.100.10`

### What the router does

* Sees the SNI `petrovich.ru`
* Then either:

  * (a) connects directly to `petrovich.ru:443`, resolving the A record to `203.0.113.77`,
  * or:
  * (b) picks an outbound whose destination is `petrovich.ru`,
  * or:
  * (c) sniffs/overrides the destination based on the SNI

As a result the outgoing connection goes to `203.0.113.77:443`, not to telemt!
A different server, a different protocol, different logic, and telemt is not involved. A quick check for this failure mode is sketched below.
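
A minimal way to spot the mismatch from the router host (addresses are the illustrative ones from this example):

```bash
# If the fronting domain's A record differs from the telemt IP,
# SNI-based routing will deliver traffic to the "original" site.
FRONT_IP="$(dig +short petrovich.ru A | head -n1)"
TELEMT_IP="198.51.100.10"   # illustrative telemt address from this example
if [ "$FRONT_IP" != "$TELEMT_IP" ]; then
  echo "Mismatch: SNI routing will reach $FRONT_IP, not telemt"
fi
```
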
---
## "Где именно происходит подмена destination на SNI"
|
||||
|
||||
Это зависит от конфигурации, но типовые варианты:
|
||||
|
||||
### Вариант A: outbound задан доменом (и он совпадает с SNI)
|
||||
|
||||
Правило по SNI выбирает outbound, у которого destination задан доменом фронтинга,
|
||||
тогда DNS резолвится на стороне роутера и вы уходите на "оригинальный" хост
|
||||
|
||||
### Вариант B: destination override / sniffing
|
||||
|
||||
Роутер "снифает" SNI и **перезаписывает** destination на домен из SNI (даже если вход изначально был на IP telemt),
|
||||
это особенно коварно: пользователь видит "я подключаюсь к IP telemt", но роутер после sniffing решает иначе
|
||||
|
||||
### Вариант C: split DNS / кеш / независимый резолвер
|
||||
|
||||
Даже если клиент "где-то" резолвит иначе, это не важно: конечный DNS для исходящего коннекта — на стороне xray/sing-box,
|
||||
который может иметь:
|
||||
|
||||
* свой DoH/DoT,
|
||||
* свой кеш,
|
||||
* свои правила fake-ip / system resolver,
|
||||
* и, как следствие, своя "карта" **домен/SNI -> IP**
|
||||
|
||||
---
|
||||
|
||||
## Признаки того, что трафик "утёк на оригинал", а не попал в telemt
|
||||
|
||||
* На стороне telemt отсутствуют входящие соединения/логи
|
||||
* На стороне роутера видно, что destination — домен фронтинга, а IP соответствует публичному сайту
|
||||
* TLS-метрики/сертификат на выходе соответствует "оригинальному" сайту в записах трафика
|
||||
* Telegram-клиент получает неожиданный тип ответов/ошибку handshaking/timeout в debug-режиме
|
||||
|
||||
---
|
||||
|
||||
## The best-practice fix for this case: your own fronting domain + a stub site on telemt + Let's Encrypt

### Goal

Arrange things so that:

* the SNI (fronting domain) **resolves to the telemt IP**,
* the telemt IP actually serves TLS with a valid certificate for that domain,
* and even if someone "opens the domain as a website", they see a normal stub page rather than "emptiness"

### What this buys you

* xray/sing-box, routing by SNI, will inevitably arrive at telemt, because DNS(SNI domain) → telemt IP
* The appearance is plausible: an ordinary domain with an ordinary certificate
* Robustness: fewer surprises from DNS caches/re-resolution/"smart" router rules

---

## The recommended scheme (target state)

```text
Telegram Client
 |
 | TLS ClientHello: SNI = hello.example.com
 v
xray-core / sing-box
 |
 | Route by SNI -> outbound -> connect to hello.example.com:443
 | DNS(hello.example.com) = telemt IP
 v
telemt instance (telemt IP)
 |
 | TLS cert for hello.example.com (Let's Encrypt)
 | + stub site / health endpoint
 v
OK
```

---
## A practical checklist (minimal)

1. Buy/own a domain: `hello.example.com`
2. In DNS:

   * `A hello.example.com -> <telemt IP>`
   * (optionally) AAAA, if you use IPv6 and it is stable
3. On the telemt host:

   * bring up a TLS endpoint on 443 with a valid LE certificate for `hello.example.com`
   * serve a "stub" (for example, a static site) so the domain looks like an ordinary web service
4. In the xray/sing-box rules:

   * route the relevant traffic by SNI = `hello.example.com` to the "right" outbound (towards telemt)
   * avoid configurations where a destination override redirects to a foreign domain
5. Important:

   * if you use a DNS cache on the router, flush/refresh it after changing the A record

A verification sketch follows this list.
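
Once DNS and the certificate are in place, both can be checked from any host; a small sketch (with `hello.example.com` standing in for your own domain):

```bash
# 1) The fronting domain must resolve to the telemt IP.
dig +short hello.example.com A

# 2) The TLS endpoint must present a certificate for that exact name.
openssl s_client -connect hello.example.com:443 \
  -servername hello.example.com </dev/null 2>/dev/null \
  | openssl x509 -noout -subject -dates
```
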
---

## A note on the stub site

For TLS emulation, telemt has the TLS-F subsystem in `src/tls_front`:

- its fetcher module collects TLS profiles so that telemt can mimic the TLS behavior of the specific referenced site as faithfully as possible

When you point it at a site that does not answer over TLS:

- the fetcher cannot build a TLS profile, and telemt falls back to `fake_cert_len`, a primitive algorithm
- that fills the TLS bookkeeping fields with random bytes;
- simple DPI systems do not detect this,
- but advanced ones, such as nEdge or the Fraud Control systems in mobile networks, will easily block or throttle such traffic

By standing up a stub site with a Let's Encrypt certificate, you let TLS-F fetch the certificate data and "replay" it correctly from then on.

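A hedged sketch of the certificate step, assuming `certbot` and a free port 80 for the HTTP-01 challenge; how telemt shares port 443 with the stub depends on your telemt configuration and is not shown here:

```sh
# Issue a Let's Encrypt certificate for the fronting domain on the telemt host.
sudo certbot certonly --standalone --preferred-challenges http -d hello.example.com

# The resulting files land under /etc/letsencrypt/live/hello.example.com/;
# any static server presenting them is enough for TLS-F to fetch a real profile.
```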
---

## A config-level variant: pin the SNI to telemt via a fixed IP

If you need to remove the DNS dependency entirely, you can create an outbound that connects to a **fixed telemt IP** while still presenting `hello.example.com` as the SNI/Host.

The idea:

* destination: `IP:443`
* SNI: `hello.example.com`
* the certificate on telemt is issued precisely for `hello.example.com`

This gives you:

* TLS that looks correct, since the SNI matches the certificate,
* and routing that can never drift to the "original", because the A record points at telemt and is controlled by you

But in the scenario described here, the problem is exactly that the router "decides by SNI and resolves the domain itself", so the most universal fix is to make DNS always lead to telemt.

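You can emulate exactly this "fixed IP + pinned SNI" behavior from a shell to validate the setup before touching the router config. A sketch assuming `curl`; the IP is a placeholder:

```sh
# --resolve pins the domain to the telemt IP, so no DNS answer can divert
# the connection, while SNI and Host still say hello.example.com.
curl -v --resolve hello.example.com:443:203.0.113.10 https://hello.example.com/
```

A clean handshake with a valid `hello.example.com` certificate in the verbose output confirms the telemt-side TLS endpoint is wired correctly.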
---

## Example rule logic as an L7-router pseudoconfig

```text
if inbound is TLS and sni == "hello.example.com":
    route -> outbound "telemt"
else:
    route -> outbound "default"
```

Outbound `telemt`:

* destination: `hello.example.com:443`
* TLS enabled
* SNI: `hello.example.com`

---

## Separately: what can break even "correct" DNS

* **DNS caching** in xray/sing-box or in the system resolver, especially after an A-record change
* **Split-horizon DNS**: different answers inside vs. outside, or interception/termination attempts at other points
* **IPv6**: if an AAAA record exists and points elsewhere, the router may prefer IPv6; remember that v6 support is unstable and not recommended in prod
* **DoH/DoT** on the router: it may resolve through a different resolver than the one you tested with

Minimal hygiene:

* keep the A/AAAA records under your control,
* keep the TTL reasonable,
* check which resolver the router itself actually uses,
* disable or restrict destination override where necessary

---

## Takeaway

In TLS-fronting mode with xray-core/sing-box acting as the L7/TLS router, **the SNI becomes the primary source of truth for routing**.

If the fronting domain's DNS does not point at the telemt IP, the router dutifully diverts traffic to the "original" site, because it builds the outgoing connection "by SNI".

The reliable fix for this case:

* use **your own domain** for fronting,
* point its **A/AAAA** records at the telemt IP,
* run a **TLS service with a Let's Encrypt certificate** for that domain on telemt,
* and (preferably) keep a **stub site** so that port 443 looks like ordinary HTTPS

docs/model/MODEL.en.md (new file, 285 lines)
@@ -0,0 +1,285 @@

# Telemt Runtime Model

## Scope

This document defines runtime concepts used by the Middle-End (ME) transport pipeline and the orchestration logic around it.

It focuses on:

- `ME Pool / Reader / Writer / Refill / Registry`
- `Adaptive Floor`
- `Trio-State`
- `Generation Lifecycle`

## Core Entities

### ME Pool

`ME Pool` is the runtime orchestrator for all Middle-End writers.

Responsibilities:

- Holds writer inventory by DC/family/endpoint.
- Maintains routing primitives and writer selection policy.
- Tracks generation state (`active`, `warm`, `draining` context).
- Applies runtime policies (floor mode, refill, reconnect, reinit, fallback behavior).
- Exposes readiness gates used by admission logic (for conditional accept/cast behavior).

Non-goals:

- It does not own client protocol decoding.
- It does not own per-client business policy (quotas/limits).

### ME Writer

`ME Writer` is a long-lived ME RPC tunnel bound to one concrete ME endpoint (`ip:port`), with:

- Outbound command channel (send path).
- Associated reader loop (inbound path).
- Health/degraded flags.
- Contour/state and generation metadata.

A writer is the actual data plane carrier for client sessions once bound.

### ME Reader

`ME Reader` is the inbound parser/dispatcher for one writer:

- Reads/decrypts ME RPC frames.
- Validates sequence/checksum.
- Routes payloads to client-connection channels via `Registry`.
- Emits close/ack/data events and updates telemetry.

Design intent:

- Reader must stay non-blocking as much as possible.
- Backpressure on a single client route must not stall the whole writer stream.

### Refill

`Refill` is the recovery mechanism that restores writer coverage when capacity drops:

- Per-endpoint restore (same endpoint first).
- Per-DC restore to satisfy required floor.
- Optional outage-mode/shadow behavior for fragile single-endpoint DCs.

Refill works asynchronously and should not block hot routing paths.

### Registry

`Registry` is the routing index between ME and client sessions:

- `conn_id -> client response channel`
- `conn_id <-> writer_id` binding map
- writer activity snapshots and idle tracking

Main invariants:

- A `conn_id` routes to at most one active response channel.
- Writer loss triggers safe unbind/cleanup and close propagation.
- Registry state is the source of truth for active ME-bound session mapping.

## Adaptive Floor

### What it is

`Adaptive Floor` is a runtime policy that changes target writer count per DC based on observed activity, instead of always holding static peak floor.

### Why it exists

Goals:

- Reduce idle writer churn under low traffic.
- Keep enough warm capacity to avoid client-visible stalls on burst recovery.
- Limit needless reconnect storms on unstable endpoints.

### Behavioral model

- Under activity: floor converges toward configured static requirement.
- Under prolonged idle: floor can shrink to a safe minimum.
- Recovery/grace windows prevent aggressive oscillation.

### Safety constraints

- Never violate minimal survivability floor for a DC group.
- Refill must still restore quickly on demand.
- Floor adaptation must not force-drop already bound healthy sessions.

## Trio-State

`Trio-State` is writer contouring:

- `Warm`
- `Active`
- `Draining`

### State semantics

- `Warm`: connected and validated, not primary for new binds.
- `Active`: preferred for new binds and normal traffic.
- `Draining`: no new regular binds; existing sessions continue until graceful retirement rules apply.

### Transition intent

- `Warm -> Active`: when coverage/readiness conditions are satisfied.
- `Active -> Draining`: on generation swap, endpoint replacement, or controlled retirement.
- `Draining -> removed`: after drain TTL/force-close policy (or when naturally empty).

This separation reduces SPOF and keeps cutovers predictable.

## Generation Lifecycle

Generation isolates pool epochs during reinit/reconfiguration.

### Lifecycle phases

1. `Bootstrap`: initial writers are established.
2. `Warmup`: next generation writers are created and validated.
3. `Activation`: generation promoted to active when coverage gate passes.
4. `Drain`: previous generation becomes draining, existing sessions are allowed to finish.
5. `Retire`: old generation writers are removed after graceful rules.

### Operational guarantees

- No partial generation activation without minimum coverage.
- Existing healthy client sessions should not be dropped just because a new generation appears.
- Draining generation exists to absorb in-flight traffic during swap.

### Readiness and admission

Pool readiness is not equivalent to "all endpoints fully saturated".
Typical gating strategy:

- Open admission when per-DC minimal alive coverage exists.
- Continue background saturation for multi-endpoint DCs.

This keeps startup latency low while preserving eventual full capacity.
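
These admission gates are observable from the outside through the control-plane API added later in this diff (`src/api/mod.rs`). A usage sketch, assuming the API listens on `127.0.0.1:8080` and that the `Authorization` value matches the configured `api.auth_header`:

```sh
# Current gate state exposed by the control plane:
curl -s -H "Authorization: <api.auth_header value>" \
  http://127.0.0.1:8080/v1/runtime/gates

# Startup/initialization progress:
curl -s -H "Authorization: <api.auth_header value>" \
  http://127.0.0.1:8080/v1/runtime/initialization
```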

## Interactions Between Concepts

- `Generation` defines pool epochs.
- `Trio-State` defines per-writer role inside/around those epochs.
- `Adaptive Floor` defines how much capacity should be maintained right now.
- `Refill` is the actuator that closes the gap between desired and current capacity.
- `Registry` keeps per-session routing correctness while all of the above changes over time.

## Architectural Approach

### Layered Design

The runtime is intentionally split into two planes:

- `Control Plane`: decides desired topology and policy (`floor`, `generation swap`, `refill`, `fallback`).
- `Data Plane`: executes packet/session transport (`reader`, `writer`, routing, acks, close propagation).

Architectural rule:

- Control Plane may change writer inventory and policy.
- Data Plane must remain stable and low-latency while those changes happen.

### Ownership Model

Ownership is centered around explicit state domains:

- `MePool` owns writer lifecycle and policy state.
- `Registry` owns per-connection routing bindings.
- `Writer task` owns outbound ME socket send progression.
- `Reader task` owns inbound ME socket parsing and event dispatch.

This prevents accidental cross-layer mutation and keeps invariants local.

### Control Plane Responsibilities

Control Plane is event-driven and policy-driven:

- Startup initialization and readiness gates.
- Runtime reinit (periodic or config-triggered).
- Coverage checks per DC/family/endpoint group.
- Floor enforcement (static/adaptive).
- Refill scheduling and retry orchestration.
- Generation transition (`warm -> active`, previous `active -> draining`).

Control Plane must prioritize determinism over short-term aggressiveness.

### Data Plane Responsibilities

Data Plane is throughput-first and allocation-sensitive:

- Session bind to writer.
- Per-frame parsing/validation and dispatch.
- Ack and close signal propagation.
- Route drop behavior under missing connection or closed channel.
- Minimal critical logging in hot path.

Data Plane should avoid waiting on operations that are not strictly required for frame correctness.

## Concurrency and Synchronization

### Concurrency Principles

- Per-writer isolation: each writer has independent send/read task loops.
- Per-connection isolation: client channel state is scoped by `conn_id`.
- Asynchronous recovery: refill/reconnect runs outside the packet hot path.

### Synchronization Strategy

- Shared maps use fine-grained, short-lived locking.
- Read-mostly paths avoid broad write-lock windows.
- Backpressure decisions are localized at route/channel boundary.

Design target:

- A slow consumer should degrade only itself (or its route), not global writer progress.

### Cancellation and Shutdown

Writer and reader loops are cancellation-aware:

- explicit cancel token / close command support;
- safe unbind and cleanup via registry;
- deterministic order: stop admission -> drain/close -> release resources.

## Consistency Model

### Session Consistency

For one `conn_id`:

- exactly one active route target at a time;
- close and unbind must be idempotent;
- writer loss must not leave dangling bindings.

### Generation Consistency

Generational consistency guarantees:

- New generation is not promoted before minimum coverage gate.
- Previous generation remains available in `draining` state during handover.
- Forced retirement is policy-bound (`drain ttl`, optional force-close), not immediate.

### Policy Consistency

Policy changes (`adaptive/static floor`, fallback mode, retries) should apply without violating established active-session routing invariants.

## Backpressure and Flow Control

### Route-Level Backpressure

Route channels are bounded by design.
When pressure increases:

- short burst absorption is allowed;
- prolonged congestion triggers controlled drop semantics;
- drop accounting is explicit via metrics/counters.

### Reader Non-Blocking Priority

Inbound ME reader path should never be serialized behind one congested client route.
Practical implication:

- prefer non-blocking route attempt in the parser loop;
- move heavy recovery to async side paths.

## Failure Domain Strategy

### Endpoint-Level Failure

Failure of one endpoint should trigger endpoint-scoped recovery first:

- same endpoint reconnect;
- endpoint replacement within same DC group if applicable.

### DC-Level Degradation

If a DC group cannot satisfy floor:

- keep service via remaining coverage if policy allows;
- continue asynchronous refill saturation in background.

### Whole-Pool Readiness Loss

If no sufficient ME coverage exists:

- admission gate can hold new accepts (conditional policy);
- existing sessions should continue when their path remains healthy.

## Performance Architecture Notes

### Hotpath Discipline

Allowed in hotpath:

- fixed-size parsing and cheap validation;
- bounded channel operations;
- precomputed or low-allocation access patterns.

Avoid in hotpath:

- repeated expensive decoding;
- broad locks with awaits inside critical sections;
- verbose high-frequency logging.

### Throughput Stability Over Peak Spikes

Architecture prefers stable throughput and predictable latency over short peak gains that increase churn or long-tail reconnect times.

## Evolution and Extension Rules

To evolve this model safely:

- Add new policy knobs in Control Plane first.
- Keep Data Plane contracts stable (`conn_id`, route semantics, close semantics).
- Validate generation and registry invariants before enabling by default.
- Introduce new retry/recovery strategies behind explicit config.

## Failure and Recovery Notes

- Single-endpoint DC failure is a normal degraded mode case; policy should prioritize fast reconnect and optional shadow/probing strategies.
- Idle close by peer should be treated as expected when upstream enforces idle timeout.
- Reconnect backoff must protect against synchronized churn while still allowing fast first retries.
- Fallback (`ME -> direct DC`) is a policy switch, not a transport bug by itself.

## Terminology Summary

- `Coverage`: enough live writers to satisfy per-DC acceptance policy.
- `Floor`: target minimum writer count policy.
- `Churn`: frequent writer reconnect/remove cycles.
- `Hotpath`: per-packet/per-connection data path where extra waits/allocations are expensive.

docs/model/MODEL.ru.md (new file, 285 lines)
@@ -0,0 +1,285 @@
|
||||
# Runtime-модель Telemt
|
||||
|
||||
## Область описания
|
||||
Документ фиксирует ключевые runtime-понятия пайплайна Middle-End (ME) и оркестрации вокруг него.
|
||||
|
||||
Фокус:
|
||||
- `ME Pool / Reader / Writer / Refill / Registry`
|
||||
- `Adaptive Floor`
|
||||
- `Trio-State`
|
||||
- `Generation Lifecycle`
|
||||
|
||||
## Базовые сущности
|
||||
|
||||
### ME Pool
|
||||
`ME Pool` — центральный оркестратор всех Middle-End writer-ов.
|
||||
|
||||
Зона ответственности:
|
||||
- хранит инвентарь writer-ов по DC/family/endpoint;
|
||||
- управляет выбором writer-а и маршрутизацией;
|
||||
- ведёт состояние поколений (`active`, `warm`, `draining` контекст);
|
||||
- применяет runtime-политики (floor, refill, reconnect, reinit, fallback);
|
||||
- отдаёт сигналы готовности для admission-логики (conditional accept/cast).
|
||||
|
||||
Что не делает:
|
||||
- не декодирует клиентский протокол;
|
||||
- не реализует бизнес-политику пользователя (квоты/лимиты).
|
||||
|
||||
### ME Writer
|
||||
`ME Writer` — долгоживущий ME RPC-канал к конкретному endpoint (`ip:port`), у которого есть:
|
||||
- канал команд на отправку;
|
||||
- связанный reader loop для входящего потока;
|
||||
- флаги состояния/деградации;
|
||||
- метаданные contour/state и generation.
|
||||
|
||||
Writer — это фактический data-plane носитель клиентских сессий после бинда.
|
||||
|
||||
### ME Reader
|
||||
`ME Reader` — входной parser/dispatcher одного writer-а:
|
||||
- читает и расшифровывает ME RPC-фреймы;
|
||||
- проверяет sequence/checksum;
|
||||
- маршрутизирует payload в client-каналы через `Registry`;
|
||||
- обрабатывает close/ack/data и обновляет телеметрию.
|
||||
|
||||
Инженерный принцип:
|
||||
- Reader должен оставаться неблокирующим.
|
||||
- Backpressure одной клиентской сессии не должен останавливать весь поток writer-а.
|
||||
|
||||
### Refill
|
||||
`Refill` — механизм восстановления покрытия writer-ов при просадке:
|
||||
- восстановление на том же endpoint в первую очередь;
|
||||
- восстановление по DC до требуемого floor;
|
||||
- опциональные outage/shadow-режимы для хрупких single-endpoint DC.
|
||||
|
||||
Refill работает асинхронно и не должен блокировать hotpath.
|
||||
|
||||
### Registry
|
||||
`Registry` — маршрутизационный индекс между ME и клиентскими сессиями:
|
||||
- `conn_id -> канал ответа клиенту`;
|
||||
- map биндов `conn_id <-> writer_id`;
|
||||
- снимки активности writer-ов и idle-трекинг.
|
||||
|
||||
Ключевые инварианты:
|
||||
- один `conn_id` маршрутизируется максимум в один активный канал ответа;
|
||||
- потеря writer-а приводит к безопасному unbind/cleanup и отправке close;
|
||||
- именно `Registry` является источником истины по активным ME-биндам.
|
||||
|
||||
## Adaptive Floor
|
||||
|
||||
### Что это
|
||||
`Adaptive Floor` — runtime-политика, которая динамически меняет целевое число writer-ов на DC в зависимости от активности, а не держит всегда фиксированный статический floor.
|
||||
|
||||
### Зачем
|
||||
Цели:
|
||||
- уменьшить churn на idle-трафике;
|
||||
- сохранить достаточную прогретую ёмкость для быстрых всплесков;
|
||||
- снизить лишние reconnect-штормы на нестабильных endpoint.
|
||||
|
||||
### Модель поведения
|
||||
- при активности floor стремится к статическому требованию;
|
||||
- при длительном idle floor может снижаться до безопасного минимума;
|
||||
- grace/recovery окна не дают системе "флапать" слишком резко.
|
||||
|
||||
### Ограничения безопасности
|
||||
- нельзя нарушать минимальный floor выживаемости DC-группы;
|
||||
- refill обязан быстро нарастить покрытие по запросу;
|
||||
- адаптация не должна принудительно ронять уже привязанные healthy-сессии.
|
||||
|
||||
## Trio-State
|
||||
|
||||
`Trio-State` — контурная роль writer-а:
|
||||
- `Warm`
|
||||
- `Active`
|
||||
- `Draining`
|
||||
|
||||
### Семантика состояний
|
||||
- `Warm`: writer подключён и валиден, но не основной для новых биндов.
|
||||
- `Active`: приоритетный для новых биндов и обычного трафика.
|
||||
- `Draining`: новые обычные бинды не назначаются; текущие сессии живут до правил graceful-вывода.
|
||||
|
||||
### Логика переходов
|
||||
- `Warm -> Active`: когда достигнуты условия покрытия/готовности.
|
||||
- `Active -> Draining`: при swap поколения, замене endpoint или контролируемом выводе.
|
||||
- `Draining -> removed`: после drain TTL/force-close политики (или естественного опустошения).
|
||||
|
||||
Такое разделение снижает SPOF-риски и делает cutover предсказуемым.
|
||||
|
||||
## Generation Lifecycle
|
||||
|
||||
Generation изолирует эпохи пула при reinit/reconfiguration.
|
||||
|
||||
### Фазы жизненного цикла
|
||||
1. `Bootstrap`: поднимается начальный набор writer-ов.
|
||||
2. `Warmup`: создаётся и валидируется новое поколение.
|
||||
3. `Activation`: новое поколение становится active после прохождения coverage-gate.
|
||||
4. `Drain`: предыдущее поколение переводится в draining, текущим сессиям дают завершиться.
|
||||
5. `Retire`: старое поколение удаляется по graceful-правилам.
|
||||
|
||||
### Операционные гарантии
|
||||
- нельзя активировать поколение частично без минимального покрытия;
|
||||
- healthy-клиенты не должны теряться только из-за появления нового поколения;
|
||||
- draining-поколение служит буфером для in-flight трафика во время swap.
|
||||
|
||||
### Готовность и приём клиентов
|
||||
Готовность пула не равна "все endpoint полностью насыщены".
|
||||
Типичная стратегия:
|
||||
- открыть admission при минимально достаточном alive-покрытии по DC;
|
||||
- параллельно продолжать saturation для multi-endpoint DC.
|
||||
|
||||
Это уменьшает startup latency и сохраняет выход на полную ёмкость.
|
||||
|
||||
## Как понятия связаны между собой
|
||||
|
||||
- `Generation` задаёт эпохи пула.
|
||||
- `Trio-State` задаёт роль каждого writer-а внутри/между эпохами.
|
||||
- `Adaptive Floor` задаёт, сколько ёмкости нужно сейчас.
|
||||
- `Refill` — исполнитель, который закрывает разницу между desired и current capacity.
|
||||
- `Registry` гарантирует корректную маршрутизацию сессий, пока всё выше меняется.
|
||||
|
||||
## Архитектурный подход
|
||||
|
||||
### Слоистая модель
|
||||
Runtime специально разделён на две плоскости:
|
||||
- `Control Plane`: принимает решения о целевой топологии и политиках (`floor`, `generation swap`, `refill`, `fallback`).
|
||||
- `Data Plane`: исполняет транспорт сессий и пакетов (`reader`, `writer`, маршрутизация, ack, close).
|
||||
|
||||
Ключевое правило:
|
||||
- Control Plane может менять состав writer-ов и policy.
|
||||
- Data Plane должен оставаться стабильным и низколатентным в момент этих изменений.
|
||||
|
||||
### Модель владения состоянием
|
||||
Владение разделено по доменам:
|
||||
- `MePool` владеет жизненным циклом writer-ов и policy-state.
|
||||
- `Registry` владеет routing-биндами клиентских сессий.
|
||||
- `Writer task` владеет исходящей прогрессией ME-сокета.
|
||||
- `Reader task` владеет входящим парсингом и dispatch-событиями.
|
||||
|
||||
Это ограничивает побочные мутации и локализует инварианты.
|
||||
|
||||
### Обязанности Control Plane
|
||||
Control Plane работает событийно и policy-ориентированно:
|
||||
- стартовая инициализация и readiness-gate;
|
||||
- runtime reinit (периодический и/или по изменению конфигурации);
|
||||
- проверки покрытия по DC/family/endpoint group;
|
||||
- применение floor-политики (static/adaptive);
|
||||
- планирование refill и orchestration retry;
|
||||
- переходы поколений (`warm -> active`, прежний `active -> draining`).
|
||||
|
||||
Для него важнее детерминизм, чем агрессивная краткосрочная реакция.
|
||||
|
||||
### Обязанности Data Plane
|
||||
Data Plane ориентирован на пропускную способность и предсказуемую задержку:
|
||||
- bind клиентской сессии к writer-у;
|
||||
- per-frame parsing/validation/dispatch;
|
||||
- распространение ack/close;
|
||||
- корректная реакция на missing conn/closed channel;
|
||||
- минимальный лог-шум в hotpath.
|
||||
|
||||
Data Plane не должен ждать операций, не критичных для корректности текущего фрейма.
|
||||
|
||||
## Конкурентность и синхронизация
|
||||
|
||||
### Принципы конкурентности
|
||||
- Изоляция по writer-у: у каждого writer-а независимые send/read loop.
|
||||
- Изоляция по сессии: состояние канала локально для `conn_id`.
|
||||
- Асинхронное восстановление: refill/reconnect выполняются вне пакетного hotpath.
|
||||
|
||||
### Стратегия синхронизации
|
||||
- Для shared map используются короткие и узкие lock-секции.
|
||||
- Read-heavy пути избегают длительных write-lock окон.
|
||||
- Решения по backpressure локализованы на границе route/channel.
|
||||
|
||||
Цель:
|
||||
- медленный consumer должен деградировать локально, не останавливая глобальный прогресс writer-а.
|
||||
|
||||
### Cancellation и shutdown
|
||||
Reader/Writer loop должны быть cancellation-aware:
|
||||
- явные cancel token / close command;
|
||||
- безопасный unbind/cleanup через registry;
|
||||
- детерминированный порядок: stop admission -> drain/close -> release resources.
|
||||
|
||||
## Модель согласованности
|
||||
|
||||
### Согласованность сессии
|
||||
Для одного `conn_id`:
|
||||
- одновременно ровно один активный route-target;
|
||||
- close/unbind операции идемпотентны;
|
||||
- потеря writer-а не оставляет dangling-бинды.
|
||||
|
||||
### Согласованность поколения
|
||||
Гарантии generation:
|
||||
- новое поколение не активируется до прохождения минимального coverage-gate;
|
||||
- предыдущее поколение остаётся в `draining` на время handover;
|
||||
- принудительный вывод writer-ов ограничен policy (`drain ttl`, optional force-close), а не мгновенный.
|
||||
|
||||
### Согласованность политик
|
||||
Изменение policy (`adaptive/static floor`, fallback mode, retries) не должно ломать инварианты маршрутизации уже активных сессий.
|
||||
|
||||
## Backpressure и управление потоком
|
||||
|
||||
### Route-level backpressure
|
||||
Route-каналы намеренно bounded.
|
||||
При росте нагрузки:
|
||||
- кратковременный burst поглощается;
|
||||
- длительная перегрузка переходит в контролируемую drop-семантику;
|
||||
- все drop-сценарии должны быть прозрачно видны в метриках.
|
||||
|
||||
### Приоритет неблокирующего Reader
|
||||
Входящий ME-reader path не должен сериализоваться из-за одной перегруженной клиентской сессии.
|
||||
Практически это означает:
|
||||
- использовать неблокирующую попытку route в parser loop;
|
||||
- выносить тяжёлое восстановление в асинхронные side-path.
|
||||
|
||||
## Стратегия доменов отказа
|
||||
|
||||
### Отказ отдельного endpoint
|
||||
Сначала применяется endpoint-local recovery:
|
||||
- reconnect в тот же endpoint;
|
||||
- затем замена endpoint внутри той же DC-группы (если доступно).
|
||||
|
||||
### Деградация уровня DC
|
||||
Если DC-группа не набирает floor:
|
||||
- сервис сохраняется на остаточном покрытии (если policy разрешает);
|
||||
- saturation refill продолжается асинхронно в фоне.
|
||||
|
||||
### Потеря готовности всего пула
|
||||
Если достаточного ME-покрытия нет:
|
||||
- admission gate может временно закрыть приём новых подключений (conditional policy);
|
||||
- уже активные сессии продолжают работать, пока их маршрут остаётся healthy.
|
||||
|
||||
## Архитектурные заметки по производительности
|
||||
|
||||
### Дисциплина hotpath
|
||||
Допустимо в hotpath:
|
||||
- фиксированный и дешёвый parsing/validation;
|
||||
- bounded channel operations;
|
||||
- precomputed/low-allocation доступ к данным.
|
||||
|
||||
Нежелательно в hotpath:
|
||||
- повторные дорогие decode;
|
||||
- широкие lock-секции с `await` внутри;
|
||||
- высокочастотный подробный logging.
|
||||
|
||||
### Стабильность важнее пиков
|
||||
Архитектура приоритетно выбирает стабильную пропускную способность и предсказуемую latency, а не краткосрочные пики ценой churn и long-tail reconnect.
|
||||
|
||||
## Правила эволюции модели
|
||||
|
||||
Чтобы расширять модель безопасно:
|
||||
- новые policy knobs сначала внедрять в Control Plane;
|
||||
- контракты Data Plane (`conn_id`, route/close семантика) держать стабильными;
|
||||
- перед дефолтным включением проверять generation/registry инварианты;
|
||||
- новые recovery/retry стратегии вводить через явный config-флаг.
|
||||
|
||||
## Нюансы отказов и восстановления
|
||||
|
||||
- падение single-endpoint DC — штатный деградированный сценарий; приоритет: быстрый reconnect и, при необходимости, shadow/probing;
|
||||
- idle-close со стороны peer должен считаться нормальным событием при upstream idle-timeout;
|
||||
- backoff reconnect-логики должен ограничивать синхронный churn, но сохранять быстрые первые попытки;
|
||||
- fallback (`ME -> direct DC`) — это переключаемая policy-ветка, а не автоматический признак бага транспорта.
|
||||
|
||||
## Краткий словарь
|
||||
- `Coverage`: достаточное число живых writer-ов для политики приёма по DC.
|
||||
- `Floor`: целевая минимальная ёмкость writer-ов.
|
||||
- `Churn`: частые циклы reconnect/remove writer-ов.
|
||||
- `Hotpath`: пер-пакетный/пер-коннектный путь, где любые лишние ожидания и аллокации особенно дороги.
|
||||
install.sh (new file, 93 lines)
@@ -0,0 +1,93 @@
|
||||
#!/bin/sh
|
||||
set -eu
|
||||
|
||||
REPO="${REPO:-telemt/telemt}"
|
||||
BIN_NAME="${BIN_NAME:-telemt}"
|
||||
VERSION="${1:-${VERSION:-latest}}"
|
||||
INSTALL_DIR="${INSTALL_DIR:-/usr/local/bin}"
|
||||
|
||||
say() {
|
||||
printf '%s\n' "$*"
|
||||
}
|
||||
|
||||
die() {
|
||||
printf 'Error: %s\n' "$*" >&2
|
||||
exit 1
|
||||
}
|
||||
|
||||
need_cmd() {
|
||||
command -v "$1" >/dev/null 2>&1 || die "required command not found: $1"
|
||||
}
|
||||
|
||||
detect_arch() {
|
||||
arch="$(uname -m)"
|
||||
case "$arch" in
|
||||
x86_64|amd64) printf 'x86_64\n' ;;
|
||||
aarch64|arm64) printf 'aarch64\n' ;;
|
||||
*) die "unsupported architecture: $arch" ;;
|
||||
esac
|
||||
}
|
||||
|
||||
detect_libc() {
|
||||
case "$(ldd --version 2>&1 || true)" in
|
||||
*musl*) printf 'musl\n' ;;
|
||||
*) printf 'gnu\n' ;;
|
||||
esac
|
||||
}
|
||||
|
||||
fetch_to_stdout() {
|
||||
url="$1"
|
||||
if command -v curl >/dev/null 2>&1; then
|
||||
curl -fsSL "$url"
|
||||
elif command -v wget >/dev/null 2>&1; then
|
||||
wget -qO- "$url"
|
||||
else
|
||||
die "neither curl nor wget is installed"
|
||||
fi
|
||||
}
|
||||
|
||||
install_binary() {
|
||||
src="$1"
|
||||
dst="$2"
|
||||
|
||||
if [ -w "$INSTALL_DIR" ] || { [ ! -e "$INSTALL_DIR" ] && [ -w "$(dirname "$INSTALL_DIR")" ]; }; then
|
||||
mkdir -p "$INSTALL_DIR"
|
||||
install -m 0755 "$src" "$dst"
|
||||
elif command -v sudo >/dev/null 2>&1; then
|
||||
sudo mkdir -p "$INSTALL_DIR"
|
||||
sudo install -m 0755 "$src" "$dst"
|
||||
else
|
||||
die "cannot write to $INSTALL_DIR and sudo is not available"
|
||||
fi
|
||||
}
|
||||
|
||||
need_cmd uname
|
||||
need_cmd tar
|
||||
need_cmd mktemp
|
||||
need_cmd grep
|
||||
need_cmd install
|
||||
|
||||
ARCH="$(detect_arch)"
|
||||
LIBC="$(detect_libc)"
|
||||
|
||||
case "$VERSION" in
|
||||
latest)
|
||||
URL="https://github.com/$REPO/releases/latest/download/${BIN_NAME}-${ARCH}-linux-${LIBC}.tar.gz"
|
||||
;;
|
||||
*)
|
||||
URL="https://github.com/$REPO/releases/download/${VERSION}/${BIN_NAME}-${ARCH}-linux-${LIBC}.tar.gz"
|
||||
;;
|
||||
esac
|
||||
|
||||
TMPDIR="$(mktemp -d)"
|
||||
trap 'rm -rf "$TMPDIR"' EXIT INT TERM
|
||||
|
||||
say "Installing $BIN_NAME ($VERSION) for $ARCH-linux-$LIBC..."
|
||||
fetch_to_stdout "$URL" | tar -xzf - -C "$TMPDIR"
|
||||
|
||||
[ -f "$TMPDIR/$BIN_NAME" ] || die "archive did not contain $BIN_NAME"
|
||||
|
||||
install_binary "$TMPDIR/$BIN_NAME" "$INSTALL_DIR/$BIN_NAME"
|
||||
|
||||
say "Installed: $INSTALL_DIR/$BIN_NAME"
|
||||
"$INSTALL_DIR/$BIN_NAME" --version 2>/dev/null || true
|
||||
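A usage sketch for this installer; the raw-script URL is an assumption about the repository layout, and `v1.2.3` is a placeholder tag:

```sh
# Install the latest release into /usr/local/bin (the script's defaults):
curl -fsSL https://raw.githubusercontent.com/telemt/telemt/master/install.sh | sh

# Pin a release and install into a user-writable directory instead:
INSTALL_DIR="$HOME/.local/bin" sh install.sh v1.2.3
```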
src/api/config_store.rs (new file, 107 lines)
@@ -0,0 +1,107 @@
|
||||
use std::io::Write;
|
||||
use std::path::{Path, PathBuf};
|
||||
|
||||
use hyper::header::IF_MATCH;
|
||||
use sha2::{Digest, Sha256};
|
||||
|
||||
use crate::config::ProxyConfig;
|
||||
|
||||
use super::model::ApiFailure;
|
||||
|
||||
pub(super) fn parse_if_match(headers: &hyper::HeaderMap) -> Option<String> {
|
||||
headers
|
||||
.get(IF_MATCH)
|
||||
.and_then(|value| value.to_str().ok())
|
||||
.map(str::trim)
|
||||
.filter(|value| !value.is_empty())
|
||||
.map(|value| value.trim_matches('"').to_string())
|
||||
}
|
||||
|
||||
pub(super) async fn ensure_expected_revision(
|
||||
config_path: &Path,
|
||||
expected_revision: Option<&str>,
|
||||
) -> Result<(), ApiFailure> {
|
||||
let Some(expected) = expected_revision else {
|
||||
return Ok(());
|
||||
};
|
||||
let current = current_revision(config_path).await?;
|
||||
if current != expected {
|
||||
return Err(ApiFailure::new(
|
||||
hyper::StatusCode::CONFLICT,
|
||||
"revision_conflict",
|
||||
"Config revision mismatch",
|
||||
));
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub(super) async fn current_revision(config_path: &Path) -> Result<String, ApiFailure> {
|
||||
let content = tokio::fs::read_to_string(config_path)
|
||||
.await
|
||||
.map_err(|e| ApiFailure::internal(format!("failed to read config: {}", e)))?;
|
||||
Ok(compute_revision(&content))
|
||||
}
|
||||
|
||||
pub(super) fn compute_revision(content: &str) -> String {
|
||||
let mut hasher = Sha256::new();
|
||||
hasher.update(content.as_bytes());
|
||||
hex::encode(hasher.finalize())
|
||||
}
|
||||
|
||||
pub(super) async fn load_config_from_disk(config_path: &Path) -> Result<ProxyConfig, ApiFailure> {
|
||||
let config_path = config_path.to_path_buf();
|
||||
tokio::task::spawn_blocking(move || ProxyConfig::load(config_path))
|
||||
.await
|
||||
.map_err(|e| ApiFailure::internal(format!("failed to join config loader: {}", e)))?
|
||||
.map_err(|e| ApiFailure::internal(format!("failed to load config: {}", e)))
|
||||
}
|
||||
|
||||
pub(super) async fn save_config_to_disk(
|
||||
config_path: &Path,
|
||||
cfg: &ProxyConfig,
|
||||
) -> Result<String, ApiFailure> {
|
||||
let serialized = toml::to_string_pretty(cfg)
|
||||
.map_err(|e| ApiFailure::internal(format!("failed to serialize config: {}", e)))?;
|
||||
write_atomic(config_path.to_path_buf(), serialized.clone()).await?;
|
||||
Ok(compute_revision(&serialized))
|
||||
}
|
||||
|
||||
async fn write_atomic(path: PathBuf, contents: String) -> Result<(), ApiFailure> {
|
||||
tokio::task::spawn_blocking(move || write_atomic_sync(&path, &contents))
|
||||
.await
|
||||
.map_err(|e| ApiFailure::internal(format!("failed to join writer: {}", e)))?
|
||||
.map_err(|e| ApiFailure::internal(format!("failed to write config: {}", e)))
|
||||
}
|
||||
|
||||
fn write_atomic_sync(path: &Path, contents: &str) -> std::io::Result<()> {
|
||||
let parent = path.parent().unwrap_or_else(|| Path::new("."));
|
||||
std::fs::create_dir_all(parent)?;
|
||||
|
||||
let tmp_name = format!(
|
||||
".{}.tmp-{}",
|
||||
path.file_name()
|
||||
.and_then(|s| s.to_str())
|
||||
.unwrap_or("config.toml"),
|
||||
rand::random::<u64>()
|
||||
);
|
||||
let tmp_path = parent.join(tmp_name);
|
||||
|
||||
let write_result = (|| {
|
||||
let mut file = std::fs::OpenOptions::new()
|
||||
.create_new(true)
|
||||
.write(true)
|
||||
.open(&tmp_path)?;
|
||||
file.write_all(contents.as_bytes())?;
|
||||
file.sync_all()?;
|
||||
std::fs::rename(&tmp_path, path)?;
|
||||
if let Ok(dir) = std::fs::File::open(parent) {
|
||||
let _ = dir.sync_all();
|
||||
}
|
||||
Ok(())
|
||||
})();
|
||||
|
||||
if write_result.is_err() {
|
||||
let _ = std::fs::remove_file(&tmp_path);
|
||||
}
|
||||
write_result
|
||||
}
|
||||
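`compute_revision` plus the `If-Match` header give the API optimistic concurrency: the revision is the SHA-256 of the config file, mutations echo it back, and a stale value is rejected with `409 revision_conflict`. A client-side sketch; the address, auth value, and request payload are assumptions, while the `If-Match` mechanics come from this file and `src/api/mod.rs` below:

```sh
# Fetch the current revision from any successful response envelope.
REV=$(curl -s -H "Authorization: <api.auth_header value>" \
  http://127.0.0.1:8080/v1/health | jq -r .revision)

# Echo it back on a mutation; if someone changed the config in between,
# the server answers 409 revision_conflict instead of overwriting.
curl -s -X POST \
  -H "Authorization: <api.auth_header value>" \
  -H "If-Match: \"$REV\"" \
  -d '{"username":"alice"}' \
  http://127.0.0.1:8080/v1/users
```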
src/api/events.rs (new file, 90 lines)
@@ -0,0 +1,90 @@
|
||||
use std::collections::VecDeque;
|
||||
use std::sync::Mutex;
|
||||
use std::time::{SystemTime, UNIX_EPOCH};
|
||||
|
||||
use serde::Serialize;
|
||||
|
||||
#[derive(Clone, Serialize)]
|
||||
pub(super) struct ApiEventRecord {
|
||||
pub(super) seq: u64,
|
||||
pub(super) ts_epoch_secs: u64,
|
||||
pub(super) event_type: String,
|
||||
pub(super) context: String,
|
||||
}
|
||||
|
||||
#[derive(Clone, Serialize)]
|
||||
pub(super) struct ApiEventSnapshot {
|
||||
pub(super) capacity: usize,
|
||||
pub(super) dropped_total: u64,
|
||||
pub(super) events: Vec<ApiEventRecord>,
|
||||
}
|
||||
|
||||
struct ApiEventsInner {
|
||||
capacity: usize,
|
||||
dropped_total: u64,
|
||||
next_seq: u64,
|
||||
events: VecDeque<ApiEventRecord>,
|
||||
}
|
||||
|
||||
/// Bounded ring-buffer for control-plane API/runtime events.
|
||||
pub(crate) struct ApiEventStore {
|
||||
inner: Mutex<ApiEventsInner>,
|
||||
}
|
||||
|
||||
impl ApiEventStore {
|
||||
pub(super) fn new(capacity: usize) -> Self {
|
||||
let bounded = capacity.max(16);
|
||||
Self {
|
||||
inner: Mutex::new(ApiEventsInner {
|
||||
capacity: bounded,
|
||||
dropped_total: 0,
|
||||
next_seq: 1,
|
||||
events: VecDeque::with_capacity(bounded),
|
||||
}),
|
||||
}
|
||||
}
|
||||
|
||||
pub(super) fn record(&self, event_type: &str, context: impl Into<String>) {
|
||||
let now_epoch_secs = SystemTime::now()
|
||||
.duration_since(UNIX_EPOCH)
|
||||
.unwrap_or_default()
|
||||
.as_secs();
|
||||
let mut context = context.into();
|
||||
if context.len() > 256 {
|
||||
context.truncate(256);
|
||||
}
|
||||
|
||||
let mut guard = self.inner.lock().expect("api event store mutex poisoned");
|
||||
if guard.events.len() == guard.capacity {
|
||||
guard.events.pop_front();
|
||||
guard.dropped_total = guard.dropped_total.saturating_add(1);
|
||||
}
|
||||
let seq = guard.next_seq;
|
||||
guard.next_seq = guard.next_seq.saturating_add(1);
|
||||
guard.events.push_back(ApiEventRecord {
|
||||
seq,
|
||||
ts_epoch_secs: now_epoch_secs,
|
||||
event_type: event_type.to_string(),
|
||||
context,
|
||||
});
|
||||
}
|
||||
|
||||
pub(super) fn snapshot(&self, limit: usize) -> ApiEventSnapshot {
|
||||
let guard = self.inner.lock().expect("api event store mutex poisoned");
|
||||
let bounded_limit = limit.clamp(1, guard.capacity.max(1));
|
||||
let mut items: Vec<ApiEventRecord> = guard
|
||||
.events
|
||||
.iter()
|
||||
.rev()
|
||||
.take(bounded_limit)
|
||||
.cloned()
|
||||
.collect();
|
||||
items.reverse();
|
||||
|
||||
ApiEventSnapshot {
|
||||
capacity: guard.capacity,
|
||||
dropped_total: guard.dropped_total,
|
||||
events: items,
|
||||
}
|
||||
}
|
||||
}
|
||||
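This ring buffer backs `GET /v1/runtime/events/recent` (wired up in `src/api/mod.rs` below). A read-side sketch; the address and auth value are assumptions, and the `limit` query parameter is inferred from the `snapshot(limit)` signature rather than documented here:

```sh
curl -s -H "Authorization: <api.auth_header value>" \
  "http://127.0.0.1:8080/v1/runtime/events/recent?limit=50"
```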
src/api/http_utils.rs (new file, 91 lines)
@@ -0,0 +1,91 @@
|
||||
use http_body_util::{BodyExt, Full};
|
||||
use hyper::StatusCode;
|
||||
use hyper::body::{Bytes, Incoming};
|
||||
use serde::Serialize;
|
||||
use serde::de::DeserializeOwned;
|
||||
|
||||
use super::model::{ApiFailure, ErrorBody, ErrorResponse, SuccessResponse};
|
||||
|
||||
pub(super) fn success_response<T: Serialize>(
|
||||
status: StatusCode,
|
||||
data: T,
|
||||
revision: String,
|
||||
) -> hyper::Response<Full<Bytes>> {
|
||||
let payload = SuccessResponse {
|
||||
ok: true,
|
||||
data,
|
||||
revision,
|
||||
};
|
||||
let body = serde_json::to_vec(&payload).unwrap_or_else(|_| b"{\"ok\":false}".to_vec());
|
||||
hyper::Response::builder()
|
||||
.status(status)
|
||||
.header("content-type", "application/json; charset=utf-8")
|
||||
.body(Full::new(Bytes::from(body)))
|
||||
.unwrap()
|
||||
}
|
||||
|
||||
pub(super) fn error_response(
|
||||
request_id: u64,
|
||||
failure: ApiFailure,
|
||||
) -> hyper::Response<Full<Bytes>> {
|
||||
let payload = ErrorResponse {
|
||||
ok: false,
|
||||
error: ErrorBody {
|
||||
code: failure.code,
|
||||
message: failure.message,
|
||||
},
|
||||
request_id,
|
||||
};
|
||||
let body = serde_json::to_vec(&payload).unwrap_or_else(|_| {
|
||||
format!(
|
||||
"{{\"ok\":false,\"error\":{{\"code\":\"internal_error\",\"message\":\"serialization failed\"}},\"request_id\":{}}}",
|
||||
request_id
|
||||
)
|
||||
.into_bytes()
|
||||
});
|
||||
hyper::Response::builder()
|
||||
.status(failure.status)
|
||||
.header("content-type", "application/json; charset=utf-8")
|
||||
.body(Full::new(Bytes::from(body)))
|
||||
.unwrap()
|
||||
}
|
||||
|
||||
pub(super) async fn read_json<T: DeserializeOwned>(
|
||||
body: Incoming,
|
||||
limit: usize,
|
||||
) -> Result<T, ApiFailure> {
|
||||
let bytes = read_body_with_limit(body, limit).await?;
|
||||
serde_json::from_slice(&bytes).map_err(|_| ApiFailure::bad_request("Invalid JSON body"))
|
||||
}
|
||||
|
||||
pub(super) async fn read_optional_json<T: DeserializeOwned>(
|
||||
body: Incoming,
|
||||
limit: usize,
|
||||
) -> Result<Option<T>, ApiFailure> {
|
||||
let bytes = read_body_with_limit(body, limit).await?;
|
||||
if bytes.is_empty() {
|
||||
return Ok(None);
|
||||
}
|
||||
serde_json::from_slice(&bytes)
|
||||
.map(Some)
|
||||
.map_err(|_| ApiFailure::bad_request("Invalid JSON body"))
|
||||
}
|
||||
|
||||
async fn read_body_with_limit(body: Incoming, limit: usize) -> Result<Vec<u8>, ApiFailure> {
|
||||
let mut collected = Vec::new();
|
||||
let mut body = body;
|
||||
while let Some(frame_result) = body.frame().await {
|
||||
let frame = frame_result.map_err(|_| ApiFailure::bad_request("Invalid request body"))?;
|
||||
if let Some(chunk) = frame.data_ref() {
|
||||
if collected.len().saturating_add(chunk.len()) > limit {
|
||||
return Err(ApiFailure::new(
|
||||
StatusCode::PAYLOAD_TOO_LARGE,
|
||||
"payload_too_large",
|
||||
format!("Body exceeds {} bytes", limit),
|
||||
));
|
||||
}
|
||||
collected.extend_from_slice(chunk);
|
||||
}
|
||||
}
|
||||
Ok(collected)
|
||||
}
|
||||
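Every handler response goes through one of these two envelopes, so clients can branch on `ok` alone. Illustrative shapes (field names from `SuccessResponse`/`ErrorResponse`; the values are made up):

```sh
# Success:  {"ok":true,"data":{...},"revision":"<sha256 of config>"}
# Error:    {"ok":false,"error":{"code":"unauthorized","message":"..."},"request_id":7}
curl -s http://127.0.0.1:8080/v1/health | jq .
```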
src/api/mod.rs (new file, 540 lines)
@@ -0,0 +1,540 @@
|
||||
use std::convert::Infallible;
|
||||
use std::net::{IpAddr, SocketAddr};
|
||||
use std::path::PathBuf;
|
||||
use std::sync::Arc;
|
||||
use std::sync::atomic::{AtomicBool, AtomicU64, Ordering};
|
||||
|
||||
use http_body_util::Full;
|
||||
use hyper::body::{Bytes, Incoming};
|
||||
use hyper::header::AUTHORIZATION;
|
||||
use hyper::server::conn::http1;
|
||||
use hyper::service::service_fn;
|
||||
use hyper::{Method, Request, Response, StatusCode};
|
||||
use tokio::net::TcpListener;
|
||||
use tokio::sync::{Mutex, RwLock, watch};
|
||||
use tracing::{debug, info, warn};
|
||||
|
||||
use crate::config::ProxyConfig;
|
||||
use crate::ip_tracker::UserIpTracker;
|
||||
use crate::startup::StartupTracker;
|
||||
use crate::stats::Stats;
|
||||
use crate::transport::middle_proxy::MePool;
|
||||
use crate::transport::UpstreamManager;
|
||||
|
||||
mod config_store;
|
||||
mod events;
|
||||
mod http_utils;
|
||||
mod model;
|
||||
mod runtime_edge;
|
||||
mod runtime_init;
|
||||
mod runtime_min;
|
||||
mod runtime_stats;
|
||||
mod runtime_watch;
|
||||
mod runtime_zero;
|
||||
mod users;
|
||||
|
||||
use config_store::{current_revision, parse_if_match};
|
||||
use http_utils::{error_response, read_json, read_optional_json, success_response};
|
||||
use events::ApiEventStore;
|
||||
use model::{
|
||||
ApiFailure, CreateUserRequest, HealthData, PatchUserRequest, RotateSecretRequest, SummaryData,
|
||||
};
|
||||
use runtime_edge::{
|
||||
EdgeConnectionsCacheEntry, build_runtime_connections_summary_data,
|
||||
build_runtime_events_recent_data,
|
||||
};
|
||||
use runtime_init::build_runtime_initialization_data;
|
||||
use runtime_min::{
|
||||
build_runtime_me_pool_state_data, build_runtime_me_quality_data, build_runtime_nat_stun_data,
|
||||
build_runtime_upstream_quality_data, build_security_whitelist_data,
|
||||
};
|
||||
use runtime_stats::{
|
||||
MinimalCacheEntry, build_dcs_data, build_me_writers_data, build_minimal_all_data,
|
||||
build_upstreams_data, build_zero_all_data,
|
||||
};
|
||||
use runtime_zero::{
|
||||
build_limits_effective_data, build_runtime_gates_data, build_security_posture_data,
|
||||
build_system_info_data,
|
||||
};
|
||||
use runtime_watch::spawn_runtime_watchers;
|
||||
use users::{create_user, delete_user, patch_user, rotate_secret, users_from_config};
|
||||
|
||||
pub(super) struct ApiRuntimeState {
|
||||
pub(super) process_started_at_epoch_secs: u64,
|
||||
pub(super) config_reload_count: AtomicU64,
|
||||
pub(super) last_config_reload_epoch_secs: AtomicU64,
|
||||
pub(super) admission_open: AtomicBool,
|
||||
}
|
||||
|
||||
#[derive(Clone)]
|
||||
pub(super) struct ApiShared {
|
||||
pub(super) stats: Arc<Stats>,
|
||||
pub(super) ip_tracker: Arc<UserIpTracker>,
|
||||
pub(super) me_pool: Arc<RwLock<Option<Arc<MePool>>>>,
|
||||
pub(super) upstream_manager: Arc<UpstreamManager>,
|
||||
pub(super) config_path: PathBuf,
|
||||
pub(super) startup_detected_ip_v4: Option<IpAddr>,
|
||||
pub(super) startup_detected_ip_v6: Option<IpAddr>,
|
||||
pub(super) mutation_lock: Arc<Mutex<()>>,
|
||||
pub(super) minimal_cache: Arc<Mutex<Option<MinimalCacheEntry>>>,
|
||||
pub(super) runtime_edge_connections_cache: Arc<Mutex<Option<EdgeConnectionsCacheEntry>>>,
|
||||
pub(super) runtime_edge_recompute_lock: Arc<Mutex<()>>,
|
||||
pub(super) runtime_events: Arc<ApiEventStore>,
|
||||
pub(super) request_id: Arc<AtomicU64>,
|
||||
pub(super) runtime_state: Arc<ApiRuntimeState>,
|
||||
pub(super) startup_tracker: Arc<StartupTracker>,
|
||||
}
|
||||
|
||||
impl ApiShared {
|
||||
fn next_request_id(&self) -> u64 {
|
||||
self.request_id.fetch_add(1, Ordering::Relaxed)
|
||||
}
|
||||
}
|
||||
|
||||
pub async fn serve(
|
||||
listen: SocketAddr,
|
||||
stats: Arc<Stats>,
|
||||
ip_tracker: Arc<UserIpTracker>,
|
||||
me_pool: Arc<RwLock<Option<Arc<MePool>>>>,
|
||||
upstream_manager: Arc<UpstreamManager>,
|
||||
config_rx: watch::Receiver<Arc<ProxyConfig>>,
|
||||
admission_rx: watch::Receiver<bool>,
|
||||
config_path: PathBuf,
|
||||
startup_detected_ip_v4: Option<IpAddr>,
|
||||
startup_detected_ip_v6: Option<IpAddr>,
|
||||
process_started_at_epoch_secs: u64,
|
||||
startup_tracker: Arc<StartupTracker>,
|
||||
) {
|
||||
let listener = match TcpListener::bind(listen).await {
|
||||
Ok(listener) => listener,
|
||||
Err(error) => {
|
||||
warn!(
|
||||
error = %error,
|
||||
listen = %listen,
|
||||
"Failed to bind API listener"
|
||||
);
|
||||
return;
|
||||
}
|
||||
};
|
||||
|
||||
info!("API endpoint: http://{}/v1/*", listen);
|
||||
|
||||
let runtime_state = Arc::new(ApiRuntimeState {
|
||||
process_started_at_epoch_secs,
|
||||
config_reload_count: AtomicU64::new(0),
|
||||
last_config_reload_epoch_secs: AtomicU64::new(0),
|
||||
admission_open: AtomicBool::new(*admission_rx.borrow()),
|
||||
});
|
||||
|
||||
let shared = Arc::new(ApiShared {
|
||||
stats,
|
||||
ip_tracker,
|
||||
me_pool,
|
||||
upstream_manager,
|
||||
config_path,
|
||||
startup_detected_ip_v4,
|
||||
startup_detected_ip_v6,
|
||||
mutation_lock: Arc::new(Mutex::new(())),
|
||||
minimal_cache: Arc::new(Mutex::new(None)),
|
||||
runtime_edge_connections_cache: Arc::new(Mutex::new(None)),
|
||||
runtime_edge_recompute_lock: Arc::new(Mutex::new(())),
|
||||
runtime_events: Arc::new(ApiEventStore::new(
|
||||
config_rx.borrow().server.api.runtime_edge_events_capacity,
|
||||
)),
|
||||
request_id: Arc::new(AtomicU64::new(1)),
|
||||
runtime_state: runtime_state.clone(),
|
||||
startup_tracker,
|
||||
});
|
||||
|
||||
spawn_runtime_watchers(
|
||||
config_rx.clone(),
|
||||
admission_rx.clone(),
|
||||
runtime_state.clone(),
|
||||
shared.runtime_events.clone(),
|
||||
);
|
||||
|
||||
loop {
|
||||
let (stream, peer) = match listener.accept().await {
|
||||
Ok(v) => v,
|
||||
Err(error) => {
|
||||
warn!(error = %error, "API accept error");
|
||||
continue;
|
||||
}
|
||||
};
|
||||
|
||||
let shared_conn = shared.clone();
|
||||
let config_rx_conn = config_rx.clone();
|
||||
tokio::spawn(async move {
|
||||
let svc = service_fn(move |req: Request<Incoming>| {
|
||||
let shared_req = shared_conn.clone();
|
||||
let config_rx_req = config_rx_conn.clone();
|
||||
async move { handle(req, peer, shared_req, config_rx_req).await }
|
||||
});
|
||||
if let Err(error) = http1::Builder::new()
|
||||
.serve_connection(hyper_util::rt::TokioIo::new(stream), svc)
|
||||
.await
|
||||
{
|
||||
debug!(error = %error, "API connection error");
|
||||
}
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
async fn handle(
|
||||
req: Request<Incoming>,
|
||||
peer: SocketAddr,
|
||||
shared: Arc<ApiShared>,
|
||||
config_rx: watch::Receiver<Arc<ProxyConfig>>,
|
||||
) -> Result<Response<Full<Bytes>>, Infallible> {
|
||||
let request_id = shared.next_request_id();
|
||||
let cfg = config_rx.borrow().clone();
|
||||
let api_cfg = &cfg.server.api;
|
||||
|
||||
if !api_cfg.enabled {
|
||||
return Ok(error_response(
|
||||
request_id,
|
||||
ApiFailure::new(
|
||||
StatusCode::SERVICE_UNAVAILABLE,
|
||||
"api_disabled",
|
||||
"API is disabled",
|
||||
),
|
||||
));
|
||||
}
|
||||
|
||||
if !api_cfg.whitelist.is_empty()
|
||||
&& !api_cfg
|
||||
.whitelist
|
||||
.iter()
|
||||
.any(|net| net.contains(peer.ip()))
|
||||
{
|
||||
return Ok(error_response(
|
||||
request_id,
|
||||
ApiFailure::new(StatusCode::FORBIDDEN, "forbidden", "Source IP is not allowed"),
|
||||
));
|
||||
}
|
||||
|
||||
if !api_cfg.auth_header.is_empty() {
|
||||
let auth_ok = req
|
||||
.headers()
|
||||
.get(AUTHORIZATION)
|
||||
.and_then(|v| v.to_str().ok())
|
||||
.map(|v| v == api_cfg.auth_header)
|
||||
.unwrap_or(false);
|
||||
if !auth_ok {
|
||||
return Ok(error_response(
|
||||
request_id,
|
||||
ApiFailure::new(
|
||||
StatusCode::UNAUTHORIZED,
|
||||
"unauthorized",
|
||||
"Missing or invalid Authorization header",
|
||||
),
|
||||
));
|
||||
}
|
||||
}
|
||||
|
||||
let method = req.method().clone();
|
||||
let path = req.uri().path().to_string();
|
||||
let query = req.uri().query().map(str::to_string);
|
||||
let body_limit = api_cfg.request_body_limit_bytes;
|
||||
|
||||
let result: Result<Response<Full<Bytes>>, ApiFailure> = async {
|
||||
match (method.as_str(), path.as_str()) {
|
||||
("GET", "/v1/health") => {
|
||||
let revision = current_revision(&shared.config_path).await?;
|
||||
let data = HealthData {
|
||||
status: "ok",
|
||||
read_only: api_cfg.read_only,
|
||||
};
|
||||
Ok(success_response(StatusCode::OK, data, revision))
|
||||
}
|
||||
("GET", "/v1/system/info") => {
|
||||
let revision = current_revision(&shared.config_path).await?;
|
||||
let data = build_system_info_data(shared.as_ref(), cfg.as_ref(), &revision);
|
||||
Ok(success_response(StatusCode::OK, data, revision))
|
||||
}
|
||||
("GET", "/v1/runtime/gates") => {
|
||||
let revision = current_revision(&shared.config_path).await?;
|
||||
let data = build_runtime_gates_data(shared.as_ref(), cfg.as_ref()).await;
|
||||
Ok(success_response(StatusCode::OK, data, revision))
|
||||
}
|
||||
("GET", "/v1/runtime/initialization") => {
|
||||
let revision = current_revision(&shared.config_path).await?;
|
||||
let data = build_runtime_initialization_data(shared.as_ref()).await;
|
||||
Ok(success_response(StatusCode::OK, data, revision))
|
||||
}
|
||||
("GET", "/v1/limits/effective") => {
|
||||
let revision = current_revision(&shared.config_path).await?;
|
||||
let data = build_limits_effective_data(cfg.as_ref());
|
||||
Ok(success_response(StatusCode::OK, data, revision))
|
||||
}
|
||||
("GET", "/v1/security/posture") => {
|
||||
let revision = current_revision(&shared.config_path).await?;
|
||||
let data = build_security_posture_data(cfg.as_ref());
|
||||
Ok(success_response(StatusCode::OK, data, revision))
|
||||
}
|
||||
("GET", "/v1/security/whitelist") => {
|
||||
let revision = current_revision(&shared.config_path).await?;
|
||||
let data = build_security_whitelist_data(cfg.as_ref());
|
||||
Ok(success_response(StatusCode::OK, data, revision))
|
||||
}
|
||||
("GET", "/v1/stats/summary") => {
|
||||
let revision = current_revision(&shared.config_path).await?;
|
||||
let data = SummaryData {
|
||||
uptime_seconds: shared.stats.uptime_secs(),
|
||||
connections_total: shared.stats.get_connects_all(),
|
||||
connections_bad_total: shared.stats.get_connects_bad(),
|
||||
handshake_timeouts_total: shared.stats.get_handshake_timeouts(),
|
||||
configured_users: cfg.access.users.len(),
|
||||
};
|
||||
Ok(success_response(StatusCode::OK, data, revision))
|
||||
}
|
||||
("GET", "/v1/stats/zero/all") => {
|
||||
let revision = current_revision(&shared.config_path).await?;
|
||||
let data = build_zero_all_data(&shared.stats, cfg.access.users.len());
|
||||
Ok(success_response(StatusCode::OK, data, revision))
|
||||
}
|
||||
("GET", "/v1/stats/upstreams") => {
|
||||
let revision = current_revision(&shared.config_path).await?;
|
||||
let data = build_upstreams_data(shared.as_ref(), api_cfg);
|
||||
Ok(success_response(StatusCode::OK, data, revision))
|
||||
}
|
||||
("GET", "/v1/stats/minimal/all") => {
|
||||
let revision = current_revision(&shared.config_path).await?;
|
||||
let data = build_minimal_all_data(shared.as_ref(), api_cfg).await;
|
||||
Ok(success_response(StatusCode::OK, data, revision))
|
||||
}
|
||||
("GET", "/v1/stats/me-writers") => {
|
||||
let revision = current_revision(&shared.config_path).await?;
|
||||
let data = build_me_writers_data(shared.as_ref(), api_cfg).await;
|
||||
Ok(success_response(StatusCode::OK, data, revision))
|
||||
}
|
||||
("GET", "/v1/stats/dcs") => {
|
||||
let revision = current_revision(&shared.config_path).await?;
|
||||
let data = build_dcs_data(shared.as_ref(), api_cfg).await;
|
||||
Ok(success_response(StatusCode::OK, data, revision))
|
||||
}
|
||||
("GET", "/v1/runtime/me_pool_state") => {
|
||||
let revision = current_revision(&shared.config_path).await?;
|
||||
let data = build_runtime_me_pool_state_data(shared.as_ref()).await;
|
||||
Ok(success_response(StatusCode::OK, data, revision))
|
||||
}
|
||||
("GET", "/v1/runtime/me_quality") => {
|
||||
let revision = current_revision(&shared.config_path).await?;
|
||||
let data = build_runtime_me_quality_data(shared.as_ref()).await;
|
||||
Ok(success_response(StatusCode::OK, data, revision))
|
||||
}
|
||||
("GET", "/v1/runtime/upstream_quality") => {
|
||||
let revision = current_revision(&shared.config_path).await?;
|
||||
let data = build_runtime_upstream_quality_data(shared.as_ref()).await;
|
||||
Ok(success_response(StatusCode::OK, data, revision))
|
||||
}
|
||||
("GET", "/v1/runtime/nat_stun") => {
|
||||
let revision = current_revision(&shared.config_path).await?;
|
||||
let data = build_runtime_nat_stun_data(shared.as_ref()).await;
|
||||
Ok(success_response(StatusCode::OK, data, revision))
|
||||
}
|
||||
("GET", "/v1/runtime/connections/summary") => {
|
||||
let revision = current_revision(&shared.config_path).await?;
|
||||
let data = build_runtime_connections_summary_data(shared.as_ref(), cfg.as_ref()).await;
|
||||
Ok(success_response(StatusCode::OK, data, revision))
|
||||
}
|
||||
("GET", "/v1/runtime/events/recent") => {
|
||||
let revision = current_revision(&shared.config_path).await?;
|
||||
let data = build_runtime_events_recent_data(
|
||||
shared.as_ref(),
|
||||
cfg.as_ref(),
|
||||
query.as_deref(),
|
||||
);
|
||||
Ok(success_response(StatusCode::OK, data, revision))
|
||||
}
|
||||
("GET", "/v1/stats/users") | ("GET", "/v1/users") => {
|
||||
let revision = current_revision(&shared.config_path).await?;
|
||||
let users = users_from_config(
|
||||
&cfg,
|
||||
&shared.stats,
|
||||
&shared.ip_tracker,
|
||||
shared.startup_detected_ip_v4,
|
||||
shared.startup_detected_ip_v6,
|
||||
)
|
||||
.await;
|
||||
Ok(success_response(StatusCode::OK, users, revision))
|
||||
}
|
||||
("POST", "/v1/users") => {
|
||||
if api_cfg.read_only {
|
||||
return Ok(error_response(
|
||||
request_id,
|
||||
ApiFailure::new(
|
||||
StatusCode::FORBIDDEN,
|
||||
"read_only",
|
||||
"API runs in read-only mode",
|
||||
),
|
||||
));
|
||||
}
|
||||
let expected_revision = parse_if_match(req.headers());
|
||||
let body = read_json::<CreateUserRequest>(req.into_body(), body_limit).await?;
|
||||
let result = create_user(body, expected_revision, &shared).await;
|
||||
let (data, revision) = match result {
|
||||
Ok(ok) => ok,
|
||||
Err(error) => {
|
||||
shared.runtime_events.record("api.user.create.failed", error.code);
|
||||
return Err(error);
|
||||
}
|
||||
};
|
||||
shared
|
||||
.runtime_events
|
||||
.record("api.user.create.ok", format!("username={}", data.user.username));
|
||||
Ok(success_response(StatusCode::CREATED, data, revision))
|
||||
}
|
||||
            _ => {
                if let Some(user) = path.strip_prefix("/v1/users/")
                    && !user.is_empty()
                    && !user.contains('/')
                {
                    if method == Method::GET {
                        let revision = current_revision(&shared.config_path).await?;
                        let users = users_from_config(
                            &cfg,
                            &shared.stats,
                            &shared.ip_tracker,
                            shared.startup_detected_ip_v4,
                            shared.startup_detected_ip_v6,
                        )
                        .await;
                        if let Some(user_info) = users.into_iter().find(|entry| entry.username == user)
                        {
                            return Ok(success_response(StatusCode::OK, user_info, revision));
                        }
                        return Ok(error_response(
                            request_id,
                            ApiFailure::new(StatusCode::NOT_FOUND, "not_found", "User not found"),
                        ));
                    }
                    if method == Method::PATCH {
                        if api_cfg.read_only {
                            return Ok(error_response(
                                request_id,
                                ApiFailure::new(
                                    StatusCode::FORBIDDEN,
                                    "read_only",
                                    "API runs in read-only mode",
                                ),
                            ));
                        }
                        let expected_revision = parse_if_match(req.headers());
                        let body = read_json::<PatchUserRequest>(req.into_body(), body_limit).await?;
                        let result = patch_user(user, body, expected_revision, &shared).await;
                        let (data, revision) = match result {
                            Ok(ok) => ok,
                            Err(error) => {
                                shared.runtime_events.record(
                                    "api.user.patch.failed",
                                    format!("username={} code={}", user, error.code),
                                );
                                return Err(error);
                            }
                        };
                        shared
                            .runtime_events
                            .record("api.user.patch.ok", format!("username={}", data.username));
                        return Ok(success_response(StatusCode::OK, data, revision));
                    }
                    if method == Method::DELETE {
                        if api_cfg.read_only {
                            return Ok(error_response(
                                request_id,
                                ApiFailure::new(
                                    StatusCode::FORBIDDEN,
                                    "read_only",
                                    "API runs in read-only mode",
                                ),
                            ));
                        }
                        let expected_revision = parse_if_match(req.headers());
                        let result = delete_user(user, expected_revision, &shared).await;
                        let (deleted_user, revision) = match result {
                            Ok(ok) => ok,
                            Err(error) => {
                                shared.runtime_events.record(
                                    "api.user.delete.failed",
                                    format!("username={} code={}", user, error.code),
                                );
                                return Err(error);
                            }
                        };
                        shared.runtime_events.record(
                            "api.user.delete.ok",
                            format!("username={}", deleted_user),
                        );
                        return Ok(success_response(StatusCode::OK, deleted_user, revision));
                    }
                    if method == Method::POST
                        && let Some(base_user) = user.strip_suffix("/rotate-secret")
                        && !base_user.is_empty()
                        && !base_user.contains('/')
                    {
                        if api_cfg.read_only {
                            return Ok(error_response(
                                request_id,
                                ApiFailure::new(
                                    StatusCode::FORBIDDEN,
                                    "read_only",
                                    "API runs in read-only mode",
                                ),
                            ));
                        }
                        let expected_revision = parse_if_match(req.headers());
                        let body =
                            read_optional_json::<RotateSecretRequest>(req.into_body(), body_limit)
                                .await?;
                        let result = rotate_secret(
                            base_user,
                            body.unwrap_or_default(),
                            expected_revision,
                            &shared,
                        )
                        .await;
                        let (data, revision) = match result {
                            Ok(ok) => ok,
                            Err(error) => {
                                shared.runtime_events.record(
                                    "api.user.rotate_secret.failed",
                                    format!("username={} code={}", base_user, error.code),
                                );
                                return Err(error);
                            }
                        };
                        shared.runtime_events.record(
                            "api.user.rotate_secret.ok",
                            format!("username={}", base_user),
                        );
                        return Ok(success_response(StatusCode::OK, data, revision));
                    }
                    if method == Method::POST {
                        return Ok(error_response(
                            request_id,
                            ApiFailure::new(StatusCode::NOT_FOUND, "not_found", "Route not found"),
                        ));
                    }
                    return Ok(error_response(
                        request_id,
                        ApiFailure::new(
                            StatusCode::METHOD_NOT_ALLOWED,
                            "method_not_allowed",
                            "Unsupported HTTP method for this route",
                        ),
                    ));
                }
                Ok(error_response(
                    request_id,
                    ApiFailure::new(StatusCode::NOT_FOUND, "not_found", "Route not found"),
                ))
            }
        }
    }
    .await;

    match result {
        Ok(resp) => Ok(resp),
        Err(error) => Ok(error_response(request_id, error)),
    }
}
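Aside, not part of the diff: every route above wraps its payload in one of the two envelopes defined in src/api/model.rs below. A minimal, self-contained sketch (given serde with derive plus serde_json) of how the success shape serializes; it assumes success_response sets ok = true, and the revision value "42" is purely illustrative:

use serde::Serialize;

// Mirrors SuccessResponse<T> from src/api/model.rs below.
#[derive(Serialize)]
struct SuccessResponse<T> {
    ok: bool,
    data: T,
    revision: String,
}

fn main() {
    let body = SuccessResponse {
        ok: true,
        data: vec!["alice", "bob"],
        revision: "42".to_string(),
    };
    // Prints: {"ok":true,"data":["alice","bob"],"revision":"42"}
    println!("{}", serde_json::to_string(&body).unwrap());
}

Clients echo that revision back via If-Match on mutating requests (see parse_if_match above), which appears to be the optimistic-concurrency handle for the read-modify-write flow.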
477  src/api/model.rs  Normal file
@@ -0,0 +1,477 @@
use std::net::IpAddr;

use chrono::{DateTime, Utc};
use hyper::StatusCode;
use rand::Rng;
use serde::{Deserialize, Serialize};

const MAX_USERNAME_LEN: usize = 64;

#[derive(Debug)]
pub(super) struct ApiFailure {
    pub(super) status: StatusCode,
    pub(super) code: &'static str,
    pub(super) message: String,
}

impl ApiFailure {
    pub(super) fn new(status: StatusCode, code: &'static str, message: impl Into<String>) -> Self {
        Self {
            status,
            code,
            message: message.into(),
        }
    }

    pub(super) fn internal(message: impl Into<String>) -> Self {
        Self::new(StatusCode::INTERNAL_SERVER_ERROR, "internal_error", message)
    }

    pub(super) fn bad_request(message: impl Into<String>) -> Self {
        Self::new(StatusCode::BAD_REQUEST, "bad_request", message)
    }
}

#[derive(Serialize)]
pub(super) struct ErrorBody {
    pub(super) code: &'static str,
    pub(super) message: String,
}

#[derive(Serialize)]
pub(super) struct ErrorResponse {
    pub(super) ok: bool,
    pub(super) error: ErrorBody,
    pub(super) request_id: u64,
}

#[derive(Serialize)]
pub(super) struct SuccessResponse<T> {
    pub(super) ok: bool,
    pub(super) data: T,
    pub(super) revision: String,
}

#[derive(Serialize)]
pub(super) struct HealthData {
    pub(super) status: &'static str,
    pub(super) read_only: bool,
}

#[derive(Serialize)]
pub(super) struct SummaryData {
    pub(super) uptime_seconds: f64,
    pub(super) connections_total: u64,
    pub(super) connections_bad_total: u64,
    pub(super) handshake_timeouts_total: u64,
    pub(super) configured_users: usize,
}

#[derive(Serialize, Clone)]
pub(super) struct ZeroCodeCount {
    pub(super) code: i32,
    pub(super) total: u64,
}

#[derive(Serialize, Clone)]
pub(super) struct ZeroCoreData {
    pub(super) uptime_seconds: f64,
    pub(super) connections_total: u64,
    pub(super) connections_bad_total: u64,
    pub(super) handshake_timeouts_total: u64,
    pub(super) configured_users: usize,
    pub(super) telemetry_core_enabled: bool,
    pub(super) telemetry_user_enabled: bool,
    pub(super) telemetry_me_level: String,
}

#[derive(Serialize, Clone)]
pub(super) struct ZeroUpstreamData {
    pub(super) connect_attempt_total: u64,
    pub(super) connect_success_total: u64,
    pub(super) connect_fail_total: u64,
    pub(super) connect_failfast_hard_error_total: u64,
    pub(super) connect_attempts_bucket_1: u64,
    pub(super) connect_attempts_bucket_2: u64,
    pub(super) connect_attempts_bucket_3_4: u64,
    pub(super) connect_attempts_bucket_gt_4: u64,
    pub(super) connect_duration_success_bucket_le_100ms: u64,
    pub(super) connect_duration_success_bucket_101_500ms: u64,
    pub(super) connect_duration_success_bucket_501_1000ms: u64,
    pub(super) connect_duration_success_bucket_gt_1000ms: u64,
    pub(super) connect_duration_fail_bucket_le_100ms: u64,
    pub(super) connect_duration_fail_bucket_101_500ms: u64,
    pub(super) connect_duration_fail_bucket_501_1000ms: u64,
    pub(super) connect_duration_fail_bucket_gt_1000ms: u64,
}

#[derive(Serialize, Clone)]
pub(super) struct UpstreamDcStatus {
    pub(super) dc: i16,
    pub(super) latency_ema_ms: Option<f64>,
    pub(super) ip_preference: &'static str,
}

#[derive(Serialize, Clone)]
pub(super) struct UpstreamStatus {
    pub(super) upstream_id: usize,
    pub(super) route_kind: &'static str,
    pub(super) address: String,
    pub(super) weight: u16,
    pub(super) scopes: String,
    pub(super) healthy: bool,
    pub(super) fails: u32,
    pub(super) last_check_age_secs: u64,
    pub(super) effective_latency_ms: Option<f64>,
    pub(super) dc: Vec<UpstreamDcStatus>,
}

#[derive(Serialize, Clone)]
pub(super) struct UpstreamSummaryData {
    pub(super) configured_total: usize,
    pub(super) healthy_total: usize,
    pub(super) unhealthy_total: usize,
    pub(super) direct_total: usize,
    pub(super) socks4_total: usize,
    pub(super) socks5_total: usize,
}

#[derive(Serialize, Clone)]
pub(super) struct UpstreamsData {
    pub(super) enabled: bool,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub(super) reason: Option<&'static str>,
    pub(super) generated_at_epoch_secs: u64,
    pub(super) zero: ZeroUpstreamData,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub(super) summary: Option<UpstreamSummaryData>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub(super) upstreams: Option<Vec<UpstreamStatus>>,
}

#[derive(Serialize, Clone)]
pub(super) struct ZeroMiddleProxyData {
    pub(super) keepalive_sent_total: u64,
    pub(super) keepalive_failed_total: u64,
    pub(super) keepalive_pong_total: u64,
    pub(super) keepalive_timeout_total: u64,
    pub(super) rpc_proxy_req_signal_sent_total: u64,
    pub(super) rpc_proxy_req_signal_failed_total: u64,
    pub(super) rpc_proxy_req_signal_skipped_no_meta_total: u64,
    pub(super) rpc_proxy_req_signal_response_total: u64,
    pub(super) rpc_proxy_req_signal_close_sent_total: u64,
    pub(super) reconnect_attempt_total: u64,
    pub(super) reconnect_success_total: u64,
    pub(super) handshake_reject_total: u64,
    pub(super) handshake_error_codes: Vec<ZeroCodeCount>,
    pub(super) reader_eof_total: u64,
    pub(super) idle_close_by_peer_total: u64,
    pub(super) route_drop_no_conn_total: u64,
    pub(super) route_drop_channel_closed_total: u64,
    pub(super) route_drop_queue_full_total: u64,
    pub(super) route_drop_queue_full_base_total: u64,
    pub(super) route_drop_queue_full_high_total: u64,
    pub(super) socks_kdf_strict_reject_total: u64,
    pub(super) socks_kdf_compat_fallback_total: u64,
    pub(super) endpoint_quarantine_total: u64,
    pub(super) kdf_drift_total: u64,
    pub(super) kdf_port_only_drift_total: u64,
    pub(super) hardswap_pending_reuse_total: u64,
    pub(super) hardswap_pending_ttl_expired_total: u64,
    pub(super) single_endpoint_outage_enter_total: u64,
    pub(super) single_endpoint_outage_exit_total: u64,
    pub(super) single_endpoint_outage_reconnect_attempt_total: u64,
    pub(super) single_endpoint_outage_reconnect_success_total: u64,
    pub(super) single_endpoint_quarantine_bypass_total: u64,
    pub(super) single_endpoint_shadow_rotate_total: u64,
    pub(super) single_endpoint_shadow_rotate_skipped_quarantine_total: u64,
    pub(super) floor_mode_switch_total: u64,
    pub(super) floor_mode_switch_static_to_adaptive_total: u64,
    pub(super) floor_mode_switch_adaptive_to_static_total: u64,
}

#[derive(Serialize, Clone)]
pub(super) struct ZeroPoolData {
    pub(super) pool_swap_total: u64,
    pub(super) pool_drain_active: u64,
    pub(super) pool_force_close_total: u64,
    pub(super) pool_stale_pick_total: u64,
    pub(super) writer_removed_total: u64,
    pub(super) writer_removed_unexpected_total: u64,
    pub(super) refill_triggered_total: u64,
    pub(super) refill_skipped_inflight_total: u64,
    pub(super) refill_failed_total: u64,
    pub(super) writer_restored_same_endpoint_total: u64,
    pub(super) writer_restored_fallback_total: u64,
}

#[derive(Serialize, Clone)]
pub(super) struct ZeroDesyncData {
    pub(super) secure_padding_invalid_total: u64,
    pub(super) desync_total: u64,
    pub(super) desync_full_logged_total: u64,
    pub(super) desync_suppressed_total: u64,
    pub(super) desync_frames_bucket_0: u64,
    pub(super) desync_frames_bucket_1_2: u64,
    pub(super) desync_frames_bucket_3_10: u64,
    pub(super) desync_frames_bucket_gt_10: u64,
}

#[derive(Serialize, Clone)]
pub(super) struct ZeroAllData {
    pub(super) generated_at_epoch_secs: u64,
    pub(super) core: ZeroCoreData,
    pub(super) upstream: ZeroUpstreamData,
    pub(super) middle_proxy: ZeroMiddleProxyData,
    pub(super) pool: ZeroPoolData,
    pub(super) desync: ZeroDesyncData,
}

#[derive(Serialize, Clone)]
pub(super) struct MeWritersSummary {
    pub(super) configured_dc_groups: usize,
    pub(super) configured_endpoints: usize,
    pub(super) available_endpoints: usize,
    pub(super) available_pct: f64,
    pub(super) required_writers: usize,
    pub(super) alive_writers: usize,
    pub(super) coverage_pct: f64,
}

#[derive(Serialize, Clone)]
pub(super) struct MeWriterStatus {
    pub(super) writer_id: u64,
    pub(super) dc: Option<i16>,
    pub(super) endpoint: String,
    pub(super) generation: u64,
    pub(super) state: &'static str,
    pub(super) draining: bool,
    pub(super) degraded: bool,
    pub(super) bound_clients: usize,
    pub(super) idle_for_secs: Option<u64>,
    pub(super) rtt_ema_ms: Option<f64>,
}

#[derive(Serialize, Clone)]
pub(super) struct MeWritersData {
    pub(super) middle_proxy_enabled: bool,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub(super) reason: Option<&'static str>,
    pub(super) generated_at_epoch_secs: u64,
    pub(super) summary: MeWritersSummary,
    pub(super) writers: Vec<MeWriterStatus>,
}

#[derive(Serialize, Clone)]
pub(super) struct DcStatus {
    pub(super) dc: i16,
    pub(super) endpoints: Vec<String>,
    pub(super) endpoint_writers: Vec<DcEndpointWriters>,
    pub(super) available_endpoints: usize,
    pub(super) available_pct: f64,
    pub(super) required_writers: usize,
    pub(super) floor_min: usize,
    pub(super) floor_target: usize,
    pub(super) floor_max: usize,
    pub(super) floor_capped: bool,
    pub(super) alive_writers: usize,
    pub(super) coverage_pct: f64,
    pub(super) rtt_ms: Option<f64>,
    pub(super) load: usize,
}

#[derive(Serialize, Clone)]
pub(super) struct DcEndpointWriters {
    pub(super) endpoint: String,
    pub(super) active_writers: usize,
}

#[derive(Serialize, Clone)]
pub(super) struct DcStatusData {
    pub(super) middle_proxy_enabled: bool,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub(super) reason: Option<&'static str>,
    pub(super) generated_at_epoch_secs: u64,
    pub(super) dcs: Vec<DcStatus>,
}

#[derive(Serialize, Clone)]
pub(super) struct MinimalQuarantineData {
    pub(super) endpoint: String,
    pub(super) remaining_ms: u64,
}

#[derive(Serialize, Clone)]
pub(super) struct MinimalDcPathData {
    pub(super) dc: i16,
    pub(super) ip_preference: Option<&'static str>,
    pub(super) selected_addr_v4: Option<String>,
    pub(super) selected_addr_v6: Option<String>,
}

#[derive(Serialize, Clone)]
pub(super) struct MinimalMeRuntimeData {
    pub(super) active_generation: u64,
    pub(super) warm_generation: u64,
    pub(super) pending_hardswap_generation: u64,
    pub(super) pending_hardswap_age_secs: Option<u64>,
    pub(super) hardswap_enabled: bool,
    pub(super) floor_mode: &'static str,
    pub(super) adaptive_floor_idle_secs: u64,
    pub(super) adaptive_floor_min_writers_single_endpoint: u8,
    pub(super) adaptive_floor_min_writers_multi_endpoint: u8,
    pub(super) adaptive_floor_recover_grace_secs: u64,
    pub(super) adaptive_floor_writers_per_core_total: u16,
    pub(super) adaptive_floor_cpu_cores_override: u16,
    pub(super) adaptive_floor_max_extra_writers_single_per_core: u16,
    pub(super) adaptive_floor_max_extra_writers_multi_per_core: u16,
    pub(super) adaptive_floor_max_active_writers_per_core: u16,
    pub(super) adaptive_floor_max_warm_writers_per_core: u16,
    pub(super) adaptive_floor_max_active_writers_global: u32,
    pub(super) adaptive_floor_max_warm_writers_global: u32,
    pub(super) adaptive_floor_cpu_cores_detected: u32,
    pub(super) adaptive_floor_cpu_cores_effective: u32,
    pub(super) adaptive_floor_global_cap_raw: u64,
    pub(super) adaptive_floor_global_cap_effective: u64,
    pub(super) adaptive_floor_target_writers_total: u64,
    pub(super) adaptive_floor_active_cap_configured: u64,
    pub(super) adaptive_floor_active_cap_effective: u64,
    pub(super) adaptive_floor_warm_cap_configured: u64,
    pub(super) adaptive_floor_warm_cap_effective: u64,
    pub(super) adaptive_floor_active_writers_current: u64,
    pub(super) adaptive_floor_warm_writers_current: u64,
    pub(super) me_keepalive_enabled: bool,
    pub(super) me_keepalive_interval_secs: u64,
    pub(super) me_keepalive_jitter_secs: u64,
    pub(super) me_keepalive_payload_random: bool,
    pub(super) rpc_proxy_req_every_secs: u64,
    pub(super) me_reconnect_max_concurrent_per_dc: u32,
    pub(super) me_reconnect_backoff_base_ms: u64,
    pub(super) me_reconnect_backoff_cap_ms: u64,
    pub(super) me_reconnect_fast_retry_count: u32,
    pub(super) me_pool_drain_ttl_secs: u64,
    pub(super) me_pool_force_close_secs: u64,
    pub(super) me_pool_min_fresh_ratio: f32,
    pub(super) me_bind_stale_mode: &'static str,
    pub(super) me_bind_stale_ttl_secs: u64,
    pub(super) me_single_endpoint_shadow_writers: u8,
    pub(super) me_single_endpoint_outage_mode_enabled: bool,
    pub(super) me_single_endpoint_outage_disable_quarantine: bool,
    pub(super) me_single_endpoint_outage_backoff_min_ms: u64,
    pub(super) me_single_endpoint_outage_backoff_max_ms: u64,
    pub(super) me_single_endpoint_shadow_rotate_every_secs: u64,
    pub(super) me_deterministic_writer_sort: bool,
    pub(super) me_writer_pick_mode: &'static str,
    pub(super) me_writer_pick_sample_size: u8,
    pub(super) me_socks_kdf_policy: &'static str,
    pub(super) quarantined_endpoints_total: usize,
    pub(super) quarantined_endpoints: Vec<MinimalQuarantineData>,
}

#[derive(Serialize, Clone)]
pub(super) struct MinimalAllPayload {
    pub(super) me_writers: MeWritersData,
    pub(super) dcs: DcStatusData,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub(super) me_runtime: Option<MinimalMeRuntimeData>,
    pub(super) network_path: Vec<MinimalDcPathData>,
}

#[derive(Serialize, Clone)]
pub(super) struct MinimalAllData {
    pub(super) enabled: bool,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub(super) reason: Option<&'static str>,
    pub(super) generated_at_epoch_secs: u64,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub(super) data: Option<MinimalAllPayload>,
}

#[derive(Serialize)]
pub(super) struct UserLinks {
    pub(super) classic: Vec<String>,
    pub(super) secure: Vec<String>,
    pub(super) tls: Vec<String>,
}

#[derive(Serialize)]
pub(super) struct UserInfo {
    pub(super) username: String,
    pub(super) user_ad_tag: Option<String>,
    pub(super) max_tcp_conns: Option<usize>,
    pub(super) expiration_rfc3339: Option<String>,
    pub(super) data_quota_bytes: Option<u64>,
    pub(super) max_unique_ips: Option<usize>,
    pub(super) current_connections: u64,
    pub(super) active_unique_ips: usize,
    pub(super) active_unique_ips_list: Vec<IpAddr>,
    pub(super) recent_unique_ips: usize,
    pub(super) recent_unique_ips_list: Vec<IpAddr>,
    pub(super) total_octets: u64,
    pub(super) links: UserLinks,
}

#[derive(Serialize)]
pub(super) struct CreateUserResponse {
    pub(super) user: UserInfo,
    pub(super) secret: String,
}

#[derive(Deserialize)]
pub(super) struct CreateUserRequest {
    pub(super) username: String,
    pub(super) secret: Option<String>,
    pub(super) user_ad_tag: Option<String>,
    pub(super) max_tcp_conns: Option<usize>,
    pub(super) expiration_rfc3339: Option<String>,
    pub(super) data_quota_bytes: Option<u64>,
    pub(super) max_unique_ips: Option<usize>,
}

#[derive(Deserialize)]
pub(super) struct PatchUserRequest {
    pub(super) secret: Option<String>,
    pub(super) user_ad_tag: Option<String>,
    pub(super) max_tcp_conns: Option<usize>,
    pub(super) expiration_rfc3339: Option<String>,
    pub(super) data_quota_bytes: Option<u64>,
    pub(super) max_unique_ips: Option<usize>,
}

#[derive(Default, Deserialize)]
pub(super) struct RotateSecretRequest {
    pub(super) secret: Option<String>,
}

pub(super) fn parse_optional_expiration(
    value: Option<&str>,
) -> Result<Option<DateTime<Utc>>, ApiFailure> {
    let Some(raw) = value else {
        return Ok(None);
    };
    let parsed = DateTime::parse_from_rfc3339(raw)
        .map_err(|_| ApiFailure::bad_request("expiration_rfc3339 must be valid RFC3339"))?;
    Ok(Some(parsed.with_timezone(&Utc)))
}
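// Secrets and ad tags are 32 hex characters (16 raw bytes); usernames allow
// ASCII alphanumerics plus '_', '-', '.' up to MAX_USERNAME_LEN.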
pub(super) fn is_valid_user_secret(secret: &str) -> bool {
    secret.len() == 32 && secret.chars().all(|c| c.is_ascii_hexdigit())
}

pub(super) fn is_valid_ad_tag(tag: &str) -> bool {
    tag.len() == 32 && tag.chars().all(|c| c.is_ascii_hexdigit())
}

pub(super) fn is_valid_username(user: &str) -> bool {
    !user.is_empty()
        && user.len() <= MAX_USERNAME_LEN
        && user
            .chars()
            .all(|ch| ch.is_ascii_alphanumeric() || matches!(ch, '_' | '-' | '.'))
}
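// 16 random bytes hex-encoded yield exactly the 32-character form accepted by
// is_valid_user_secret above.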
pub(super) fn random_user_secret() -> String {
    let mut bytes = [0u8; 16];
    rand::rng().fill(&mut bytes);
    hex::encode(bytes)
}
294  src/api/runtime_edge.rs  Normal file
@@ -0,0 +1,294 @@
use std::cmp::Reverse;
use std::time::{Duration, Instant, SystemTime, UNIX_EPOCH};

use serde::Serialize;

use crate::config::ProxyConfig;

use super::ApiShared;
use super::events::ApiEventRecord;

const FEATURE_DISABLED_REASON: &str = "feature_disabled";
const SOURCE_UNAVAILABLE_REASON: &str = "source_unavailable";
const EVENTS_DEFAULT_LIMIT: usize = 50;
const EVENTS_MAX_LIMIT: usize = 1000;

#[derive(Clone, Serialize)]
pub(super) struct RuntimeEdgeConnectionUserData {
    pub(super) username: String,
    pub(super) current_connections: u64,
    pub(super) total_octets: u64,
}

#[derive(Clone, Serialize)]
pub(super) struct RuntimeEdgeConnectionTotalsData {
    pub(super) current_connections: u64,
    pub(super) current_connections_me: u64,
    pub(super) current_connections_direct: u64,
    pub(super) active_users: usize,
}

#[derive(Clone, Serialize)]
pub(super) struct RuntimeEdgeConnectionTopData {
    pub(super) limit: usize,
    pub(super) by_connections: Vec<RuntimeEdgeConnectionUserData>,
    pub(super) by_throughput: Vec<RuntimeEdgeConnectionUserData>,
}

#[derive(Clone, Serialize)]
pub(super) struct RuntimeEdgeConnectionCacheData {
    pub(super) ttl_ms: u64,
    pub(super) served_from_cache: bool,
    pub(super) stale_cache_used: bool,
}

#[derive(Clone, Serialize)]
pub(super) struct RuntimeEdgeConnectionTelemetryData {
    pub(super) user_enabled: bool,
    pub(super) throughput_is_cumulative: bool,
}

#[derive(Clone, Serialize)]
pub(super) struct RuntimeEdgeConnectionsSummaryPayload {
    pub(super) cache: RuntimeEdgeConnectionCacheData,
    pub(super) totals: RuntimeEdgeConnectionTotalsData,
    pub(super) top: RuntimeEdgeConnectionTopData,
    pub(super) telemetry: RuntimeEdgeConnectionTelemetryData,
}

#[derive(Serialize)]
pub(super) struct RuntimeEdgeConnectionsSummaryData {
    pub(super) enabled: bool,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub(super) reason: Option<&'static str>,
    pub(super) generated_at_epoch_secs: u64,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub(super) data: Option<RuntimeEdgeConnectionsSummaryPayload>,
}

#[derive(Clone)]
pub(crate) struct EdgeConnectionsCacheEntry {
    pub(super) expires_at: Instant,
    pub(super) payload: RuntimeEdgeConnectionsSummaryPayload,
    pub(super) generated_at_epoch_secs: u64,
}

#[derive(Serialize)]
pub(super) struct RuntimeEdgeEventsPayload {
    pub(super) capacity: usize,
    pub(super) dropped_total: u64,
    pub(super) events: Vec<ApiEventRecord>,
}

#[derive(Serialize)]
pub(super) struct RuntimeEdgeEventsData {
    pub(super) enabled: bool,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub(super) reason: Option<&'static str>,
    pub(super) generated_at_epoch_secs: u64,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub(super) data: Option<RuntimeEdgeEventsPayload>,
}
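// Both builders below gate on server.api.runtime_edge_enabled and return a
// machine-readable reason ("feature_disabled" / "source_unavailable") instead
// of failing the request.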
pub(super) async fn build_runtime_connections_summary_data(
    shared: &ApiShared,
    cfg: &ProxyConfig,
) -> RuntimeEdgeConnectionsSummaryData {
    let now_epoch_secs = now_epoch_secs();
    let api_cfg = &cfg.server.api;
    if !api_cfg.runtime_edge_enabled {
        return RuntimeEdgeConnectionsSummaryData {
            enabled: false,
            reason: Some(FEATURE_DISABLED_REASON),
            generated_at_epoch_secs: now_epoch_secs,
            data: None,
        };
    }

    let (generated_at_epoch_secs, payload) = match get_connections_payload_cached(
        shared,
        api_cfg.runtime_edge_cache_ttl_ms,
        api_cfg.runtime_edge_top_n,
    )
    .await
    {
        Some(v) => v,
        None => {
            return RuntimeEdgeConnectionsSummaryData {
                enabled: true,
                reason: Some(SOURCE_UNAVAILABLE_REASON),
                generated_at_epoch_secs: now_epoch_secs,
                data: None,
            };
        }
    };

    RuntimeEdgeConnectionsSummaryData {
        enabled: true,
        reason: None,
        generated_at_epoch_secs,
        data: Some(payload),
    }
}

pub(super) fn build_runtime_events_recent_data(
    shared: &ApiShared,
    cfg: &ProxyConfig,
    query: Option<&str>,
) -> RuntimeEdgeEventsData {
    let now_epoch_secs = now_epoch_secs();
    let api_cfg = &cfg.server.api;
    if !api_cfg.runtime_edge_enabled {
        return RuntimeEdgeEventsData {
            enabled: false,
            reason: Some(FEATURE_DISABLED_REASON),
            generated_at_epoch_secs: now_epoch_secs,
            data: None,
        };
    }

    let limit = parse_recent_events_limit(query, EVENTS_DEFAULT_LIMIT, EVENTS_MAX_LIMIT);
    let snapshot = shared.runtime_events.snapshot(limit);

    RuntimeEdgeEventsData {
        enabled: true,
        reason: None,
        generated_at_epoch_secs: now_epoch_secs,
        data: Some(RuntimeEdgeEventsPayload {
            capacity: snapshot.capacity,
            dropped_total: snapshot.dropped_total,
            events: snapshot.events,
        }),
    }
}
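// Cache strategy: serve a fresh entry while within TTL; otherwise let exactly
// one caller recompute (try_lock on runtime_edge_recompute_lock) while
// concurrent callers fall back to the stale entry, flagged via
// cache.stale_cache_used.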
async fn get_connections_payload_cached(
    shared: &ApiShared,
    cache_ttl_ms: u64,
    top_n: usize,
) -> Option<(u64, RuntimeEdgeConnectionsSummaryPayload)> {
    if cache_ttl_ms > 0 {
        let now = Instant::now();
        let cached = shared.runtime_edge_connections_cache.lock().await.clone();
        if let Some(entry) = cached
            && now < entry.expires_at
        {
            let mut payload = entry.payload;
            payload.cache.served_from_cache = true;
            payload.cache.stale_cache_used = false;
            return Some((entry.generated_at_epoch_secs, payload));
        }
    }

    let Ok(_guard) = shared.runtime_edge_recompute_lock.try_lock() else {
        let cached = shared.runtime_edge_connections_cache.lock().await.clone();
        if let Some(entry) = cached {
            let mut payload = entry.payload;
            payload.cache.served_from_cache = true;
            payload.cache.stale_cache_used = true;
            return Some((entry.generated_at_epoch_secs, payload));
        }
        return None;
    };

    let generated_at_epoch_secs = now_epoch_secs();
    let payload = recompute_connections_payload(shared, cache_ttl_ms, top_n).await;

    if cache_ttl_ms > 0 {
        let entry = EdgeConnectionsCacheEntry {
            expires_at: Instant::now() + Duration::from_millis(cache_ttl_ms),
            payload: payload.clone(),
            generated_at_epoch_secs,
        };
        *shared.runtime_edge_connections_cache.lock().await = Some(entry);
    }

    Some((generated_at_epoch_secs, payload))
}
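// Top-N lists sort by the metric descending (Reverse), then by username, so
// ties produce a deterministic order across recomputes.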
async fn recompute_connections_payload(
    shared: &ApiShared,
    cache_ttl_ms: u64,
    top_n: usize,
) -> RuntimeEdgeConnectionsSummaryPayload {
    let mut rows = Vec::<RuntimeEdgeConnectionUserData>::new();
    let mut active_users = 0usize;
    for entry in shared.stats.iter_user_stats() {
        let user_stats = entry.value();
        let current_connections = user_stats
            .curr_connects
            .load(std::sync::atomic::Ordering::Relaxed);
        let total_octets = user_stats
            .octets_from_client
            .load(std::sync::atomic::Ordering::Relaxed)
            .saturating_add(
                user_stats
                    .octets_to_client
                    .load(std::sync::atomic::Ordering::Relaxed),
            );
        if current_connections > 0 {
            active_users = active_users.saturating_add(1);
        }
        rows.push(RuntimeEdgeConnectionUserData {
            username: entry.key().clone(),
            current_connections,
            total_octets,
        });
    }

    let limit = top_n.max(1);
    let mut by_connections = rows.clone();
    by_connections.sort_by_key(|row| (Reverse(row.current_connections), row.username.clone()));
    by_connections.truncate(limit);

    let mut by_throughput = rows;
    by_throughput.sort_by_key(|row| (Reverse(row.total_octets), row.username.clone()));
    by_throughput.truncate(limit);

    let telemetry = shared.stats.telemetry_policy();
    RuntimeEdgeConnectionsSummaryPayload {
        cache: RuntimeEdgeConnectionCacheData {
            ttl_ms: cache_ttl_ms,
            served_from_cache: false,
            stale_cache_used: false,
        },
        totals: RuntimeEdgeConnectionTotalsData {
            current_connections: shared.stats.get_current_connections_total(),
            current_connections_me: shared.stats.get_current_connections_me(),
            current_connections_direct: shared.stats.get_current_connections_direct(),
            active_users,
        },
        top: RuntimeEdgeConnectionTopData {
            limit,
            by_connections,
            by_throughput,
        },
        telemetry: RuntimeEdgeConnectionTelemetryData {
            user_enabled: telemetry.user_enabled,
            throughput_is_cumulative: true,
        },
    }
}
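// Accepts ?limit=N; parsed values are clamped to [1, max_limit], anything
// unparseable falls back to the default.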
fn parse_recent_events_limit(query: Option<&str>, default_limit: usize, max_limit: usize) -> usize {
    let Some(query) = query else {
        return default_limit;
    };
    for pair in query.split('&') {
        let mut split = pair.splitn(2, '=');
        if split.next() == Some("limit")
            && let Some(raw) = split.next()
            && let Ok(parsed) = raw.parse::<usize>()
        {
            return parsed.clamp(1, max_limit);
        }
    }
    default_limit
}

fn now_epoch_secs() -> u64 {
    SystemTime::now()
        .duration_since(UNIX_EPOCH)
        .unwrap_or_default()
        .as_secs()
}
186  src/api/runtime_init.rs  Normal file
@@ -0,0 +1,186 @@
use serde::Serialize;

use crate::startup::{
    COMPONENT_ME_CONNECTIVITY_PING, COMPONENT_ME_POOL_CONSTRUCT, COMPONENT_ME_POOL_INIT_STAGE1,
    COMPONENT_ME_PROXY_CONFIG_V4, COMPONENT_ME_PROXY_CONFIG_V6, COMPONENT_ME_SECRET_FETCH,
    StartupComponentStatus, StartupMeStatus, compute_progress_pct,
};

use super::ApiShared;

#[derive(Serialize)]
pub(super) struct RuntimeInitializationComponentData {
    pub(super) id: &'static str,
    pub(super) title: &'static str,
    pub(super) status: &'static str,
    pub(super) started_at_epoch_ms: Option<u64>,
    pub(super) finished_at_epoch_ms: Option<u64>,
    pub(super) duration_ms: Option<u64>,
    pub(super) attempts: u32,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub(super) details: Option<String>,
}

#[derive(Serialize)]
pub(super) struct RuntimeInitializationMeData {
    pub(super) status: &'static str,
    pub(super) current_stage: String,
    pub(super) progress_pct: f64,
    pub(super) init_attempt: u32,
    pub(super) retry_limit: String,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub(super) last_error: Option<String>,
}

#[derive(Serialize)]
pub(super) struct RuntimeInitializationData {
    pub(super) status: &'static str,
    pub(super) degraded: bool,
    pub(super) current_stage: String,
    pub(super) progress_pct: f64,
    pub(super) started_at_epoch_secs: u64,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub(super) ready_at_epoch_secs: Option<u64>,
    pub(super) total_elapsed_ms: u64,
    pub(super) transport_mode: String,
    pub(super) me: RuntimeInitializationMeData,
    pub(super) components: Vec<RuntimeInitializationComponentData>,
}

#[derive(Clone)]
pub(super) struct RuntimeStartupSummaryData {
    pub(super) status: &'static str,
    pub(super) stage: String,
    pub(super) progress_pct: f64,
}

pub(super) async fn build_runtime_startup_summary(shared: &ApiShared) -> RuntimeStartupSummaryData {
    let snapshot = shared.startup_tracker.snapshot().await;
    let me_pool_progress = current_me_pool_stage_progress(shared).await;
    let progress_pct = compute_progress_pct(&snapshot, me_pool_progress);
    RuntimeStartupSummaryData {
        status: snapshot.status.as_str(),
        stage: snapshot.current_stage,
        progress_pct,
    }
}

pub(super) async fn build_runtime_initialization_data(
    shared: &ApiShared,
) -> RuntimeInitializationData {
    let snapshot = shared.startup_tracker.snapshot().await;
    let me_pool_progress = current_me_pool_stage_progress(shared).await;
    let progress_pct = compute_progress_pct(&snapshot, me_pool_progress);
    let me_progress_pct = compute_me_progress_pct(&snapshot, me_pool_progress);

    RuntimeInitializationData {
        status: snapshot.status.as_str(),
        degraded: snapshot.degraded,
        current_stage: snapshot.current_stage,
        progress_pct,
        started_at_epoch_secs: snapshot.started_at_epoch_secs,
        ready_at_epoch_secs: snapshot.ready_at_epoch_secs,
        total_elapsed_ms: snapshot.total_elapsed_ms,
        transport_mode: snapshot.transport_mode,
        me: RuntimeInitializationMeData {
            status: snapshot.me.status.as_str(),
            current_stage: snapshot.me.current_stage,
            progress_pct: me_progress_pct,
            init_attempt: snapshot.me.init_attempt,
            retry_limit: snapshot.me.retry_limit,
            last_error: snapshot.me.last_error,
        },
        components: snapshot
            .components
            .into_iter()
            .map(|component| RuntimeInitializationComponentData {
                id: component.id,
                title: component.title,
                status: component.status.as_str(),
                started_at_epoch_ms: component.started_at_epoch_ms,
                finished_at_epoch_ms: component.finished_at_epoch_ms,
                duration_ms: component.duration_ms,
                attempts: component.attempts,
                details: component.details,
            })
            .collect(),
    }
}
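// ME progress is a weighted average over the ME-owned startup components; a
// Running component contributes 0, except the stage-1 pool init, which reports
// fractional progress from the live pool snapshot.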
fn compute_me_progress_pct(
    snapshot: &crate::startup::StartupSnapshot,
    me_pool_progress: Option<f64>,
) -> f64 {
    match snapshot.me.status {
        StartupMeStatus::Pending => 0.0,
        StartupMeStatus::Ready | StartupMeStatus::Failed | StartupMeStatus::Skipped => 100.0,
        StartupMeStatus::Initializing => {
            let mut total_weight = 0.0f64;
            let mut completed_weight = 0.0f64;
            for component in &snapshot.components {
                if !is_me_component(component.id) {
                    continue;
                }
                total_weight += component.weight;
                let unit_progress = match component.status {
                    StartupComponentStatus::Pending => 0.0,
                    StartupComponentStatus::Running => {
                        if component.id == COMPONENT_ME_POOL_INIT_STAGE1 {
                            me_pool_progress.unwrap_or(0.0).clamp(0.0, 1.0)
                        } else {
                            0.0
                        }
                    }
                    StartupComponentStatus::Ready
                    | StartupComponentStatus::Failed
                    | StartupComponentStatus::Skipped => 1.0,
                };
                completed_weight += component.weight * unit_progress;
            }
            if total_weight <= f64::EPSILON {
                0.0
            } else {
                ((completed_weight / total_weight) * 100.0).clamp(0.0, 100.0)
            }
        }
    }
}

fn is_me_component(component_id: &str) -> bool {
    matches!(
        component_id,
        COMPONENT_ME_SECRET_FETCH
            | COMPONENT_ME_PROXY_CONFIG_V4
            | COMPONENT_ME_PROXY_CONFIG_V6
            | COMPONENT_ME_POOL_CONSTRUCT
            | COMPONENT_ME_POOL_INIT_STAGE1
            | COMPONENT_ME_CONNECTIVITY_PING
    )
}
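// Stage-1 pool progress blends DC coverage (weight 0.7) with writer coverage
// (weight 0.3), each normalized to [0, 1] by ratio_01 below.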
async fn current_me_pool_stage_progress(shared: &ApiShared) -> Option<f64> {
    let snapshot = shared.startup_tracker.snapshot().await;
    if snapshot.me.status != StartupMeStatus::Initializing {
        return None;
    }

    let pool = shared.me_pool.read().await.clone()?;
    let status = pool.api_status_snapshot().await;
    let configured_dc_groups = status.configured_dc_groups;
    let covered_dc_groups = status
        .dcs
        .iter()
        .filter(|dc| dc.alive_writers > 0)
        .count();

    let dc_coverage = ratio_01(covered_dc_groups, configured_dc_groups);
    let writer_coverage = ratio_01(status.alive_writers, status.required_writers);
    Some((0.7 * dc_coverage + 0.3 * writer_coverage).clamp(0.0, 1.0))
}

fn ratio_01(part: usize, total: usize) -> f64 {
    if total == 0 {
        return 0.0;
    }
    ((part as f64) / (total as f64)).clamp(0.0, 1.0)
}
534  src/api/runtime_min.rs  Normal file
@@ -0,0 +1,534 @@
use std::collections::BTreeSet;
use std::time::{SystemTime, UNIX_EPOCH};

use serde::Serialize;

use crate::config::ProxyConfig;

use super::ApiShared;

const SOURCE_UNAVAILABLE_REASON: &str = "source_unavailable";

#[derive(Serialize)]
pub(super) struct SecurityWhitelistData {
    pub(super) generated_at_epoch_secs: u64,
    pub(super) enabled: bool,
    pub(super) entries_total: usize,
    pub(super) entries: Vec<String>,
}

#[derive(Serialize)]
pub(super) struct RuntimeMePoolStateGenerationData {
    pub(super) active_generation: u64,
    pub(super) warm_generation: u64,
    pub(super) pending_hardswap_generation: u64,
    pub(super) pending_hardswap_age_secs: Option<u64>,
    pub(super) draining_generations: Vec<u64>,
}

#[derive(Serialize)]
pub(super) struct RuntimeMePoolStateHardswapData {
    pub(super) enabled: bool,
    pub(super) pending: bool,
}

#[derive(Serialize)]
pub(super) struct RuntimeMePoolStateWriterContourData {
    pub(super) warm: usize,
    pub(super) active: usize,
    pub(super) draining: usize,
}

#[derive(Serialize)]
pub(super) struct RuntimeMePoolStateWriterHealthData {
    pub(super) healthy: usize,
    pub(super) degraded: usize,
    pub(super) draining: usize,
}

#[derive(Serialize)]
pub(super) struct RuntimeMePoolStateWriterData {
    pub(super) total: usize,
    pub(super) alive_non_draining: usize,
    pub(super) draining: usize,
    pub(super) degraded: usize,
    pub(super) contour: RuntimeMePoolStateWriterContourData,
    pub(super) health: RuntimeMePoolStateWriterHealthData,
}

#[derive(Serialize)]
pub(super) struct RuntimeMePoolStateRefillDcData {
    pub(super) dc: i16,
    pub(super) family: &'static str,
    pub(super) inflight: usize,
}

#[derive(Serialize)]
pub(super) struct RuntimeMePoolStateRefillData {
    pub(super) inflight_endpoints_total: usize,
    pub(super) inflight_dc_total: usize,
    pub(super) by_dc: Vec<RuntimeMePoolStateRefillDcData>,
}

#[derive(Serialize)]
pub(super) struct RuntimeMePoolStatePayload {
    pub(super) generations: RuntimeMePoolStateGenerationData,
    pub(super) hardswap: RuntimeMePoolStateHardswapData,
    pub(super) writers: RuntimeMePoolStateWriterData,
    pub(super) refill: RuntimeMePoolStateRefillData,
}

#[derive(Serialize)]
pub(super) struct RuntimeMePoolStateData {
    pub(super) enabled: bool,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub(super) reason: Option<&'static str>,
    pub(super) generated_at_epoch_secs: u64,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub(super) data: Option<RuntimeMePoolStatePayload>,
}

#[derive(Serialize)]
pub(super) struct RuntimeMeQualityCountersData {
    pub(super) idle_close_by_peer_total: u64,
    pub(super) reader_eof_total: u64,
    pub(super) kdf_drift_total: u64,
    pub(super) kdf_port_only_drift_total: u64,
    pub(super) reconnect_attempt_total: u64,
    pub(super) reconnect_success_total: u64,
}

#[derive(Serialize)]
pub(super) struct RuntimeMeQualityRouteDropData {
    pub(super) no_conn_total: u64,
    pub(super) channel_closed_total: u64,
    pub(super) queue_full_total: u64,
    pub(super) queue_full_base_total: u64,
    pub(super) queue_full_high_total: u64,
}

#[derive(Serialize)]
pub(super) struct RuntimeMeQualityDcRttData {
    pub(super) dc: i16,
    pub(super) rtt_ema_ms: Option<f64>,
    pub(super) alive_writers: usize,
    pub(super) required_writers: usize,
    pub(super) coverage_pct: f64,
}

#[derive(Serialize)]
pub(super) struct RuntimeMeQualityPayload {
    pub(super) counters: RuntimeMeQualityCountersData,
    pub(super) route_drops: RuntimeMeQualityRouteDropData,
    pub(super) dc_rtt: Vec<RuntimeMeQualityDcRttData>,
}

#[derive(Serialize)]
pub(super) struct RuntimeMeQualityData {
    pub(super) enabled: bool,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub(super) reason: Option<&'static str>,
    pub(super) generated_at_epoch_secs: u64,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub(super) data: Option<RuntimeMeQualityPayload>,
}

#[derive(Serialize)]
pub(super) struct RuntimeUpstreamQualityPolicyData {
    pub(super) connect_retry_attempts: u32,
    pub(super) connect_retry_backoff_ms: u64,
    pub(super) connect_budget_ms: u64,
    pub(super) unhealthy_fail_threshold: u32,
    pub(super) connect_failfast_hard_errors: bool,
}

#[derive(Serialize)]
pub(super) struct RuntimeUpstreamQualityCountersData {
    pub(super) connect_attempt_total: u64,
    pub(super) connect_success_total: u64,
    pub(super) connect_fail_total: u64,
    pub(super) connect_failfast_hard_error_total: u64,
}

#[derive(Serialize)]
pub(super) struct RuntimeUpstreamQualitySummaryData {
    pub(super) configured_total: usize,
    pub(super) healthy_total: usize,
    pub(super) unhealthy_total: usize,
    pub(super) direct_total: usize,
    pub(super) socks4_total: usize,
    pub(super) socks5_total: usize,
}

#[derive(Serialize)]
pub(super) struct RuntimeUpstreamQualityDcData {
    pub(super) dc: i16,
    pub(super) latency_ema_ms: Option<f64>,
    pub(super) ip_preference: &'static str,
}

#[derive(Serialize)]
pub(super) struct RuntimeUpstreamQualityUpstreamData {
    pub(super) upstream_id: usize,
    pub(super) route_kind: &'static str,
    pub(super) address: String,
    pub(super) weight: u16,
    pub(super) scopes: String,
    pub(super) healthy: bool,
    pub(super) fails: u32,
    pub(super) last_check_age_secs: u64,
    pub(super) effective_latency_ms: Option<f64>,
    pub(super) dc: Vec<RuntimeUpstreamQualityDcData>,
}

#[derive(Serialize)]
pub(super) struct RuntimeUpstreamQualityData {
    pub(super) enabled: bool,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub(super) reason: Option<&'static str>,
    pub(super) generated_at_epoch_secs: u64,
    pub(super) policy: RuntimeUpstreamQualityPolicyData,
    pub(super) counters: RuntimeUpstreamQualityCountersData,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub(super) summary: Option<RuntimeUpstreamQualitySummaryData>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub(super) upstreams: Option<Vec<RuntimeUpstreamQualityUpstreamData>>,
}

#[derive(Serialize)]
pub(super) struct RuntimeNatStunReflectionData {
    pub(super) addr: String,
    pub(super) age_secs: u64,
}

#[derive(Serialize)]
pub(super) struct RuntimeNatStunFlagsData {
    pub(super) nat_probe_enabled: bool,
    pub(super) nat_probe_disabled_runtime: bool,
    pub(super) nat_probe_attempts: u8,
}

#[derive(Serialize)]
pub(super) struct RuntimeNatStunServersData {
    pub(super) configured: Vec<String>,
    pub(super) live: Vec<String>,
    pub(super) live_total: usize,
}

#[derive(Serialize)]
pub(super) struct RuntimeNatStunReflectionBlockData {
    #[serde(skip_serializing_if = "Option::is_none")]
    pub(super) v4: Option<RuntimeNatStunReflectionData>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub(super) v6: Option<RuntimeNatStunReflectionData>,
}

#[derive(Serialize)]
pub(super) struct RuntimeNatStunPayload {
    pub(super) flags: RuntimeNatStunFlagsData,
    pub(super) servers: RuntimeNatStunServersData,
    pub(super) reflection: RuntimeNatStunReflectionBlockData,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub(super) stun_backoff_remaining_ms: Option<u64>,
}

#[derive(Serialize)]
pub(super) struct RuntimeNatStunData {
    pub(super) enabled: bool,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub(super) reason: Option<&'static str>,
    pub(super) generated_at_epoch_secs: u64,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub(super) data: Option<RuntimeNatStunPayload>,
}

pub(super) fn build_security_whitelist_data(cfg: &ProxyConfig) -> SecurityWhitelistData {
    let entries = cfg
        .server
        .api
        .whitelist
        .iter()
        .map(ToString::to_string)
        .collect::<Vec<_>>();
    SecurityWhitelistData {
        generated_at_epoch_secs: now_epoch_secs(),
        enabled: !entries.is_empty(),
        entries_total: entries.len(),
        entries,
    }
}
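// Writer accounting: a writer counts as draining, else degraded, else healthy;
// independently, its state string ("warm" / "active" / anything else) feeds
// the contour counters.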
pub(super) async fn build_runtime_me_pool_state_data(shared: &ApiShared) -> RuntimeMePoolStateData {
    let now_epoch_secs = now_epoch_secs();
    let Some(pool) = shared.me_pool.read().await.clone() else {
        return RuntimeMePoolStateData {
            enabled: false,
            reason: Some(SOURCE_UNAVAILABLE_REASON),
            generated_at_epoch_secs: now_epoch_secs,
            data: None,
        };
    };

    let status = pool.api_status_snapshot().await;
    let runtime = pool.api_runtime_snapshot().await;
    let refill = pool.api_refill_snapshot().await;

    let mut draining_generations = BTreeSet::<u64>::new();
    let mut contour_warm = 0usize;
    let mut contour_active = 0usize;
    let mut contour_draining = 0usize;
    let mut draining = 0usize;
    let mut degraded = 0usize;
    let mut healthy = 0usize;

    for writer in &status.writers {
        if writer.draining {
            draining_generations.insert(writer.generation);
            draining += 1;
        }
        if writer.degraded && !writer.draining {
            degraded += 1;
        }
        if !writer.degraded && !writer.draining {
            healthy += 1;
        }
        match writer.state {
            "warm" => contour_warm += 1,
            "active" => contour_active += 1,
            _ => contour_draining += 1,
        }
    }

    RuntimeMePoolStateData {
        enabled: true,
        reason: None,
        generated_at_epoch_secs: status.generated_at_epoch_secs,
        data: Some(RuntimeMePoolStatePayload {
            generations: RuntimeMePoolStateGenerationData {
                active_generation: runtime.active_generation,
                warm_generation: runtime.warm_generation,
                pending_hardswap_generation: runtime.pending_hardswap_generation,
                pending_hardswap_age_secs: runtime.pending_hardswap_age_secs,
                draining_generations: draining_generations.into_iter().collect(),
            },
            hardswap: RuntimeMePoolStateHardswapData {
                enabled: runtime.hardswap_enabled,
                pending: runtime.pending_hardswap_generation != 0,
            },
            writers: RuntimeMePoolStateWriterData {
                total: status.writers.len(),
                alive_non_draining: status.writers.len().saturating_sub(draining),
                draining,
                degraded,
                contour: RuntimeMePoolStateWriterContourData {
                    warm: contour_warm,
                    active: contour_active,
                    draining: contour_draining,
                },
                health: RuntimeMePoolStateWriterHealthData {
                    healthy,
                    degraded,
                    draining,
                },
            },
            refill: RuntimeMePoolStateRefillData {
                inflight_endpoints_total: refill.inflight_endpoints_total,
                inflight_dc_total: refill.inflight_dc_total,
                by_dc: refill
                    .by_dc
                    .into_iter()
                    .map(|entry| RuntimeMePoolStateRefillDcData {
                        dc: entry.dc,
                        family: entry.family,
                        inflight: entry.inflight,
                    })
                    .collect(),
            },
        }),
    }
}

pub(super) async fn build_runtime_me_quality_data(shared: &ApiShared) -> RuntimeMeQualityData {
    let now_epoch_secs = now_epoch_secs();
    let Some(pool) = shared.me_pool.read().await.clone() else {
        return RuntimeMeQualityData {
            enabled: false,
            reason: Some(SOURCE_UNAVAILABLE_REASON),
            generated_at_epoch_secs: now_epoch_secs,
            data: None,
        };
    };

    let status = pool.api_status_snapshot().await;
    RuntimeMeQualityData {
        enabled: true,
        reason: None,
        generated_at_epoch_secs: status.generated_at_epoch_secs,
        data: Some(RuntimeMeQualityPayload {
            counters: RuntimeMeQualityCountersData {
                idle_close_by_peer_total: shared.stats.get_me_idle_close_by_peer_total(),
                reader_eof_total: shared.stats.get_me_reader_eof_total(),
                kdf_drift_total: shared.stats.get_me_kdf_drift_total(),
                kdf_port_only_drift_total: shared.stats.get_me_kdf_port_only_drift_total(),
                reconnect_attempt_total: shared.stats.get_me_reconnect_attempts(),
                reconnect_success_total: shared.stats.get_me_reconnect_success(),
            },
            route_drops: RuntimeMeQualityRouteDropData {
                no_conn_total: shared.stats.get_me_route_drop_no_conn(),
                channel_closed_total: shared.stats.get_me_route_drop_channel_closed(),
                queue_full_total: shared.stats.get_me_route_drop_queue_full(),
                queue_full_base_total: shared.stats.get_me_route_drop_queue_full_base(),
                queue_full_high_total: shared.stats.get_me_route_drop_queue_full_high(),
            },
            dc_rtt: status
                .dcs
                .into_iter()
                .map(|dc| RuntimeMeQualityDcRttData {
                    dc: dc.dc,
                    rtt_ema_ms: dc.rtt_ms,
                    alive_writers: dc.alive_writers,
                    required_writers: dc.required_writers,
                    coverage_pct: dc.coverage_pct,
                })
                .collect(),
        }),
    }
}
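// Unlike the pool-backed endpoints above, policy and counters are always
// reported here; only summary/upstreams are omitted when the upstream snapshot
// is unavailable.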
pub(super) async fn build_runtime_upstream_quality_data(
    shared: &ApiShared,
) -> RuntimeUpstreamQualityData {
    let generated_at_epoch_secs = now_epoch_secs();
    let policy = shared.upstream_manager.api_policy_snapshot();
    let counters = RuntimeUpstreamQualityCountersData {
        connect_attempt_total: shared.stats.get_upstream_connect_attempt_total(),
        connect_success_total: shared.stats.get_upstream_connect_success_total(),
        connect_fail_total: shared.stats.get_upstream_connect_fail_total(),
        connect_failfast_hard_error_total: shared.stats.get_upstream_connect_failfast_hard_error_total(),
    };

    let Some(snapshot) = shared.upstream_manager.try_api_snapshot() else {
        return RuntimeUpstreamQualityData {
            enabled: false,
            reason: Some(SOURCE_UNAVAILABLE_REASON),
            generated_at_epoch_secs,
            policy: RuntimeUpstreamQualityPolicyData {
                connect_retry_attempts: policy.connect_retry_attempts,
                connect_retry_backoff_ms: policy.connect_retry_backoff_ms,
                connect_budget_ms: policy.connect_budget_ms,
                unhealthy_fail_threshold: policy.unhealthy_fail_threshold,
                connect_failfast_hard_errors: policy.connect_failfast_hard_errors,
            },
            counters,
            summary: None,
            upstreams: None,
        };
    };

    RuntimeUpstreamQualityData {
        enabled: true,
        reason: None,
        generated_at_epoch_secs,
        policy: RuntimeUpstreamQualityPolicyData {
            connect_retry_attempts: policy.connect_retry_attempts,
            connect_retry_backoff_ms: policy.connect_retry_backoff_ms,
            connect_budget_ms: policy.connect_budget_ms,
            unhealthy_fail_threshold: policy.unhealthy_fail_threshold,
            connect_failfast_hard_errors: policy.connect_failfast_hard_errors,
        },
        counters,
        summary: Some(RuntimeUpstreamQualitySummaryData {
            configured_total: snapshot.summary.configured_total,
            healthy_total: snapshot.summary.healthy_total,
            unhealthy_total: snapshot.summary.unhealthy_total,
            direct_total: snapshot.summary.direct_total,
            socks4_total: snapshot.summary.socks4_total,
            socks5_total: snapshot.summary.socks5_total,
        }),
        upstreams: Some(
            snapshot
                .upstreams
                .into_iter()
                .map(|upstream| RuntimeUpstreamQualityUpstreamData {
                    upstream_id: upstream.upstream_id,
                    route_kind: match upstream.route_kind {
                        crate::transport::UpstreamRouteKind::Direct => "direct",
                        crate::transport::UpstreamRouteKind::Socks4 => "socks4",
                        crate::transport::UpstreamRouteKind::Socks5 => "socks5",
                    },
                    address: upstream.address,
                    weight: upstream.weight,
                    scopes: upstream.scopes,
                    healthy: upstream.healthy,
                    fails: upstream.fails,
                    last_check_age_secs: upstream.last_check_age_secs,
                    effective_latency_ms: upstream.effective_latency_ms,
                    dc: upstream
                        .dc
                        .into_iter()
                        .map(|dc| RuntimeUpstreamQualityDcData {
                            dc: dc.dc,
                            latency_ema_ms: dc.latency_ema_ms,
                            ip_preference: match dc.ip_preference {
                                crate::transport::upstream::IpPreference::Unknown => "unknown",
                                crate::transport::upstream::IpPreference::PreferV6 => "prefer_v6",
                                crate::transport::upstream::IpPreference::PreferV4 => "prefer_v4",
                                crate::transport::upstream::IpPreference::BothWork => "both_work",
                                crate::transport::upstream::IpPreference::Unavailable => "unavailable",
                            },
                        })
                        .collect(),
                })
                .collect(),
        ),
    }
}

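// Snapshot of NAT probing / STUN reflection state from the active middle-proxy
// pool; reports "source_unavailable" when no pool is installed.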
pub(super) async fn build_runtime_nat_stun_data(shared: &ApiShared) -> RuntimeNatStunData {
    let now_epoch_secs = now_epoch_secs();
    let Some(pool) = shared.me_pool.read().await.clone() else {
        return RuntimeNatStunData {
            enabled: false,
            reason: Some(SOURCE_UNAVAILABLE_REASON),
            generated_at_epoch_secs: now_epoch_secs,
            data: None,
        };
    };

    let snapshot = pool.api_nat_stun_snapshot().await;
    RuntimeNatStunData {
        enabled: true,
        reason: None,
        generated_at_epoch_secs: now_epoch_secs,
        data: Some(RuntimeNatStunPayload {
            flags: RuntimeNatStunFlagsData {
                nat_probe_enabled: snapshot.nat_probe_enabled,
                nat_probe_disabled_runtime: snapshot.nat_probe_disabled_runtime,
                nat_probe_attempts: snapshot.nat_probe_attempts,
            },
            servers: RuntimeNatStunServersData {
                configured: snapshot.configured_servers,
                live: snapshot.live_servers.clone(),
                live_total: snapshot.live_servers.len(),
            },
            reflection: RuntimeNatStunReflectionBlockData {
                v4: snapshot.reflection_v4.map(|entry| RuntimeNatStunReflectionData {
                    addr: entry.addr.to_string(),
                    age_secs: entry.age_secs,
                }),
                v6: snapshot.reflection_v6.map(|entry| RuntimeNatStunReflectionData {
                    addr: entry.addr.to_string(),
                    age_secs: entry.age_secs,
                }),
            },
            stun_backoff_remaining_ms: snapshot.stun_backoff_remaining_ms,
        }),
    }
}

fn now_epoch_secs() -> u64 {
    SystemTime::now()
        .duration_since(UNIX_EPOCH)
        .unwrap_or_default()
        .as_secs()
}
526 src/api/runtime_stats.rs Normal file
@@ -0,0 +1,526 @@
use std::time::{Duration, Instant, SystemTime, UNIX_EPOCH};

use crate::config::ApiConfig;
use crate::stats::Stats;
use crate::transport::upstream::IpPreference;
use crate::transport::UpstreamRouteKind;

use super::ApiShared;
use super::model::{
    DcEndpointWriters, DcStatus, DcStatusData, MeWriterStatus, MeWritersData, MeWritersSummary,
    MinimalAllData, MinimalAllPayload, MinimalDcPathData, MinimalMeRuntimeData,
    MinimalQuarantineData, UpstreamDcStatus, UpstreamStatus, UpstreamSummaryData, UpstreamsData,
    ZeroAllData, ZeroCodeCount, ZeroCoreData, ZeroDesyncData, ZeroMiddleProxyData, ZeroPoolData,
    ZeroUpstreamData,
};

const FEATURE_DISABLED_REASON: &str = "feature_disabled";
const SOURCE_UNAVAILABLE_REASON: &str = "source_unavailable";

#[derive(Clone)]
pub(crate) struct MinimalCacheEntry {
    pub(super) expires_at: Instant,
    pub(super) payload: MinimalAllPayload,
    pub(super) generated_at_epoch_secs: u64,
}

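// Builds the counter-only "zero" snapshot synchronously from Stats getters;
// no locks or awaits are needed, so this view is always available.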
pub(super) fn build_zero_all_data(stats: &Stats, configured_users: usize) -> ZeroAllData {
    let telemetry = stats.telemetry_policy();
    let handshake_error_codes = stats
        .get_me_handshake_error_code_counts()
        .into_iter()
        .map(|(code, total)| ZeroCodeCount { code, total })
        .collect();

    ZeroAllData {
        generated_at_epoch_secs: now_epoch_secs(),
        core: ZeroCoreData {
            uptime_seconds: stats.uptime_secs(),
            connections_total: stats.get_connects_all(),
            connections_bad_total: stats.get_connects_bad(),
            handshake_timeouts_total: stats.get_handshake_timeouts(),
            configured_users,
            telemetry_core_enabled: telemetry.core_enabled,
            telemetry_user_enabled: telemetry.user_enabled,
            telemetry_me_level: telemetry.me_level.to_string(),
        },
        upstream: build_zero_upstream_data(stats),
        middle_proxy: ZeroMiddleProxyData {
            keepalive_sent_total: stats.get_me_keepalive_sent(),
            keepalive_failed_total: stats.get_me_keepalive_failed(),
            keepalive_pong_total: stats.get_me_keepalive_pong(),
            keepalive_timeout_total: stats.get_me_keepalive_timeout(),
            rpc_proxy_req_signal_sent_total: stats.get_me_rpc_proxy_req_signal_sent_total(),
            rpc_proxy_req_signal_failed_total: stats.get_me_rpc_proxy_req_signal_failed_total(),
            rpc_proxy_req_signal_skipped_no_meta_total: stats
                .get_me_rpc_proxy_req_signal_skipped_no_meta_total(),
            rpc_proxy_req_signal_response_total: stats.get_me_rpc_proxy_req_signal_response_total(),
            rpc_proxy_req_signal_close_sent_total: stats
                .get_me_rpc_proxy_req_signal_close_sent_total(),
            reconnect_attempt_total: stats.get_me_reconnect_attempts(),
            reconnect_success_total: stats.get_me_reconnect_success(),
            handshake_reject_total: stats.get_me_handshake_reject_total(),
            handshake_error_codes,
            reader_eof_total: stats.get_me_reader_eof_total(),
            idle_close_by_peer_total: stats.get_me_idle_close_by_peer_total(),
            route_drop_no_conn_total: stats.get_me_route_drop_no_conn(),
            route_drop_channel_closed_total: stats.get_me_route_drop_channel_closed(),
            route_drop_queue_full_total: stats.get_me_route_drop_queue_full(),
            route_drop_queue_full_base_total: stats.get_me_route_drop_queue_full_base(),
            route_drop_queue_full_high_total: stats.get_me_route_drop_queue_full_high(),
            socks_kdf_strict_reject_total: stats.get_me_socks_kdf_strict_reject(),
            socks_kdf_compat_fallback_total: stats.get_me_socks_kdf_compat_fallback(),
            endpoint_quarantine_total: stats.get_me_endpoint_quarantine_total(),
            kdf_drift_total: stats.get_me_kdf_drift_total(),
            kdf_port_only_drift_total: stats.get_me_kdf_port_only_drift_total(),
            hardswap_pending_reuse_total: stats.get_me_hardswap_pending_reuse_total(),
            hardswap_pending_ttl_expired_total: stats.get_me_hardswap_pending_ttl_expired_total(),
            single_endpoint_outage_enter_total: stats.get_me_single_endpoint_outage_enter_total(),
            single_endpoint_outage_exit_total: stats.get_me_single_endpoint_outage_exit_total(),
            single_endpoint_outage_reconnect_attempt_total: stats
                .get_me_single_endpoint_outage_reconnect_attempt_total(),
            single_endpoint_outage_reconnect_success_total: stats
                .get_me_single_endpoint_outage_reconnect_success_total(),
            single_endpoint_quarantine_bypass_total: stats
                .get_me_single_endpoint_quarantine_bypass_total(),
            single_endpoint_shadow_rotate_total: stats.get_me_single_endpoint_shadow_rotate_total(),
            single_endpoint_shadow_rotate_skipped_quarantine_total: stats
                .get_me_single_endpoint_shadow_rotate_skipped_quarantine_total(),
            floor_mode_switch_total: stats.get_me_floor_mode_switch_total(),
            floor_mode_switch_static_to_adaptive_total: stats
                .get_me_floor_mode_switch_static_to_adaptive_total(),
            floor_mode_switch_adaptive_to_static_total: stats
                .get_me_floor_mode_switch_adaptive_to_static_total(),
        },
        pool: ZeroPoolData {
            pool_swap_total: stats.get_pool_swap_total(),
            pool_drain_active: stats.get_pool_drain_active(),
            pool_force_close_total: stats.get_pool_force_close_total(),
            pool_stale_pick_total: stats.get_pool_stale_pick_total(),
            writer_removed_total: stats.get_me_writer_removed_total(),
            writer_removed_unexpected_total: stats.get_me_writer_removed_unexpected_total(),
            refill_triggered_total: stats.get_me_refill_triggered_total(),
            refill_skipped_inflight_total: stats.get_me_refill_skipped_inflight_total(),
            refill_failed_total: stats.get_me_refill_failed_total(),
            writer_restored_same_endpoint_total: stats.get_me_writer_restored_same_endpoint_total(),
            writer_restored_fallback_total: stats.get_me_writer_restored_fallback_total(),
        },
        desync: ZeroDesyncData {
            secure_padding_invalid_total: stats.get_secure_padding_invalid(),
            desync_total: stats.get_desync_total(),
            desync_full_logged_total: stats.get_desync_full_logged(),
            desync_suppressed_total: stats.get_desync_suppressed(),
            desync_frames_bucket_0: stats.get_desync_frames_bucket_0(),
            desync_frames_bucket_1_2: stats.get_desync_frames_bucket_1_2(),
            desync_frames_bucket_3_10: stats.get_desync_frames_bucket_3_10(),
            desync_frames_bucket_gt_10: stats.get_desync_frames_bucket_gt_10(),
        },
    }
}

fn build_zero_upstream_data(stats: &Stats) -> ZeroUpstreamData {
    ZeroUpstreamData {
        connect_attempt_total: stats.get_upstream_connect_attempt_total(),
        connect_success_total: stats.get_upstream_connect_success_total(),
        connect_fail_total: stats.get_upstream_connect_fail_total(),
        connect_failfast_hard_error_total: stats.get_upstream_connect_failfast_hard_error_total(),
        connect_attempts_bucket_1: stats.get_upstream_connect_attempts_bucket_1(),
        connect_attempts_bucket_2: stats.get_upstream_connect_attempts_bucket_2(),
        connect_attempts_bucket_3_4: stats.get_upstream_connect_attempts_bucket_3_4(),
        connect_attempts_bucket_gt_4: stats.get_upstream_connect_attempts_bucket_gt_4(),
        connect_duration_success_bucket_le_100ms: stats
            .get_upstream_connect_duration_success_bucket_le_100ms(),
        connect_duration_success_bucket_101_500ms: stats
            .get_upstream_connect_duration_success_bucket_101_500ms(),
        connect_duration_success_bucket_501_1000ms: stats
            .get_upstream_connect_duration_success_bucket_501_1000ms(),
        connect_duration_success_bucket_gt_1000ms: stats
            .get_upstream_connect_duration_success_bucket_gt_1000ms(),
        connect_duration_fail_bucket_le_100ms: stats.get_upstream_connect_duration_fail_bucket_le_100ms(),
        connect_duration_fail_bucket_101_500ms: stats
            .get_upstream_connect_duration_fail_bucket_101_500ms(),
        connect_duration_fail_bucket_501_1000ms: stats
            .get_upstream_connect_duration_fail_bucket_501_1000ms(),
        connect_duration_fail_bucket_gt_1000ms: stats
            .get_upstream_connect_duration_fail_bucket_gt_1000ms(),
    }
}

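// Upstreams view: the zero counters are always included; per-upstream detail is
// gated on `minimal_runtime_enabled` and on the manager snapshot being available.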
pub(super) fn build_upstreams_data(shared: &ApiShared, api_cfg: &ApiConfig) -> UpstreamsData {
    let generated_at_epoch_secs = now_epoch_secs();
    let zero = build_zero_upstream_data(&shared.stats);
    if !api_cfg.minimal_runtime_enabled {
        return UpstreamsData {
            enabled: false,
            reason: Some(FEATURE_DISABLED_REASON),
            generated_at_epoch_secs,
            zero,
            summary: None,
            upstreams: None,
        };
    }

    let Some(snapshot) = shared.upstream_manager.try_api_snapshot() else {
        return UpstreamsData {
            enabled: true,
            reason: Some(SOURCE_UNAVAILABLE_REASON),
            generated_at_epoch_secs,
            zero,
            summary: None,
            upstreams: None,
        };
    };

    let summary = UpstreamSummaryData {
        configured_total: snapshot.summary.configured_total,
        healthy_total: snapshot.summary.healthy_total,
        unhealthy_total: snapshot.summary.unhealthy_total,
        direct_total: snapshot.summary.direct_total,
        socks4_total: snapshot.summary.socks4_total,
        socks5_total: snapshot.summary.socks5_total,
    };
    let upstreams = snapshot
        .upstreams
        .into_iter()
        .map(|upstream| UpstreamStatus {
            upstream_id: upstream.upstream_id,
            route_kind: map_route_kind(upstream.route_kind),
            address: upstream.address,
            weight: upstream.weight,
            scopes: upstream.scopes,
            healthy: upstream.healthy,
            fails: upstream.fails,
            last_check_age_secs: upstream.last_check_age_secs,
            effective_latency_ms: upstream.effective_latency_ms,
            dc: upstream
                .dc
                .into_iter()
                .map(|dc| UpstreamDcStatus {
                    dc: dc.dc,
                    latency_ema_ms: dc.latency_ema_ms,
                    ip_preference: map_ip_preference(dc.ip_preference),
                })
                .collect(),
        })
        .collect();

    UpstreamsData {
        enabled: true,
        reason: None,
        generated_at_epoch_secs,
        zero,
        summary: Some(summary),
        upstreams: Some(upstreams),
    }
}

pub(super) async fn build_minimal_all_data(
    shared: &ApiShared,
    api_cfg: &ApiConfig,
) -> MinimalAllData {
    let now = now_epoch_secs();
    if !api_cfg.minimal_runtime_enabled {
        return MinimalAllData {
            enabled: false,
            reason: Some(FEATURE_DISABLED_REASON),
            generated_at_epoch_secs: now,
            data: None,
        };
    }

    let Some((generated_at_epoch_secs, payload)) =
        get_minimal_payload_cached(shared, api_cfg.minimal_runtime_cache_ttl_ms).await
    else {
        return MinimalAllData {
            enabled: true,
            reason: Some(SOURCE_UNAVAILABLE_REASON),
            generated_at_epoch_secs: now,
            data: Some(MinimalAllPayload {
                me_writers: disabled_me_writers(now, SOURCE_UNAVAILABLE_REASON),
                dcs: disabled_dcs(now, SOURCE_UNAVAILABLE_REASON),
                me_runtime: None,
                network_path: Vec::new(),
            }),
        };
    };

    MinimalAllData {
        enabled: true,
        reason: None,
        generated_at_epoch_secs,
        data: Some(payload),
    }
}

pub(super) async fn build_me_writers_data(
    shared: &ApiShared,
    api_cfg: &ApiConfig,
) -> MeWritersData {
    let now = now_epoch_secs();
    if !api_cfg.minimal_runtime_enabled {
        return disabled_me_writers(now, FEATURE_DISABLED_REASON);
    }

    let Some((_, payload)) =
        get_minimal_payload_cached(shared, api_cfg.minimal_runtime_cache_ttl_ms).await
    else {
        return disabled_me_writers(now, SOURCE_UNAVAILABLE_REASON);
    };
    payload.me_writers
}

pub(super) async fn build_dcs_data(shared: &ApiShared, api_cfg: &ApiConfig) -> DcStatusData {
    let now = now_epoch_secs();
    if !api_cfg.minimal_runtime_enabled {
        return disabled_dcs(now, FEATURE_DISABLED_REASON);
    }

    let Some((_, payload)) =
        get_minimal_payload_cached(shared, api_cfg.minimal_runtime_cache_ttl_ms).await
    else {
        return disabled_dcs(now, SOURCE_UNAVAILABLE_REASON);
    };
    payload.dcs
}

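// Serves the minimal payload from a TTL cache when fresh; otherwise rebuilds it
// from the pool's status/runtime snapshots and re-populates the cache.
// Returns None when no middle-proxy pool is installed.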
async fn get_minimal_payload_cached(
    shared: &ApiShared,
    cache_ttl_ms: u64,
) -> Option<(u64, MinimalAllPayload)> {
    if cache_ttl_ms > 0 {
        let now = Instant::now();
        let cached = shared.minimal_cache.lock().await.clone();
        if let Some(entry) = cached
            && now < entry.expires_at
        {
            return Some((entry.generated_at_epoch_secs, entry.payload));
        }
    }

    let pool = shared.me_pool.read().await.clone()?;
    let status = pool.api_status_snapshot().await;
    let runtime = pool.api_runtime_snapshot().await;
    let generated_at_epoch_secs = status.generated_at_epoch_secs;

    let me_writers = MeWritersData {
        middle_proxy_enabled: true,
        reason: None,
        generated_at_epoch_secs,
        summary: MeWritersSummary {
            configured_dc_groups: status.configured_dc_groups,
            configured_endpoints: status.configured_endpoints,
            available_endpoints: status.available_endpoints,
            available_pct: status.available_pct,
            required_writers: status.required_writers,
            alive_writers: status.alive_writers,
            coverage_pct: status.coverage_pct,
        },
        writers: status
            .writers
            .into_iter()
            .map(|entry| MeWriterStatus {
                writer_id: entry.writer_id,
                dc: entry.dc,
                endpoint: entry.endpoint.to_string(),
                generation: entry.generation,
                state: entry.state,
                draining: entry.draining,
                degraded: entry.degraded,
                bound_clients: entry.bound_clients,
                idle_for_secs: entry.idle_for_secs,
                rtt_ema_ms: entry.rtt_ema_ms,
            })
            .collect(),
    };
    let dcs = DcStatusData {
        middle_proxy_enabled: true,
        reason: None,
        generated_at_epoch_secs,
        dcs: status
            .dcs
            .into_iter()
            .map(|entry| DcStatus {
                dc: entry.dc,
                endpoints: entry
                    .endpoints
                    .into_iter()
                    .map(|value| value.to_string())
                    .collect(),
                endpoint_writers: entry
                    .endpoint_writers
                    .into_iter()
                    .map(|coverage| DcEndpointWriters {
                        endpoint: coverage.endpoint.to_string(),
                        active_writers: coverage.active_writers,
                    })
                    .collect(),
                available_endpoints: entry.available_endpoints,
                available_pct: entry.available_pct,
                required_writers: entry.required_writers,
                floor_min: entry.floor_min,
                floor_target: entry.floor_target,
                floor_max: entry.floor_max,
                floor_capped: entry.floor_capped,
                alive_writers: entry.alive_writers,
                coverage_pct: entry.coverage_pct,
                rtt_ms: entry.rtt_ms,
                load: entry.load,
            })
            .collect(),
    };
    let me_runtime = MinimalMeRuntimeData {
        active_generation: runtime.active_generation,
        warm_generation: runtime.warm_generation,
        pending_hardswap_generation: runtime.pending_hardswap_generation,
        pending_hardswap_age_secs: runtime.pending_hardswap_age_secs,
        hardswap_enabled: runtime.hardswap_enabled,
        floor_mode: runtime.floor_mode,
        adaptive_floor_idle_secs: runtime.adaptive_floor_idle_secs,
        adaptive_floor_min_writers_single_endpoint: runtime
            .adaptive_floor_min_writers_single_endpoint,
        adaptive_floor_min_writers_multi_endpoint: runtime
            .adaptive_floor_min_writers_multi_endpoint,
        adaptive_floor_recover_grace_secs: runtime.adaptive_floor_recover_grace_secs,
        adaptive_floor_writers_per_core_total: runtime
            .adaptive_floor_writers_per_core_total,
        adaptive_floor_cpu_cores_override: runtime.adaptive_floor_cpu_cores_override,
        adaptive_floor_max_extra_writers_single_per_core: runtime
            .adaptive_floor_max_extra_writers_single_per_core,
        adaptive_floor_max_extra_writers_multi_per_core: runtime
            .adaptive_floor_max_extra_writers_multi_per_core,
        adaptive_floor_max_active_writers_per_core: runtime
            .adaptive_floor_max_active_writers_per_core,
        adaptive_floor_max_warm_writers_per_core: runtime
            .adaptive_floor_max_warm_writers_per_core,
        adaptive_floor_max_active_writers_global: runtime
            .adaptive_floor_max_active_writers_global,
        adaptive_floor_max_warm_writers_global: runtime
            .adaptive_floor_max_warm_writers_global,
        adaptive_floor_cpu_cores_detected: runtime.adaptive_floor_cpu_cores_detected,
        adaptive_floor_cpu_cores_effective: runtime.adaptive_floor_cpu_cores_effective,
        adaptive_floor_global_cap_raw: runtime.adaptive_floor_global_cap_raw,
        adaptive_floor_global_cap_effective: runtime.adaptive_floor_global_cap_effective,
        adaptive_floor_target_writers_total: runtime.adaptive_floor_target_writers_total,
        adaptive_floor_active_cap_configured: runtime.adaptive_floor_active_cap_configured,
        adaptive_floor_active_cap_effective: runtime.adaptive_floor_active_cap_effective,
        adaptive_floor_warm_cap_configured: runtime.adaptive_floor_warm_cap_configured,
        adaptive_floor_warm_cap_effective: runtime.adaptive_floor_warm_cap_effective,
        adaptive_floor_active_writers_current: runtime.adaptive_floor_active_writers_current,
        adaptive_floor_warm_writers_current: runtime.adaptive_floor_warm_writers_current,
        me_keepalive_enabled: runtime.me_keepalive_enabled,
        me_keepalive_interval_secs: runtime.me_keepalive_interval_secs,
        me_keepalive_jitter_secs: runtime.me_keepalive_jitter_secs,
        me_keepalive_payload_random: runtime.me_keepalive_payload_random,
        rpc_proxy_req_every_secs: runtime.rpc_proxy_req_every_secs,
        me_reconnect_max_concurrent_per_dc: runtime.me_reconnect_max_concurrent_per_dc,
        me_reconnect_backoff_base_ms: runtime.me_reconnect_backoff_base_ms,
        me_reconnect_backoff_cap_ms: runtime.me_reconnect_backoff_cap_ms,
        me_reconnect_fast_retry_count: runtime.me_reconnect_fast_retry_count,
        me_pool_drain_ttl_secs: runtime.me_pool_drain_ttl_secs,
        me_pool_force_close_secs: runtime.me_pool_force_close_secs,
        me_pool_min_fresh_ratio: runtime.me_pool_min_fresh_ratio,
        me_bind_stale_mode: runtime.me_bind_stale_mode,
        me_bind_stale_ttl_secs: runtime.me_bind_stale_ttl_secs,
        me_single_endpoint_shadow_writers: runtime.me_single_endpoint_shadow_writers,
        me_single_endpoint_outage_mode_enabled: runtime.me_single_endpoint_outage_mode_enabled,
        me_single_endpoint_outage_disable_quarantine: runtime
            .me_single_endpoint_outage_disable_quarantine,
        me_single_endpoint_outage_backoff_min_ms: runtime.me_single_endpoint_outage_backoff_min_ms,
        me_single_endpoint_outage_backoff_max_ms: runtime.me_single_endpoint_outage_backoff_max_ms,
        me_single_endpoint_shadow_rotate_every_secs: runtime
            .me_single_endpoint_shadow_rotate_every_secs,
        me_deterministic_writer_sort: runtime.me_deterministic_writer_sort,
        me_writer_pick_mode: runtime.me_writer_pick_mode,
        me_writer_pick_sample_size: runtime.me_writer_pick_sample_size,
        me_socks_kdf_policy: runtime.me_socks_kdf_policy,
        quarantined_endpoints_total: runtime.quarantined_endpoints.len(),
        quarantined_endpoints: runtime
            .quarantined_endpoints
            .into_iter()
            .map(|entry| MinimalQuarantineData {
                endpoint: entry.endpoint.to_string(),
                remaining_ms: entry.remaining_ms,
            })
            .collect(),
    };
    let network_path = runtime
        .network_path
        .into_iter()
        .map(|entry| MinimalDcPathData {
            dc: entry.dc,
            ip_preference: entry.ip_preference,
            selected_addr_v4: entry.selected_addr_v4.map(|value| value.to_string()),
            selected_addr_v6: entry.selected_addr_v6.map(|value| value.to_string()),
        })
        .collect();

    let payload = MinimalAllPayload {
        me_writers,
        dcs,
        me_runtime: Some(me_runtime),
        network_path,
    };

    if cache_ttl_ms > 0 {
        let entry = MinimalCacheEntry {
            expires_at: Instant::now() + Duration::from_millis(cache_ttl_ms),
            payload: payload.clone(),
            generated_at_epoch_secs,
        };
        *shared.minimal_cache.lock().await = Some(entry);
    }

    Some((generated_at_epoch_secs, payload))
}

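// Placeholder payloads returned when the feature is disabled or the pool is absent.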
fn disabled_me_writers(now_epoch_secs: u64, reason: &'static str) -> MeWritersData {
    MeWritersData {
        middle_proxy_enabled: false,
        reason: Some(reason),
        generated_at_epoch_secs: now_epoch_secs,
        summary: MeWritersSummary {
            configured_dc_groups: 0,
            configured_endpoints: 0,
            available_endpoints: 0,
            available_pct: 0.0,
            required_writers: 0,
            alive_writers: 0,
            coverage_pct: 0.0,
        },
        writers: Vec::new(),
    }
}

fn disabled_dcs(now_epoch_secs: u64, reason: &'static str) -> DcStatusData {
    DcStatusData {
        middle_proxy_enabled: false,
        reason: Some(reason),
        generated_at_epoch_secs: now_epoch_secs,
        dcs: Vec::new(),
    }
}

fn map_route_kind(value: UpstreamRouteKind) -> &'static str {
    match value {
        UpstreamRouteKind::Direct => "direct",
        UpstreamRouteKind::Socks4 => "socks4",
        UpstreamRouteKind::Socks5 => "socks5",
    }
}

fn map_ip_preference(value: IpPreference) -> &'static str {
    match value {
        IpPreference::Unknown => "unknown",
        IpPreference::PreferV6 => "prefer_v6",
        IpPreference::PreferV4 => "prefer_v4",
        IpPreference::BothWork => "both_work",
        IpPreference::Unavailable => "unavailable",
    }
}

fn now_epoch_secs() -> u64 {
    SystemTime::now()
        .duration_since(UNIX_EPOCH)
        .unwrap_or_default()
        .as_secs()
}
66 src/api/runtime_watch.rs Normal file
@@ -0,0 +1,66 @@
use std::sync::Arc;
use std::sync::atomic::Ordering;
use std::time::{SystemTime, UNIX_EPOCH};

use tokio::sync::watch;

use crate::config::ProxyConfig;

use super::ApiRuntimeState;
use super::events::ApiEventStore;

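// Spawns two background tasks: one mirrors config-reload notifications into the
// reload counter/timestamp and the event store, the other mirrors the admission
// gate into `admission_open`. Both loops exit when the watch sender is dropped.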
pub(super) fn spawn_runtime_watchers(
    config_rx: watch::Receiver<Arc<ProxyConfig>>,
    admission_rx: watch::Receiver<bool>,
    runtime_state: Arc<ApiRuntimeState>,
    runtime_events: Arc<ApiEventStore>,
) {
    let mut config_rx_reload = config_rx;
    let runtime_state_reload = runtime_state.clone();
    let runtime_events_reload = runtime_events.clone();
    tokio::spawn(async move {
        loop {
            if config_rx_reload.changed().await.is_err() {
                break;
            }
            runtime_state_reload
                .config_reload_count
                .fetch_add(1, Ordering::Relaxed);
            runtime_state_reload
                .last_config_reload_epoch_secs
                .store(now_epoch_secs(), Ordering::Relaxed);
            runtime_events_reload.record("config.reload.applied", "config receiver updated");
        }
    });

    let mut admission_rx_watch = admission_rx;
    tokio::spawn(async move {
        runtime_state
            .admission_open
            .store(*admission_rx_watch.borrow(), Ordering::Relaxed);
        runtime_events.record(
            "admission.state",
            format!("accepting_new_connections={}", *admission_rx_watch.borrow()),
        );
        loop {
            if admission_rx_watch.changed().await.is_err() {
                break;
            }
            let admission_open = *admission_rx_watch.borrow();
            runtime_state
                .admission_open
                .store(admission_open, Ordering::Relaxed);
            runtime_events.record(
                "admission.state",
                format!("accepting_new_connections={}", admission_open),
            );
        }
    });
}

fn now_epoch_secs() -> u64 {
    SystemTime::now()
        .duration_since(UNIX_EPOCH)
        .unwrap_or_default()
        .as_secs()
}
287 src/api/runtime_zero.rs Normal file
@@ -0,0 +1,287 @@
use std::sync::atomic::Ordering;

use serde::Serialize;

use crate::config::{MeFloorMode, MeWriterPickMode, ProxyConfig, UserMaxUniqueIpsMode};

use super::ApiShared;
use super::runtime_init::build_runtime_startup_summary;

#[derive(Serialize)]
pub(super) struct SystemInfoData {
    pub(super) version: String,
    pub(super) target_arch: String,
    pub(super) target_os: String,
    pub(super) build_profile: String,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub(super) git_commit: Option<String>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub(super) build_time_utc: Option<String>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub(super) rustc_version: Option<String>,
    pub(super) process_started_at_epoch_secs: u64,
    pub(super) uptime_seconds: f64,
    pub(super) config_path: String,
    pub(super) config_hash: String,
    pub(super) config_reload_count: u64,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub(super) last_config_reload_epoch_secs: Option<u64>,
}

#[derive(Serialize)]
pub(super) struct RuntimeGatesData {
    pub(super) accepting_new_connections: bool,
    pub(super) conditional_cast_enabled: bool,
    pub(super) me_runtime_ready: bool,
    pub(super) me2dc_fallback_enabled: bool,
    pub(super) use_middle_proxy: bool,
    pub(super) startup_status: &'static str,
    pub(super) startup_stage: String,
    pub(super) startup_progress_pct: f64,
}

#[derive(Serialize)]
pub(super) struct EffectiveTimeoutLimits {
    pub(super) client_handshake_secs: u64,
    pub(super) tg_connect_secs: u64,
    pub(super) client_keepalive_secs: u64,
    pub(super) client_ack_secs: u64,
    pub(super) me_one_retry: u8,
    pub(super) me_one_timeout_ms: u64,
}

#[derive(Serialize)]
pub(super) struct EffectiveUpstreamLimits {
    pub(super) connect_retry_attempts: u32,
    pub(super) connect_retry_backoff_ms: u64,
    pub(super) connect_budget_ms: u64,
    pub(super) unhealthy_fail_threshold: u32,
    pub(super) connect_failfast_hard_errors: bool,
}

#[derive(Serialize)]
pub(super) struct EffectiveMiddleProxyLimits {
    pub(super) floor_mode: &'static str,
    pub(super) adaptive_floor_idle_secs: u64,
    pub(super) adaptive_floor_min_writers_single_endpoint: u8,
    pub(super) adaptive_floor_min_writers_multi_endpoint: u8,
    pub(super) adaptive_floor_recover_grace_secs: u64,
    pub(super) adaptive_floor_writers_per_core_total: u16,
    pub(super) adaptive_floor_cpu_cores_override: u16,
    pub(super) adaptive_floor_max_extra_writers_single_per_core: u16,
    pub(super) adaptive_floor_max_extra_writers_multi_per_core: u16,
    pub(super) adaptive_floor_max_active_writers_per_core: u16,
    pub(super) adaptive_floor_max_warm_writers_per_core: u16,
    pub(super) adaptive_floor_max_active_writers_global: u32,
    pub(super) adaptive_floor_max_warm_writers_global: u32,
    pub(super) reconnect_max_concurrent_per_dc: u32,
    pub(super) reconnect_backoff_base_ms: u64,
    pub(super) reconnect_backoff_cap_ms: u64,
    pub(super) reconnect_fast_retry_count: u32,
    pub(super) writer_pick_mode: &'static str,
    pub(super) writer_pick_sample_size: u8,
    pub(super) me2dc_fallback: bool,
}

#[derive(Serialize)]
pub(super) struct EffectiveUserIpPolicyLimits {
    pub(super) mode: &'static str,
    pub(super) window_secs: u64,
}

#[derive(Serialize)]
pub(super) struct EffectiveLimitsData {
    pub(super) update_every_secs: u64,
    pub(super) me_reinit_every_secs: u64,
    pub(super) me_pool_force_close_secs: u64,
    pub(super) timeouts: EffectiveTimeoutLimits,
    pub(super) upstream: EffectiveUpstreamLimits,
    pub(super) middle_proxy: EffectiveMiddleProxyLimits,
    pub(super) user_ip_policy: EffectiveUserIpPolicyLimits,
}

#[derive(Serialize)]
pub(super) struct SecurityPostureData {
    pub(super) api_read_only: bool,
    pub(super) api_whitelist_enabled: bool,
    pub(super) api_whitelist_entries: usize,
    pub(super) api_auth_header_enabled: bool,
    pub(super) proxy_protocol_enabled: bool,
    pub(super) log_level: String,
    pub(super) telemetry_core_enabled: bool,
    pub(super) telemetry_user_enabled: bool,
    pub(super) telemetry_me_level: String,
}

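// Build metadata is read via option_env!, so binaries compiled without the
// TELEMT_GIT_COMMIT / vergen variables simply omit those optional fields.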
pub(super) fn build_system_info_data(
    shared: &ApiShared,
    _cfg: &ProxyConfig,
    revision: &str,
) -> SystemInfoData {
    let last_reload_epoch_secs = shared
        .runtime_state
        .last_config_reload_epoch_secs
        .load(Ordering::Relaxed);
    let last_config_reload_epoch_secs = (last_reload_epoch_secs > 0).then_some(last_reload_epoch_secs);

    let git_commit = option_env!("TELEMT_GIT_COMMIT")
        .or(option_env!("VERGEN_GIT_SHA"))
        .or(option_env!("GIT_COMMIT"))
        .map(ToString::to_string);
    let build_time_utc = option_env!("BUILD_TIME_UTC")
        .or(option_env!("VERGEN_BUILD_TIMESTAMP"))
        .map(ToString::to_string);
    let rustc_version = option_env!("RUSTC_VERSION")
        .or(option_env!("VERGEN_RUSTC_SEMVER"))
        .map(ToString::to_string);

    SystemInfoData {
        version: env!("CARGO_PKG_VERSION").to_string(),
        target_arch: std::env::consts::ARCH.to_string(),
        target_os: std::env::consts::OS.to_string(),
        build_profile: option_env!("PROFILE").unwrap_or("unknown").to_string(),
        git_commit,
        build_time_utc,
        rustc_version,
        process_started_at_epoch_secs: shared.runtime_state.process_started_at_epoch_secs,
        uptime_seconds: shared.stats.uptime_secs(),
        config_path: shared.config_path.display().to_string(),
        config_hash: revision.to_string(),
        config_reload_count: shared.runtime_state.config_reload_count.load(Ordering::Relaxed),
        last_config_reload_epoch_secs,
    }
}

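// ME readiness is reported as true when the middle proxy is disabled, since
// there is nothing to wait for in that mode.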
pub(super) async fn build_runtime_gates_data(
    shared: &ApiShared,
    cfg: &ProxyConfig,
) -> RuntimeGatesData {
    let startup_summary = build_runtime_startup_summary(shared).await;
    let me_runtime_ready = if !cfg.general.use_middle_proxy {
        true
    } else {
        shared
            .me_pool
            .read()
            .await
            .as_ref()
            .map(|pool| pool.is_runtime_ready())
            .unwrap_or(false)
    };

    RuntimeGatesData {
        accepting_new_connections: shared.runtime_state.admission_open.load(Ordering::Relaxed),
        conditional_cast_enabled: cfg.general.use_middle_proxy,
        me_runtime_ready,
        me2dc_fallback_enabled: cfg.general.me2dc_fallback,
        use_middle_proxy: cfg.general.use_middle_proxy,
        startup_status: startup_summary.status,
        startup_stage: startup_summary.stage,
        startup_progress_pct: startup_summary.progress_pct,
    }
}

pub(super) fn build_limits_effective_data(cfg: &ProxyConfig) -> EffectiveLimitsData {
    EffectiveLimitsData {
        update_every_secs: cfg.general.effective_update_every_secs(),
        me_reinit_every_secs: cfg.general.effective_me_reinit_every_secs(),
        me_pool_force_close_secs: cfg.general.effective_me_pool_force_close_secs(),
        timeouts: EffectiveTimeoutLimits {
            client_handshake_secs: cfg.timeouts.client_handshake,
            tg_connect_secs: cfg.timeouts.tg_connect,
            client_keepalive_secs: cfg.timeouts.client_keepalive,
            client_ack_secs: cfg.timeouts.client_ack,
            me_one_retry: cfg.timeouts.me_one_retry,
            me_one_timeout_ms: cfg.timeouts.me_one_timeout_ms,
        },
        upstream: EffectiveUpstreamLimits {
            connect_retry_attempts: cfg.general.upstream_connect_retry_attempts,
            connect_retry_backoff_ms: cfg.general.upstream_connect_retry_backoff_ms,
            connect_budget_ms: cfg.general.upstream_connect_budget_ms,
            unhealthy_fail_threshold: cfg.general.upstream_unhealthy_fail_threshold,
            connect_failfast_hard_errors: cfg.general.upstream_connect_failfast_hard_errors,
        },
        middle_proxy: EffectiveMiddleProxyLimits {
            floor_mode: me_floor_mode_label(cfg.general.me_floor_mode),
            adaptive_floor_idle_secs: cfg.general.me_adaptive_floor_idle_secs,
            adaptive_floor_min_writers_single_endpoint: cfg
                .general
                .me_adaptive_floor_min_writers_single_endpoint,
            adaptive_floor_min_writers_multi_endpoint: cfg
                .general
                .me_adaptive_floor_min_writers_multi_endpoint,
            adaptive_floor_recover_grace_secs: cfg.general.me_adaptive_floor_recover_grace_secs,
            adaptive_floor_writers_per_core_total: cfg
                .general
                .me_adaptive_floor_writers_per_core_total,
            adaptive_floor_cpu_cores_override: cfg
                .general
                .me_adaptive_floor_cpu_cores_override,
            adaptive_floor_max_extra_writers_single_per_core: cfg
                .general
                .me_adaptive_floor_max_extra_writers_single_per_core,
            adaptive_floor_max_extra_writers_multi_per_core: cfg
                .general
                .me_adaptive_floor_max_extra_writers_multi_per_core,
            adaptive_floor_max_active_writers_per_core: cfg
                .general
                .me_adaptive_floor_max_active_writers_per_core,
            adaptive_floor_max_warm_writers_per_core: cfg
                .general
                .me_adaptive_floor_max_warm_writers_per_core,
            adaptive_floor_max_active_writers_global: cfg
                .general
                .me_adaptive_floor_max_active_writers_global,
            adaptive_floor_max_warm_writers_global: cfg
                .general
                .me_adaptive_floor_max_warm_writers_global,
            reconnect_max_concurrent_per_dc: cfg.general.me_reconnect_max_concurrent_per_dc,
            reconnect_backoff_base_ms: cfg.general.me_reconnect_backoff_base_ms,
            reconnect_backoff_cap_ms: cfg.general.me_reconnect_backoff_cap_ms,
            reconnect_fast_retry_count: cfg.general.me_reconnect_fast_retry_count,
            writer_pick_mode: me_writer_pick_mode_label(cfg.general.me_writer_pick_mode),
            writer_pick_sample_size: cfg.general.me_writer_pick_sample_size,
            me2dc_fallback: cfg.general.me2dc_fallback,
        },
        user_ip_policy: EffectiveUserIpPolicyLimits {
            mode: user_max_unique_ips_mode_label(cfg.access.user_max_unique_ips_mode),
            window_secs: cfg.access.user_max_unique_ips_window_secs,
        },
    }
}

pub(super) fn build_security_posture_data(cfg: &ProxyConfig) -> SecurityPostureData {
    SecurityPostureData {
        api_read_only: cfg.server.api.read_only,
        api_whitelist_enabled: !cfg.server.api.whitelist.is_empty(),
        api_whitelist_entries: cfg.server.api.whitelist.len(),
        api_auth_header_enabled: !cfg.server.api.auth_header.is_empty(),
        proxy_protocol_enabled: cfg.server.proxy_protocol,
        log_level: cfg.general.log_level.to_string(),
        telemetry_core_enabled: cfg.general.telemetry.core_enabled,
        telemetry_user_enabled: cfg.general.telemetry.user_enabled,
        telemetry_me_level: cfg.general.telemetry.me_level.to_string(),
    }
}

fn user_max_unique_ips_mode_label(mode: UserMaxUniqueIpsMode) -> &'static str {
    match mode {
        UserMaxUniqueIpsMode::ActiveWindow => "active_window",
        UserMaxUniqueIpsMode::TimeWindow => "time_window",
        UserMaxUniqueIpsMode::Combined => "combined",
    }
}

fn me_floor_mode_label(mode: MeFloorMode) -> &'static str {
    match mode {
        MeFloorMode::Static => "static",
        MeFloorMode::Adaptive => "adaptive",
    }
}

fn me_writer_pick_mode_label(mode: MeWriterPickMode) -> &'static str {
    match mode {
        MeWriterPickMode::SortedRr => "sorted_rr",
        MeWriterPickMode::P2c => "p2c",
    }
}
499 src/api/users.rs Normal file
@@ -0,0 +1,499 @@
use std::net::IpAddr;

use hyper::StatusCode;

use crate::config::ProxyConfig;
use crate::ip_tracker::UserIpTracker;
use crate::stats::Stats;

use super::ApiShared;
use super::config_store::{
    ensure_expected_revision, load_config_from_disk, save_config_to_disk,
};
use super::model::{
    ApiFailure, CreateUserRequest, CreateUserResponse, PatchUserRequest, RotateSecretRequest,
    UserInfo, UserLinks, is_valid_ad_tag, is_valid_user_secret, is_valid_username,
    parse_optional_expiration, random_user_secret,
};

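// All mutating handlers below follow the same pattern: take the mutation lock,
// reload the config from disk, verify the caller's expected revision (optimistic
// concurrency), apply the change, validate, then persist and return the new revision.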
pub(super) async fn create_user(
    body: CreateUserRequest,
    expected_revision: Option<String>,
    shared: &ApiShared,
) -> Result<(CreateUserResponse, String), ApiFailure> {
    if !is_valid_username(&body.username) {
        return Err(ApiFailure::bad_request(
            "username must match [A-Za-z0-9_.-] and be 1..64 chars",
        ));
    }

    let secret = match body.secret {
        Some(secret) => {
            if !is_valid_user_secret(&secret) {
                return Err(ApiFailure::bad_request(
                    "secret must be exactly 32 hex characters",
                ));
            }
            secret
        }
        None => random_user_secret(),
    };

    if let Some(ad_tag) = body.user_ad_tag.as_ref() && !is_valid_ad_tag(ad_tag) {
        return Err(ApiFailure::bad_request(
            "user_ad_tag must be exactly 32 hex characters",
        ));
    }

    let expiration = parse_optional_expiration(body.expiration_rfc3339.as_deref())?;
    let _guard = shared.mutation_lock.lock().await;
    let mut cfg = load_config_from_disk(&shared.config_path).await?;
    ensure_expected_revision(&shared.config_path, expected_revision.as_deref()).await?;

    if cfg.access.users.contains_key(&body.username) {
        return Err(ApiFailure::new(
            StatusCode::CONFLICT,
            "user_exists",
            "User already exists",
        ));
    }

    cfg.access.users.insert(body.username.clone(), secret.clone());
    if let Some(ad_tag) = body.user_ad_tag {
        cfg.access.user_ad_tags.insert(body.username.clone(), ad_tag);
    }
    if let Some(limit) = body.max_tcp_conns {
        cfg.access.user_max_tcp_conns.insert(body.username.clone(), limit);
    }
    if let Some(expiration) = expiration {
        cfg.access
            .user_expirations
            .insert(body.username.clone(), expiration);
    }
    if let Some(quota) = body.data_quota_bytes {
        cfg.access.user_data_quota.insert(body.username.clone(), quota);
    }

    let updated_limit = body.max_unique_ips;
    if let Some(limit) = updated_limit {
        cfg.access
            .user_max_unique_ips
            .insert(body.username.clone(), limit);
    }

    cfg.validate()
        .map_err(|e| ApiFailure::bad_request(format!("config validation failed: {}", e)))?;

    let revision = save_config_to_disk(&shared.config_path, &cfg).await?;
    drop(_guard);

    if let Some(limit) = updated_limit {
        shared.ip_tracker.set_user_limit(&body.username, limit).await;
    }

    let users = users_from_config(
        &cfg,
        &shared.stats,
        &shared.ip_tracker,
        shared.startup_detected_ip_v4,
        shared.startup_detected_ip_v6,
    )
    .await;
    let user = users
        .into_iter()
        .find(|entry| entry.username == body.username)
        .unwrap_or(UserInfo {
            username: body.username.clone(),
            user_ad_tag: None,
            max_tcp_conns: None,
            expiration_rfc3339: None,
            data_quota_bytes: None,
            max_unique_ips: updated_limit,
            current_connections: 0,
            active_unique_ips: 0,
            active_unique_ips_list: Vec::new(),
            recent_unique_ips: 0,
            recent_unique_ips_list: Vec::new(),
            total_octets: 0,
            links: build_user_links(
                &cfg,
                &secret,
                shared.startup_detected_ip_v4,
                shared.startup_detected_ip_v6,
            ),
        });

    Ok((CreateUserResponse { user, secret }, revision))
}

pub(super) async fn patch_user(
    user: &str,
    body: PatchUserRequest,
    expected_revision: Option<String>,
    shared: &ApiShared,
) -> Result<(UserInfo, String), ApiFailure> {
    if let Some(secret) = body.secret.as_ref() && !is_valid_user_secret(secret) {
        return Err(ApiFailure::bad_request(
            "secret must be exactly 32 hex characters",
        ));
    }
    if let Some(ad_tag) = body.user_ad_tag.as_ref() && !is_valid_ad_tag(ad_tag) {
        return Err(ApiFailure::bad_request(
            "user_ad_tag must be exactly 32 hex characters",
        ));
    }
    let expiration = parse_optional_expiration(body.expiration_rfc3339.as_deref())?;
    let _guard = shared.mutation_lock.lock().await;
    let mut cfg = load_config_from_disk(&shared.config_path).await?;
    ensure_expected_revision(&shared.config_path, expected_revision.as_deref()).await?;

    if !cfg.access.users.contains_key(user) {
        return Err(ApiFailure::new(
            StatusCode::NOT_FOUND,
            "not_found",
            "User not found",
        ));
    }

    if let Some(secret) = body.secret {
        cfg.access.users.insert(user.to_string(), secret);
    }
    if let Some(ad_tag) = body.user_ad_tag {
        cfg.access.user_ad_tags.insert(user.to_string(), ad_tag);
    }
    if let Some(limit) = body.max_tcp_conns {
        cfg.access.user_max_tcp_conns.insert(user.to_string(), limit);
    }
    if let Some(expiration) = expiration {
        cfg.access.user_expirations.insert(user.to_string(), expiration);
    }
    if let Some(quota) = body.data_quota_bytes {
        cfg.access.user_data_quota.insert(user.to_string(), quota);
    }

    let mut updated_limit = None;
    if let Some(limit) = body.max_unique_ips {
        cfg.access.user_max_unique_ips.insert(user.to_string(), limit);
        updated_limit = Some(limit);
    }

    cfg.validate()
        .map_err(|e| ApiFailure::bad_request(format!("config validation failed: {}", e)))?;

    let revision = save_config_to_disk(&shared.config_path, &cfg).await?;
    drop(_guard);
    if let Some(limit) = updated_limit {
        shared.ip_tracker.set_user_limit(user, limit).await;
    }
    let users = users_from_config(
        &cfg,
        &shared.stats,
        &shared.ip_tracker,
        shared.startup_detected_ip_v4,
        shared.startup_detected_ip_v6,
    )
    .await;
    let user_info = users
        .into_iter()
        .find(|entry| entry.username == user)
        .ok_or_else(|| ApiFailure::internal("failed to build updated user view"))?;

    Ok((user_info, revision))
}

pub(super) async fn rotate_secret(
    user: &str,
    body: RotateSecretRequest,
    expected_revision: Option<String>,
    shared: &ApiShared,
) -> Result<(CreateUserResponse, String), ApiFailure> {
    let secret = body.secret.unwrap_or_else(random_user_secret);
    if !is_valid_user_secret(&secret) {
        return Err(ApiFailure::bad_request(
            "secret must be exactly 32 hex characters",
        ));
    }

    let _guard = shared.mutation_lock.lock().await;
    let mut cfg = load_config_from_disk(&shared.config_path).await?;
    ensure_expected_revision(&shared.config_path, expected_revision.as_deref()).await?;

    if !cfg.access.users.contains_key(user) {
        return Err(ApiFailure::new(
            StatusCode::NOT_FOUND,
            "not_found",
            "User not found",
        ));
    }

    cfg.access.users.insert(user.to_string(), secret.clone());
    cfg.validate()
        .map_err(|e| ApiFailure::bad_request(format!("config validation failed: {}", e)))?;
    let revision = save_config_to_disk(&shared.config_path, &cfg).await?;
    drop(_guard);

    let users = users_from_config(
        &cfg,
        &shared.stats,
        &shared.ip_tracker,
        shared.startup_detected_ip_v4,
        shared.startup_detected_ip_v6,
    )
    .await;
    let user_info = users
        .into_iter()
        .find(|entry| entry.username == user)
        .ok_or_else(|| ApiFailure::internal("failed to build updated user view"))?;

    Ok((
        CreateUserResponse {
            user: user_info,
            secret,
        },
        revision,
    ))
}

pub(super) async fn delete_user(
    user: &str,
    expected_revision: Option<String>,
    shared: &ApiShared,
) -> Result<(String, String), ApiFailure> {
    let _guard = shared.mutation_lock.lock().await;
    let mut cfg = load_config_from_disk(&shared.config_path).await?;
    ensure_expected_revision(&shared.config_path, expected_revision.as_deref()).await?;

    if !cfg.access.users.contains_key(user) {
        return Err(ApiFailure::new(
            StatusCode::NOT_FOUND,
            "not_found",
            "User not found",
        ));
    }
    if cfg.access.users.len() <= 1 {
        return Err(ApiFailure::new(
            StatusCode::CONFLICT,
            "last_user_forbidden",
            "Cannot delete the last configured user",
        ));
    }

    cfg.access.users.remove(user);
    cfg.access.user_ad_tags.remove(user);
    cfg.access.user_max_tcp_conns.remove(user);
    cfg.access.user_expirations.remove(user);
    cfg.access.user_data_quota.remove(user);
    cfg.access.user_max_unique_ips.remove(user);

    cfg.validate()
        .map_err(|e| ApiFailure::bad_request(format!("config validation failed: {}", e)))?;
    let revision = save_config_to_disk(&shared.config_path, &cfg).await?;
    drop(_guard);
    shared.ip_tracker.remove_user_limit(user).await;
    shared.ip_tracker.clear_user_ips(user).await;

    Ok((user.to_string(), revision))
}

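// Builds the API view of every configured user, joining config entries with
// live stats and the IP tracker's active/recent IP sets.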
pub(super) async fn users_from_config(
    cfg: &ProxyConfig,
    stats: &Stats,
    ip_tracker: &UserIpTracker,
    startup_detected_ip_v4: Option<IpAddr>,
    startup_detected_ip_v6: Option<IpAddr>,
) -> Vec<UserInfo> {
    let mut names = cfg.access.users.keys().cloned().collect::<Vec<_>>();
    names.sort();
    let active_ip_lists = ip_tracker.get_active_ips_for_users(&names).await;
    let recent_ip_lists = ip_tracker.get_recent_ips_for_users(&names).await;

    let mut users = Vec::with_capacity(names.len());
    for username in names {
        let active_ip_list = active_ip_lists
            .get(&username)
            .cloned()
            .unwrap_or_else(Vec::new);
        let recent_ip_list = recent_ip_lists
            .get(&username)
            .cloned()
            .unwrap_or_else(Vec::new);
        let links = cfg
            .access
            .users
            .get(&username)
            .map(|secret| {
                build_user_links(
                    cfg,
                    secret,
                    startup_detected_ip_v4,
                    startup_detected_ip_v6,
                )
            })
            .unwrap_or(UserLinks {
                classic: Vec::new(),
                secure: Vec::new(),
                tls: Vec::new(),
            });
        users.push(UserInfo {
            user_ad_tag: cfg.access.user_ad_tags.get(&username).cloned(),
            max_tcp_conns: cfg.access.user_max_tcp_conns.get(&username).copied(),
            expiration_rfc3339: cfg
                .access
                .user_expirations
                .get(&username)
                .map(chrono::DateTime::<chrono::Utc>::to_rfc3339),
            data_quota_bytes: cfg.access.user_data_quota.get(&username).copied(),
            max_unique_ips: cfg.access.user_max_unique_ips.get(&username).copied(),
            current_connections: stats.get_user_curr_connects(&username),
            active_unique_ips: active_ip_list.len(),
            active_unique_ips_list: active_ip_list,
            recent_unique_ips: recent_ip_list.len(),
            recent_unique_ips_list: recent_ip_list,
            total_octets: stats.get_user_total_octets(&username),
            links,
            username,
        });
    }
    users
}

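// Link formats mirror the proxy modes: the plain secret for classic, a
// "dd"-prefixed secret for secure (padded), and "ee" plus a hex-encoded
// fake-TLS domain for tls links.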
fn build_user_links(
    cfg: &ProxyConfig,
    secret: &str,
    startup_detected_ip_v4: Option<IpAddr>,
    startup_detected_ip_v6: Option<IpAddr>,
) -> UserLinks {
    let hosts = resolve_link_hosts(cfg, startup_detected_ip_v4, startup_detected_ip_v6);
    let port = cfg.general.links.public_port.unwrap_or(cfg.server.port);
    let tls_domains = resolve_tls_domains(cfg);

    let mut classic = Vec::new();
    let mut secure = Vec::new();
    let mut tls = Vec::new();

    for host in &hosts {
        if cfg.general.modes.classic {
            classic.push(format!(
                "tg://proxy?server={}&port={}&secret={}",
                host, port, secret
            ));
        }
        if cfg.general.modes.secure {
            secure.push(format!(
                "tg://proxy?server={}&port={}&secret=dd{}",
                host, port, secret
            ));
        }
        if cfg.general.modes.tls {
            for domain in &tls_domains {
                let domain_hex = hex::encode(domain);
                tls.push(format!(
                    "tg://proxy?server={}&port={}&secret=ee{}{}",
                    host, port, secret, domain_hex
                ));
            }
        }
    }

    UserLinks {
        classic,
        secure,
        tls,
    }
}

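// Host resolution order: explicit links.public_host, then IPs detected at
// startup, then per-listener announce values, finally the legacy listen addresses.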
fn resolve_link_hosts(
    cfg: &ProxyConfig,
    startup_detected_ip_v4: Option<IpAddr>,
    startup_detected_ip_v6: Option<IpAddr>,
) -> Vec<String> {
    if let Some(host) = cfg
        .general
        .links
        .public_host
        .as_deref()
        .map(str::trim)
        .filter(|value| !value.is_empty())
    {
        return vec![host.to_string()];
    }

    let mut startup_hosts = Vec::new();
    if let Some(ip) = startup_detected_ip_v4 {
        push_unique_host(&mut startup_hosts, &ip.to_string());
    }
    if let Some(ip) = startup_detected_ip_v6 {
        push_unique_host(&mut startup_hosts, &ip.to_string());
    }
    if !startup_hosts.is_empty() {
        return startup_hosts;
    }

    let mut hosts = Vec::new();
    for listener in &cfg.server.listeners {
        if let Some(host) = listener
            .announce
            .as_deref()
            .map(str::trim)
            .filter(|value| !value.is_empty())
        {
            push_unique_host(&mut hosts, host);
            continue;
        }
        if let Some(ip) = listener.announce_ip {
            if !ip.is_unspecified() {
                push_unique_host(&mut hosts, &ip.to_string());
            }
            continue;
        }
        if !listener.ip.is_unspecified() {
            push_unique_host(&mut hosts, &listener.ip.to_string());
        }
    }

    if hosts.is_empty() {
        if let Some(host) = cfg.server.listen_addr_ipv4.as_deref() {
            push_host_from_legacy_listen(&mut hosts, host);
        }
        if let Some(host) = cfg.server.listen_addr_ipv6.as_deref() {
            push_host_from_legacy_listen(&mut hosts, host);
        }
    }

    hosts
}

fn push_host_from_legacy_listen(hosts: &mut Vec<String>, raw: &str) {
    let candidate = raw.trim();
    if candidate.is_empty() {
        return;
    }

    match candidate.parse::<IpAddr>() {
        Ok(ip) if ip.is_unspecified() => {}
        Ok(ip) => push_unique_host(hosts, &ip.to_string()),
        Err(_) => push_unique_host(hosts, candidate),
    }
}

fn push_unique_host(hosts: &mut Vec<String>, candidate: &str) {
    if !hosts.iter().any(|existing| existing == candidate) {
        hosts.push(candidate.to_string());
    }
}

fn resolve_tls_domains(cfg: &ProxyConfig) -> Vec<&str> {
    let mut domains = Vec::with_capacity(1 + cfg.censorship.tls_domains.len());
    let primary = cfg.censorship.tls_domain.as_str();
    if !primary.is_empty() {
        domains.push(primary);
    }
    for domain in &cfg.censorship.tls_domains {
        let value = domain.as_str();
        if value.is_empty() || domains.contains(&value) {
            continue;
        }
        domains.push(value);
    }
    domains
}
315 src/cli.rs Normal file
@@ -0,0 +1,315 @@
//! CLI commands: --init (fire-and-forget setup)

use std::fs;
use std::path::{Path, PathBuf};
use std::process::Command;
use rand::Rng;

/// Options for the init command
pub struct InitOptions {
    pub port: u16,
    pub domain: String,
    pub secret: Option<String>,
    pub username: String,
    pub config_dir: PathBuf,
    pub no_start: bool,
}

impl Default for InitOptions {
    fn default() -> Self {
        Self {
            port: 443,
            domain: "www.google.com".to_string(),
            secret: None,
            username: "user".to_string(),
            config_dir: PathBuf::from("/etc/telemt"),
            no_start: false,
        }
    }
}

/// Parse --init subcommand options from CLI args.
///
/// Returns `Some(InitOptions)` if `--init` was found, `None` otherwise.
pub fn parse_init_args(args: &[String]) -> Option<InitOptions> {
    if !args.iter().any(|a| a == "--init") {
        return None;
    }

    let mut opts = InitOptions::default();
    let mut i = 0;

    while i < args.len() {
        match args[i].as_str() {
            "--port" => {
                i += 1;
                if i < args.len() {
                    opts.port = args[i].parse().unwrap_or(443);
                }
            }
            "--domain" => {
                i += 1;
                if i < args.len() {
                    opts.domain = args[i].clone();
                }
            }
            "--secret" => {
                i += 1;
                if i < args.len() {
                    opts.secret = Some(args[i].clone());
                }
            }
            "--user" => {
                i += 1;
                if i < args.len() {
                    opts.username = args[i].clone();
                }
            }
            "--config-dir" => {
                i += 1;
                if i < args.len() {
                    opts.config_dir = PathBuf::from(&args[i]);
                }
            }
            "--no-start" => {
                opts.no_start = true;
            }
            _ => {}
        }
        i += 1;
    }

    Some(opts)
}

/// Run the fire-and-forget setup.
|
||||
pub fn run_init(opts: InitOptions) -> Result<(), Box<dyn std::error::Error>> {
|
||||
eprintln!("[telemt] Fire-and-forget setup");
|
||||
eprintln!();
|
||||
|
||||
// 1. Generate or validate secret
|
||||
let secret = match opts.secret {
|
||||
Some(s) => {
|
||||
if s.len() != 32 || !s.chars().all(|c| c.is_ascii_hexdigit()) {
|
||||
eprintln!("[error] Secret must be exactly 32 hex characters");
|
||||
std::process::exit(1);
|
||||
}
|
||||
s
|
||||
}
|
||||
None => generate_secret(),
|
||||
};
|
||||
|
||||
eprintln!("[+] Secret: {}", secret);
|
||||
eprintln!("[+] User: {}", opts.username);
|
||||
eprintln!("[+] Port: {}", opts.port);
|
||||
eprintln!("[+] Domain: {}", opts.domain);
|
||||
|
||||
// 2. Create config directory
|
||||
fs::create_dir_all(&opts.config_dir)?;
|
||||
let config_path = opts.config_dir.join("config.toml");
|
||||
|
||||
// 3. Write config
|
||||
let config_content = generate_config(&opts.username, &secret, opts.port, &opts.domain);
|
||||
fs::write(&config_path, &config_content)?;
|
||||
eprintln!("[+] Config written to {}", config_path.display());
|
||||
|
||||
// 4. Write systemd unit
|
||||
let exe_path = std::env::current_exe()
|
||||
.unwrap_or_else(|_| PathBuf::from("/usr/local/bin/telemt"));
|
||||
|
||||
let unit_path = Path::new("/etc/systemd/system/telemt.service");
|
||||
let unit_content = generate_systemd_unit(&exe_path, &config_path);
|
||||
|
||||
match fs::write(unit_path, &unit_content) {
|
||||
Ok(()) => {
|
||||
eprintln!("[+] Systemd unit written to {}", unit_path.display());
|
||||
}
|
||||
Err(e) => {
|
||||
eprintln!("[!] Cannot write systemd unit (run as root?): {}", e);
|
||||
eprintln!("[!] Manual unit file content:");
|
||||
eprintln!("{}", unit_content);
|
||||
|
||||
// Still print links and config
|
||||
print_links(&opts.username, &secret, opts.port, &opts.domain);
|
||||
return Ok(());
|
||||
}
|
||||
}
|
||||
|
||||
// 5. Reload systemd
|
||||
run_cmd("systemctl", &["daemon-reload"]);
|
||||
|
||||
// 6. Enable service
|
||||
run_cmd("systemctl", &["enable", "telemt.service"]);
|
||||
eprintln!("[+] Service enabled");
|
||||
|
||||
// 7. Start service (unless --no-start)
|
||||
if !opts.no_start {
|
||||
run_cmd("systemctl", &["start", "telemt.service"]);
|
||||
eprintln!("[+] Service started");
|
||||
|
||||
// Brief delay then check status
|
||||
std::thread::sleep(std::time::Duration::from_secs(1));
|
||||
let status = Command::new("systemctl")
|
||||
.args(["is-active", "telemt.service"])
|
||||
.output();
|
||||
|
||||
match status {
|
||||
Ok(out) if out.status.success() => {
|
||||
eprintln!("[+] Service is running");
|
||||
}
|
||||
_ => {
|
||||
eprintln!("[!] Service may not have started correctly");
|
||||
eprintln!("[!] Check: journalctl -u telemt.service -n 20");
|
||||
}
|
||||
}
|
||||
} else {
|
||||
eprintln!("[+] Service not started (--no-start)");
|
||||
eprintln!("[+] Start manually: systemctl start telemt.service");
|
||||
}
|
||||
|
||||
eprintln!();
|
||||
|
||||
// 8. Print links
|
||||
print_links(&opts.username, &secret, opts.port, &opts.domain);
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn generate_secret() -> String {
|
||||
let mut rng = rand::rng();
|
||||
let bytes: Vec<u8> = (0..16).map(|_| rng.random::<u8>()).collect();
|
||||
hex::encode(bytes)
|
||||
}
|
||||
|
||||
fn generate_config(username: &str, secret: &str, port: u16, domain: &str) -> String {
|
||||
format!(
|
||||
r#"# Telemt MTProxy — auto-generated config
|
||||
# Re-run `telemt --init` to regenerate
|
||||
|
||||
show_link = ["{username}"]
|
||||
|
||||
[general]
|
||||
# prefer_ipv6 is deprecated; use [network].prefer
|
||||
prefer_ipv6 = false
|
||||
fast_mode = true
|
||||
use_middle_proxy = false
|
||||
log_level = "normal"
|
||||
desync_all_full = false
|
||||
update_every = 43200
|
||||
hardswap = false
|
||||
me_pool_drain_ttl_secs = 90
|
||||
me_pool_min_fresh_ratio = 0.8
|
||||
me_reinit_drain_timeout_secs = 120
|
||||
|
||||
[network]
|
||||
ipv4 = true
|
||||
ipv6 = true
|
||||
prefer = 4
|
||||
multipath = false
|
||||
|
||||
[general.modes]
|
||||
classic = false
|
||||
secure = false
|
||||
tls = true
|
||||
|
||||
[server]
|
||||
port = {port}
|
||||
listen_addr_ipv4 = "0.0.0.0"
|
||||
listen_addr_ipv6 = "::"
|
||||
|
||||
[[server.listeners]]
|
||||
ip = "0.0.0.0"
|
||||
# reuse_allow = false # Set true only when intentionally running multiple telemt instances on same port
|
||||
|
||||
[[server.listeners]]
|
||||
ip = "::"
|
||||
|
||||
[timeouts]
|
||||
client_handshake = 15
|
||||
tg_connect = 10
|
||||
client_keepalive = 60
|
||||
client_ack = 300
|
||||
|
||||
[censorship]
|
||||
tls_domain = "{domain}"
|
||||
mask = true
|
||||
mask_port = 443
|
||||
fake_cert_len = 2048
|
||||
tls_full_cert_ttl_secs = 90
|
||||
|
||||
[access]
|
||||
replay_check_len = 65536
|
||||
replay_window_secs = 1800
|
||||
ignore_time_skew = false
|
||||
|
||||
[access.users]
|
||||
{username} = "{secret}"
|
||||
|
||||
[[upstreams]]
|
||||
type = "direct"
|
||||
enabled = true
|
||||
weight = 10
|
||||
"#,
|
||||
username = username,
|
||||
secret = secret,
|
||||
port = port,
|
||||
domain = domain,
|
||||
)
|
||||
}
|
||||
|
||||
fn generate_systemd_unit(exe_path: &Path, config_path: &Path) -> String {
|
||||
format!(
|
||||
r#"[Unit]
|
||||
Description=Telemt MTProxy
|
||||
Documentation=https://github.com/nicepkg/telemt
|
||||
After=network-online.target
|
||||
Wants=network-online.target
|
||||
|
||||
[Service]
|
||||
Type=simple
|
||||
ExecStart={exe} {config}
|
||||
Restart=always
|
||||
RestartSec=5
|
||||
LimitNOFILE=65535
|
||||
# Security hardening
|
||||
NoNewPrivileges=true
|
||||
ProtectSystem=strict
|
||||
ProtectHome=true
|
||||
ReadWritePaths=/etc/telemt
|
||||
PrivateTmp=true
|
||||
|
||||
[Install]
|
||||
WantedBy=multi-user.target
|
||||
"#,
|
||||
exe = exe_path.display(),
|
||||
config = config_path.display(),
|
||||
)
|
||||
}
|
||||
|
||||
fn run_cmd(cmd: &str, args: &[&str]) {
|
||||
match Command::new(cmd).args(args).output() {
|
||||
Ok(output) => {
|
||||
if !output.status.success() {
|
||||
let stderr = String::from_utf8_lossy(&output.stderr);
|
||||
eprintln!("[!] {} {} failed: {}", cmd, args.join(" "), stderr.trim());
|
||||
}
|
||||
}
|
||||
Err(e) => {
|
||||
eprintln!("[!] Failed to run {} {}: {}", cmd, args.join(" "), e);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
fn print_links(username: &str, secret: &str, port: u16, domain: &str) {
|
||||
let domain_hex = hex::encode(domain);
|
||||
|
||||
println!("=== Proxy Links ===");
|
||||
println!("[{}]", username);
|
||||
println!(" EE-TLS: tg://proxy?server=YOUR_SERVER_IP&port={}&secret=ee{}{}",
|
||||
port, secret, domain_hex);
|
||||
println!();
|
||||
println!("Replace YOUR_SERVER_IP with your server's public IP.");
|
||||
println!("The proxy will auto-detect and display the correct link on startup.");
|
||||
println!("Check: journalctl -u telemt.service | head -30");
|
||||
println!("===================");
|
||||
}
|
||||
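`parse_init_args` is deliberately forgiving: unknown flags are ignored and a malformed `--port` falls back to 443. A sketch of how a binary entry point might dispatch to it (main.rs is not part of this diff, so the surrounding function is hypothetical):

    // Hypothetical call site; only parse_init_args/run_init come from the diff above.
    fn main() {
        let args: Vec<String> = std::env::args().skip(1).collect();
        if let Some(opts) = cli::parse_init_args(&args) {
            if let Err(e) = cli::run_init(opts) {
                eprintln!("[error] init failed: {}", e);
                std::process::exit(1);
            }
            return; // fire-and-forget: do not continue into normal startup
        }
        // ... normal proxy startup ...
    }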
src/config/defaults.rs (new file, 631 lines)
@@ -0,0 +1,631 @@
use std::collections::HashMap;
use ipnetwork::IpNetwork;
use serde::Deserialize;

// Helper defaults kept private to the config module.
const DEFAULT_NETWORK_IPV6: Option<bool> = Some(false);
const DEFAULT_STUN_TCP_FALLBACK: bool = true;
const DEFAULT_MIDDLE_PROXY_WARM_STANDBY: usize = 16;
const DEFAULT_ME_RECONNECT_MAX_CONCURRENT_PER_DC: u32 = 8;
const DEFAULT_ME_RECONNECT_FAST_RETRY_COUNT: u32 = 16;
const DEFAULT_ME_SINGLE_ENDPOINT_SHADOW_WRITERS: u8 = 2;
const DEFAULT_ME_ADAPTIVE_FLOOR_IDLE_SECS: u64 = 90;
const DEFAULT_ME_ADAPTIVE_FLOOR_MIN_WRITERS_SINGLE_ENDPOINT: u8 = 1;
const DEFAULT_ME_ADAPTIVE_FLOOR_MIN_WRITERS_MULTI_ENDPOINT: u8 = 1;
const DEFAULT_ME_ADAPTIVE_FLOOR_RECOVER_GRACE_SECS: u64 = 180;
const DEFAULT_ME_ADAPTIVE_FLOOR_WRITERS_PER_CORE_TOTAL: u16 = 48;
const DEFAULT_ME_ADAPTIVE_FLOOR_CPU_CORES_OVERRIDE: u16 = 0;
const DEFAULT_ME_ADAPTIVE_FLOOR_MAX_EXTRA_WRITERS_SINGLE_PER_CORE: u16 = 1;
const DEFAULT_ME_ADAPTIVE_FLOOR_MAX_EXTRA_WRITERS_MULTI_PER_CORE: u16 = 2;
const DEFAULT_ME_ADAPTIVE_FLOOR_MAX_ACTIVE_WRITERS_PER_CORE: u16 = 64;
const DEFAULT_ME_ADAPTIVE_FLOOR_MAX_WARM_WRITERS_PER_CORE: u16 = 64;
const DEFAULT_ME_ADAPTIVE_FLOOR_MAX_ACTIVE_WRITERS_GLOBAL: u32 = 256;
const DEFAULT_ME_ADAPTIVE_FLOOR_MAX_WARM_WRITERS_GLOBAL: u32 = 256;
const DEFAULT_ME_WRITER_CMD_CHANNEL_CAPACITY: usize = 1024;
const DEFAULT_ME_ROUTE_CHANNEL_CAPACITY: usize = 512;
const DEFAULT_ME_C2ME_CHANNEL_CAPACITY: usize = 256;
const DEFAULT_ME_WRITER_PICK_SAMPLE_SIZE: u8 = 3;
const DEFAULT_ME_HEALTH_INTERVAL_MS_UNHEALTHY: u64 = 1000;
const DEFAULT_ME_HEALTH_INTERVAL_MS_HEALTHY: u64 = 3000;
const DEFAULT_ME_ADMISSION_POLL_MS: u64 = 1000;
const DEFAULT_ME_WARN_RATE_LIMIT_MS: u64 = 5000;
const DEFAULT_USER_MAX_UNIQUE_IPS_WINDOW_SECS: u64 = 30;
const DEFAULT_UPSTREAM_CONNECT_RETRY_ATTEMPTS: u32 = 2;
const DEFAULT_UPSTREAM_UNHEALTHY_FAIL_THRESHOLD: u32 = 5;
const DEFAULT_UPSTREAM_CONNECT_BUDGET_MS: u64 = 3000;
const DEFAULT_LISTEN_ADDR_IPV6: &str = "::";
const DEFAULT_ACCESS_USER: &str = "default";
const DEFAULT_ACCESS_SECRET: &str = "00000000000000000000000000000000";

pub(crate) fn default_true() -> bool {
    true
}

pub(crate) fn default_port() -> u16 {
    443
}

pub(crate) fn default_tls_domain() -> String {
    "petrovich.ru".to_string()
}

pub(crate) fn default_mask_port() -> u16 {
    443
}

pub(crate) fn default_fake_cert_len() -> usize {
    2048
}

pub(crate) fn default_tls_front_dir() -> String {
    "tlsfront".to_string()
}

pub(crate) fn default_replay_check_len() -> usize {
    65_536
}

pub(crate) fn default_replay_window_secs() -> u64 {
    1800
}

pub(crate) fn default_handshake_timeout() -> u64 {
    30
}

pub(crate) fn default_connect_timeout() -> u64 {
    10
}

pub(crate) fn default_keepalive() -> u64 {
    60
}

pub(crate) fn default_ack_timeout() -> u64 {
    300
}

pub(crate) fn default_me_one_retry() -> u8 {
    12
}

pub(crate) fn default_me_one_timeout() -> u64 {
    1200
}

pub(crate) fn default_listen_addr() -> String {
    "0.0.0.0".to_string()
}

pub(crate) fn default_listen_addr_ipv4() -> Option<String> {
    Some(default_listen_addr())
}

pub(crate) fn default_weight() -> u16 {
    1
}

pub(crate) fn default_metrics_whitelist() -> Vec<IpNetwork> {
    vec![
        "127.0.0.1/32".parse().unwrap(),
        "::1/128".parse().unwrap(),
    ]
}

pub(crate) fn default_api_listen() -> String {
    "127.0.0.1:9091".to_string()
}

pub(crate) fn default_api_whitelist() -> Vec<IpNetwork> {
    default_metrics_whitelist()
}

pub(crate) fn default_api_request_body_limit_bytes() -> usize {
    64 * 1024
}

pub(crate) fn default_api_minimal_runtime_enabled() -> bool {
    false
}

pub(crate) fn default_api_minimal_runtime_cache_ttl_ms() -> u64 {
    1000
}

pub(crate) fn default_api_runtime_edge_enabled() -> bool { false }
pub(crate) fn default_api_runtime_edge_cache_ttl_ms() -> u64 { 1000 }
pub(crate) fn default_api_runtime_edge_top_n() -> usize { 10 }
pub(crate) fn default_api_runtime_edge_events_capacity() -> usize { 256 }

pub(crate) fn default_proxy_protocol_header_timeout_ms() -> u64 {
    500
}

pub(crate) fn default_prefer_4() -> u8 {
    4
}

pub(crate) fn default_network_ipv6() -> Option<bool> {
    DEFAULT_NETWORK_IPV6
}

pub(crate) fn default_stun_tcp_fallback() -> bool {
    DEFAULT_STUN_TCP_FALLBACK
}

pub(crate) fn default_unknown_dc_log_path() -> Option<String> {
    Some("unknown-dc.txt".to_string())
}

pub(crate) fn default_unknown_dc_file_log_enabled() -> bool {
    false
}

pub(crate) fn default_pool_size() -> usize {
    8
}

pub(crate) fn default_proxy_secret_path() -> Option<String> {
    Some("proxy-secret".to_string())
}

pub(crate) fn default_proxy_config_v4_cache_path() -> Option<String> {
    Some("cache/proxy-config-v4.txt".to_string())
}

pub(crate) fn default_proxy_config_v6_cache_path() -> Option<String> {
    Some("cache/proxy-config-v6.txt".to_string())
}

pub(crate) fn default_middle_proxy_nat_stun() -> Option<String> {
    None
}

pub(crate) fn default_middle_proxy_nat_stun_servers() -> Vec<String> {
    Vec::new()
}

pub(crate) fn default_stun_nat_probe_concurrency() -> usize {
    8
}

pub(crate) fn default_middle_proxy_warm_standby() -> usize {
    DEFAULT_MIDDLE_PROXY_WARM_STANDBY
}

pub(crate) fn default_me_init_retry_attempts() -> u32 {
    0
}

pub(crate) fn default_me2dc_fallback() -> bool {
    true
}

pub(crate) fn default_keepalive_interval() -> u64 {
    8
}

pub(crate) fn default_keepalive_jitter() -> u64 {
    2
}

pub(crate) fn default_warmup_step_delay_ms() -> u64 {
    500
}

pub(crate) fn default_warmup_step_jitter_ms() -> u64 {
    300
}

pub(crate) fn default_reconnect_backoff_base_ms() -> u64 {
    500
}

pub(crate) fn default_reconnect_backoff_cap_ms() -> u64 {
    30_000
}

pub(crate) fn default_me_reconnect_max_concurrent_per_dc() -> u32 {
    DEFAULT_ME_RECONNECT_MAX_CONCURRENT_PER_DC
}

pub(crate) fn default_me_reconnect_fast_retry_count() -> u32 {
    DEFAULT_ME_RECONNECT_FAST_RETRY_COUNT
}

pub(crate) fn default_me_single_endpoint_shadow_writers() -> u8 {
    DEFAULT_ME_SINGLE_ENDPOINT_SHADOW_WRITERS
}

pub(crate) fn default_me_single_endpoint_outage_mode_enabled() -> bool {
    true
}

pub(crate) fn default_me_single_endpoint_outage_disable_quarantine() -> bool {
    true
}

pub(crate) fn default_me_single_endpoint_outage_backoff_min_ms() -> u64 {
    250
}

pub(crate) fn default_me_single_endpoint_outage_backoff_max_ms() -> u64 {
    3000
}

pub(crate) fn default_me_single_endpoint_shadow_rotate_every_secs() -> u64 {
    900
}

pub(crate) fn default_me_adaptive_floor_idle_secs() -> u64 {
    DEFAULT_ME_ADAPTIVE_FLOOR_IDLE_SECS
}

pub(crate) fn default_me_adaptive_floor_min_writers_single_endpoint() -> u8 {
    DEFAULT_ME_ADAPTIVE_FLOOR_MIN_WRITERS_SINGLE_ENDPOINT
}

pub(crate) fn default_me_adaptive_floor_min_writers_multi_endpoint() -> u8 {
    DEFAULT_ME_ADAPTIVE_FLOOR_MIN_WRITERS_MULTI_ENDPOINT
}

pub(crate) fn default_me_adaptive_floor_recover_grace_secs() -> u64 {
    DEFAULT_ME_ADAPTIVE_FLOOR_RECOVER_GRACE_SECS
}

pub(crate) fn default_me_adaptive_floor_writers_per_core_total() -> u16 {
    DEFAULT_ME_ADAPTIVE_FLOOR_WRITERS_PER_CORE_TOTAL
}

pub(crate) fn default_me_adaptive_floor_cpu_cores_override() -> u16 {
    DEFAULT_ME_ADAPTIVE_FLOOR_CPU_CORES_OVERRIDE
}

pub(crate) fn default_me_adaptive_floor_max_extra_writers_single_per_core() -> u16 {
    DEFAULT_ME_ADAPTIVE_FLOOR_MAX_EXTRA_WRITERS_SINGLE_PER_CORE
}

pub(crate) fn default_me_adaptive_floor_max_extra_writers_multi_per_core() -> u16 {
    DEFAULT_ME_ADAPTIVE_FLOOR_MAX_EXTRA_WRITERS_MULTI_PER_CORE
}

pub(crate) fn default_me_adaptive_floor_max_active_writers_per_core() -> u16 {
    DEFAULT_ME_ADAPTIVE_FLOOR_MAX_ACTIVE_WRITERS_PER_CORE
}

pub(crate) fn default_me_adaptive_floor_max_warm_writers_per_core() -> u16 {
    DEFAULT_ME_ADAPTIVE_FLOOR_MAX_WARM_WRITERS_PER_CORE
}

pub(crate) fn default_me_adaptive_floor_max_active_writers_global() -> u32 {
    DEFAULT_ME_ADAPTIVE_FLOOR_MAX_ACTIVE_WRITERS_GLOBAL
}

pub(crate) fn default_me_adaptive_floor_max_warm_writers_global() -> u32 {
    DEFAULT_ME_ADAPTIVE_FLOOR_MAX_WARM_WRITERS_GLOBAL
}

pub(crate) fn default_me_writer_cmd_channel_capacity() -> usize {
    DEFAULT_ME_WRITER_CMD_CHANNEL_CAPACITY
}

pub(crate) fn default_me_route_channel_capacity() -> usize {
    DEFAULT_ME_ROUTE_CHANNEL_CAPACITY
}

pub(crate) fn default_me_c2me_channel_capacity() -> usize {
    DEFAULT_ME_C2ME_CHANNEL_CAPACITY
}

pub(crate) fn default_me_writer_pick_sample_size() -> u8 {
    DEFAULT_ME_WRITER_PICK_SAMPLE_SIZE
}

pub(crate) fn default_me_health_interval_ms_unhealthy() -> u64 {
    DEFAULT_ME_HEALTH_INTERVAL_MS_UNHEALTHY
}

pub(crate) fn default_me_health_interval_ms_healthy() -> u64 {
    DEFAULT_ME_HEALTH_INTERVAL_MS_HEALTHY
}

pub(crate) fn default_me_admission_poll_ms() -> u64 {
    DEFAULT_ME_ADMISSION_POLL_MS
}

pub(crate) fn default_me_warn_rate_limit_ms() -> u64 {
    DEFAULT_ME_WARN_RATE_LIMIT_MS
}

pub(crate) fn default_upstream_connect_retry_attempts() -> u32 {
    DEFAULT_UPSTREAM_CONNECT_RETRY_ATTEMPTS
}

pub(crate) fn default_upstream_connect_retry_backoff_ms() -> u64 {
    100
}

pub(crate) fn default_upstream_unhealthy_fail_threshold() -> u32 {
    DEFAULT_UPSTREAM_UNHEALTHY_FAIL_THRESHOLD
}

pub(crate) fn default_upstream_connect_budget_ms() -> u64 {
    DEFAULT_UPSTREAM_CONNECT_BUDGET_MS
}

pub(crate) fn default_upstream_connect_failfast_hard_errors() -> bool {
    false
}

pub(crate) fn default_rpc_proxy_req_every() -> u64 {
    0
}

pub(crate) fn default_crypto_pending_buffer() -> usize {
    256 * 1024
}

pub(crate) fn default_max_client_frame() -> usize {
    16 * 1024 * 1024
}

pub(crate) fn default_desync_all_full() -> bool {
    false
}

pub(crate) fn default_me_route_backpressure_base_timeout_ms() -> u64 {
    25
}

pub(crate) fn default_me_route_backpressure_high_timeout_ms() -> u64 {
    120
}

pub(crate) fn default_me_route_backpressure_high_watermark_pct() -> u8 {
    80
}

pub(crate) fn default_me_route_no_writer_wait_ms() -> u64 {
    250
}

pub(crate) fn default_me_route_inline_recovery_attempts() -> u32 {
    3
}

pub(crate) fn default_me_route_inline_recovery_wait_ms() -> u64 {
    3000
}

pub(crate) fn default_beobachten_minutes() -> u64 {
    10
}

pub(crate) fn default_beobachten_flush_secs() -> u64 {
    15
}

pub(crate) fn default_beobachten_file() -> String {
    "cache/beobachten.txt".to_string()
}

pub(crate) fn default_tls_new_session_tickets() -> u8 {
    0
}

pub(crate) fn default_tls_full_cert_ttl_secs() -> u64 {
    90
}

pub(crate) fn default_server_hello_delay_min_ms() -> u64 {
    0
}

pub(crate) fn default_server_hello_delay_max_ms() -> u64 {
    0
}

pub(crate) fn default_alpn_enforce() -> bool {
    true
}

pub(crate) fn default_stun_servers() -> Vec<String> {
    vec![
        "stun.l.google.com:5349".to_string(),
        "stun1.l.google.com:3478".to_string(),
        "stun.gmx.net:3478".to_string(),
        "stun.l.google.com:19302".to_string(),
        "stun.1und1.de:3478".to_string(),
        "stun1.l.google.com:19302".to_string(),
        "stun2.l.google.com:19302".to_string(),
        "stun3.l.google.com:19302".to_string(),
        "stun4.l.google.com:19302".to_string(),
        "stun.services.mozilla.com:3478".to_string(),
        "stun.stunprotocol.org:3478".to_string(),
        "stun.nextcloud.com:3478".to_string(),
        "stun.voip.eutelia.it:3478".to_string(),
    ]
}

pub(crate) fn default_http_ip_detect_urls() -> Vec<String> {
    vec![
        "https://ifconfig.me/ip".to_string(),
        "https://api.ipify.org".to_string(),
    ]
}

pub(crate) fn default_cache_public_ip_path() -> String {
    "cache/public_ip.txt".to_string()
}

pub(crate) fn default_proxy_secret_reload_secs() -> u64 {
    60 * 60
}

pub(crate) fn default_proxy_config_reload_secs() -> u64 {
    60 * 60
}

pub(crate) fn default_update_every_secs() -> u64 {
    5 * 60
}

pub(crate) fn default_update_every() -> Option<u64> {
    Some(default_update_every_secs())
}

pub(crate) fn default_me_reinit_every_secs() -> u64 {
    15 * 60
}

pub(crate) fn default_me_reinit_singleflight() -> bool {
    true
}

pub(crate) fn default_me_reinit_trigger_channel() -> usize {
    64
}

pub(crate) fn default_me_reinit_coalesce_window_ms() -> u64 {
    200
}

pub(crate) fn default_me_hardswap_warmup_delay_min_ms() -> u64 {
    1000
}

pub(crate) fn default_me_hardswap_warmup_delay_max_ms() -> u64 {
    2000
}

pub(crate) fn default_me_hardswap_warmup_extra_passes() -> u8 {
    3
}

pub(crate) fn default_me_hardswap_warmup_pass_backoff_base_ms() -> u64 {
    500
}

pub(crate) fn default_me_config_stable_snapshots() -> u8 {
    2
}

pub(crate) fn default_me_config_apply_cooldown_secs() -> u64 {
    300
}

pub(crate) fn default_me_snapshot_require_http_2xx() -> bool {
    true
}

pub(crate) fn default_me_snapshot_reject_empty_map() -> bool {
    true
}

pub(crate) fn default_me_snapshot_min_proxy_for_lines() -> u32 {
    1
}

pub(crate) fn default_proxy_secret_stable_snapshots() -> u8 {
    2
}

pub(crate) fn default_proxy_secret_rotate_runtime() -> bool {
    true
}

pub(crate) fn default_me_secret_atomic_snapshot() -> bool {
    true
}

pub(crate) fn default_proxy_secret_len_max() -> usize {
    256
}

pub(crate) fn default_me_reinit_drain_timeout_secs() -> u64 {
    120
}

pub(crate) fn default_me_pool_drain_ttl_secs() -> u64 {
    90
}

pub(crate) fn default_me_bind_stale_ttl_secs() -> u64 {
    default_me_pool_drain_ttl_secs()
}

pub(crate) fn default_me_pool_min_fresh_ratio() -> f32 {
    0.8
}

pub(crate) fn default_me_deterministic_writer_sort() -> bool {
    true
}

pub(crate) fn default_hardswap() -> bool {
    true
}

pub(crate) fn default_ntp_check() -> bool {
    true
}

pub(crate) fn default_ntp_servers() -> Vec<String> {
    vec!["pool.ntp.org".to_string()]
}

pub(crate) fn default_fast_mode_min_tls_record() -> usize {
    0
}

pub(crate) fn default_degradation_min_unavailable_dc_groups() -> u8 {
    2
}

pub(crate) fn default_listen_addr_ipv6() -> String {
    DEFAULT_LISTEN_ADDR_IPV6.to_string()
}

pub(crate) fn default_listen_addr_ipv6_opt() -> Option<String> {
    Some(default_listen_addr_ipv6())
}

pub(crate) fn default_access_users() -> HashMap<String, String> {
    HashMap::from([(
        DEFAULT_ACCESS_USER.to_string(),
        DEFAULT_ACCESS_SECRET.to_string(),
    )])
}

pub(crate) fn default_user_max_unique_ips_window_secs() -> u64 {
    DEFAULT_USER_MAX_UNIQUE_IPS_WINDOW_SECS
}

// Custom deserializer helpers

#[derive(Deserialize)]
#[serde(untagged)]
pub(crate) enum OneOrMany {
    One(String),
    Many(Vec<String>),
}

pub(crate) fn deserialize_dc_overrides<'de, D>(
    deserializer: D,
) -> std::result::Result<HashMap<String, Vec<String>>, D::Error>
where
    D: serde::de::Deserializer<'de>,
{
    let raw: HashMap<String, OneOrMany> = HashMap::deserialize(deserializer)?;
    let mut out = HashMap::new();
    for (dc, val) in raw {
        let mut addrs = match val {
            OneOrMany::One(s) => vec![s],
            OneOrMany::Many(v) => v,
        };
        addrs.retain(|s| !s.trim().is_empty());
        if !addrs.is_empty() {
            out.insert(dc, addrs);
        }
    }
    Ok(out)
}
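The untagged `OneOrMany` enum is what lets a DC override be written either as a bare string or as a list. A small sketch of what `deserialize_dc_overrides` accepts; the `Demo` struct and its field are illustrative only, not the real config types:

    use serde::Deserialize;
    use std::collections::HashMap;

    #[derive(Deserialize)]
    struct Demo {
        // Hypothetical field wired to the deserializer shown above.
        #[serde(deserialize_with = "deserialize_dc_overrides", default)]
        dc_overrides: HashMap<String, Vec<String>>,
    }

    // Both spellings parse to the same map; blank entries are dropped.
    let one: Demo = toml::from_str(r#"dc_overrides = { "1" = "149.154.175.50:443" }"#).unwrap();
    let many: Demo = toml::from_str(r#"dc_overrides = { "1" = ["149.154.175.50:443"] }"#).unwrap();
    assert_eq!(one.dc_overrides, many.dc_overrides);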
src/config/hot_reload.rs (new file, 1181 lines): diff suppressed because it is too large
src/config/load.rs (new file, 1935 lines): diff suppressed because it is too large
@@ -1,227 +1,9 @@
//! Configuration
//! Configuration.

use std::collections::HashMap;
use std::net::IpAddr;
use std::path::Path;
use chrono::{DateTime, Utc};
use serde::{Deserialize, Serialize};
use crate::error::{ProxyError, Result};
pub(crate) mod defaults;
mod types;
mod load;
pub mod hot_reload;

#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ProxyModes {
    #[serde(default)]
    pub classic: bool,
    #[serde(default)]
    pub secure: bool,
    #[serde(default = "default_true")]
    pub tls: bool,
}

fn default_true() -> bool { true }

impl Default for ProxyModes {
    fn default() -> Self {
        Self { classic: true, secure: true, tls: true }
    }
}

#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ProxyConfig {
    #[serde(default = "default_port")]
    pub port: u16,

    #[serde(default)]
    pub users: HashMap<String, String>,

    #[serde(default)]
    pub ad_tag: Option<String>,

    #[serde(default)]
    pub modes: ProxyModes,

    #[serde(default = "default_tls_domain")]
    pub tls_domain: String,

    #[serde(default = "default_true")]
    pub mask: bool,

    #[serde(default)]
    pub mask_host: Option<String>,

    #[serde(default = "default_mask_port")]
    pub mask_port: u16,

    #[serde(default)]
    pub prefer_ipv6: bool,

    #[serde(default = "default_true")]
    pub fast_mode: bool,

    #[serde(default)]
    pub use_middle_proxy: bool,

    #[serde(default)]
    pub user_max_tcp_conns: HashMap<String, usize>,

    #[serde(default)]
    pub user_expirations: HashMap<String, DateTime<Utc>>,

    #[serde(default)]
    pub user_data_quota: HashMap<String, u64>,

    #[serde(default = "default_replay_check_len")]
    pub replay_check_len: usize,

    #[serde(default)]
    pub ignore_time_skew: bool,

    #[serde(default = "default_handshake_timeout")]
    pub client_handshake_timeout: u64,

    #[serde(default = "default_connect_timeout")]
    pub tg_connect_timeout: u64,

    #[serde(default = "default_keepalive")]
    pub client_keepalive: u64,

    #[serde(default = "default_ack_timeout")]
    pub client_ack_timeout: u64,

    #[serde(default = "default_listen_addr")]
    pub listen_addr_ipv4: String,

    #[serde(default)]
    pub listen_addr_ipv6: Option<String>,

    #[serde(default)]
    pub listen_unix_sock: Option<String>,

    #[serde(default)]
    pub metrics_port: Option<u16>,

    #[serde(default = "default_metrics_whitelist")]
    pub metrics_whitelist: Vec<IpAddr>,

    #[serde(default = "default_fake_cert_len")]
    pub fake_cert_len: usize,
}

fn default_port() -> u16 { 443 }
fn default_tls_domain() -> String { "www.google.com".to_string() }
fn default_mask_port() -> u16 { 443 }
fn default_replay_check_len() -> usize { 65536 }
fn default_handshake_timeout() -> u64 { 10 }
fn default_connect_timeout() -> u64 { 10 }
fn default_keepalive() -> u64 { 600 }
fn default_ack_timeout() -> u64 { 300 }
fn default_listen_addr() -> String { "0.0.0.0".to_string() }
fn default_fake_cert_len() -> usize { 2048 }

fn default_metrics_whitelist() -> Vec<IpAddr> {
    vec![
        "127.0.0.1".parse().unwrap(),
        "::1".parse().unwrap(),
    ]
}

impl Default for ProxyConfig {
    fn default() -> Self {
        let mut users = HashMap::new();
        users.insert("default".to_string(), "00000000000000000000000000000000".to_string());

        Self {
            port: default_port(),
            users,
            ad_tag: None,
            modes: ProxyModes::default(),
            tls_domain: default_tls_domain(),
            mask: true,
            mask_host: None,
            mask_port: default_mask_port(),
            prefer_ipv6: false,
            fast_mode: true,
            use_middle_proxy: false,
            user_max_tcp_conns: HashMap::new(),
            user_expirations: HashMap::new(),
            user_data_quota: HashMap::new(),
            replay_check_len: default_replay_check_len(),
            ignore_time_skew: false,
            client_handshake_timeout: default_handshake_timeout(),
            tg_connect_timeout: default_connect_timeout(),
            client_keepalive: default_keepalive(),
            client_ack_timeout: default_ack_timeout(),
            listen_addr_ipv4: default_listen_addr(),
            listen_addr_ipv6: Some("::".to_string()),
            listen_unix_sock: None,
            metrics_port: None,
            metrics_whitelist: default_metrics_whitelist(),
            fake_cert_len: default_fake_cert_len(),
        }
    }
}

impl ProxyConfig {
    pub fn load<P: AsRef<Path>>(path: P) -> Result<Self> {
        let content = std::fs::read_to_string(path)
            .map_err(|e| ProxyError::Config(e.to_string()))?;

        let mut config: ProxyConfig = toml::from_str(&content)
            .map_err(|e| ProxyError::Config(e.to_string()))?;

        // Validate secrets
        for (user, secret) in &config.users {
            if !secret.chars().all(|c| c.is_ascii_hexdigit()) || secret.len() != 32 {
                return Err(ProxyError::InvalidSecret {
                    user: user.clone(),
                    reason: "Must be 32 hex characters".to_string(),
                });
            }
        }

        // Default mask_host
        if config.mask_host.is_none() {
            config.mask_host = Some(config.tls_domain.clone());
        }

        // Random fake_cert_len
        use rand::Rng;
        config.fake_cert_len = rand::thread_rng().gen_range(1024..4096);

        Ok(config)
    }

    pub fn validate(&self) -> Result<()> {
        if self.users.is_empty() {
            return Err(ProxyError::Config("No users configured".to_string()));
        }

        if !self.modes.classic && !self.modes.secure && !self.modes.tls {
            return Err(ProxyError::Config("No modes enabled".to_string()));
        }

        Ok(())
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_default_config() {
        let config = ProxyConfig::default();
        assert_eq!(config.port, 443);
        assert!(config.modes.tls);
        assert_eq!(config.client_keepalive, 600);
        assert_eq!(config.client_ack_timeout, 300);
    }

    #[test]
    fn test_config_validate() {
        let mut config = ProxyConfig::default();
        assert!(config.validate().is_ok());

        config.users.clear();
        assert!(config.validate().is_err());
    }
}
pub use load::ProxyConfig;
pub use types::*;
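The module now only wires submodules together and re-exports `ProxyConfig` from `load`. Assuming `src/config/load.rs` (diff suppressed above) keeps the `load()`/`validate()` entry points the removed code had, existing call sites stay unchanged:

    // Sketch under that assumption; the new load.rs body is not shown here.
    use crate::config::ProxyConfig;

    let cfg = ProxyConfig::load("/etc/telemt/config.toml")?;
    cfg.validate()?;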
src/config/types.rs (new file, 1472 lines): diff suppressed because it is too large
@@ -1,21 +1,41 @@
//! AES
//! AES encryption implementations
//!
//! Provides AES-256-CTR and AES-256-CBC modes for MTProto encryption.
//!
//! ## Zeroize policy
//!
//! - `AesCbc` stores raw key/IV bytes and zeroizes them on drop.
//! - `AesCtr` wraps an opaque `Aes256Ctr` cipher from the `ctr` crate.
//!   The expanded key schedule lives inside that type and cannot be
//!   zeroized from outside. Callers that hold raw key material (e.g.
//!   `HandshakeSuccess`, `ObfuscationParams`) are responsible for
//!   zeroizing their own copies.

#![allow(dead_code)]

use aes::Aes256;
use ctr::{Ctr128BE, cipher::{KeyIvInit, StreamCipher}};
use cbc::{Encryptor as CbcEncryptor, Decryptor as CbcDecryptor};
use cbc::cipher::{BlockEncryptMut, BlockDecryptMut, block_padding::NoPadding};
use zeroize::Zeroize;
use crate::error::{ProxyError, Result};

type Aes256Ctr = Ctr128BE<Aes256>;
type Aes256CbcEnc = CbcEncryptor<Aes256>;
type Aes256CbcDec = CbcDecryptor<Aes256>;

// ============= AES-256-CTR =============

/// AES-256-CTR encryptor/decryptor
///
/// CTR mode is symmetric — encryption and decryption are the same operation.
///
/// **Zeroize note:** The inner `Aes256Ctr` cipher state (expanded key schedule
/// + counter) is opaque and cannot be zeroized. If you need to protect key
/// material, zeroize the `[u8; 32]` key and `u128` IV at the call site
/// before dropping them.
pub struct AesCtr {
    cipher: Aes256Ctr,
}

impl AesCtr {
    /// Create new AES-CTR cipher with key and IV
    pub fn new(key: &[u8; 32], iv: u128) -> Self {
        let iv_bytes = iv.to_be_bytes();
        Self {
@@ -23,6 +43,7 @@ impl AesCtr {
        }
    }

    /// Create from key and IV slices
    pub fn from_key_iv(key: &[u8], iv: &[u8]) -> Result<Self> {
        if key.len() != 32 {
            return Err(ProxyError::InvalidKeyLength { expected: 32, got: key.len() });
@@ -54,17 +75,37 @@ impl AesCtr {
    }
}

/// AES-256-CBC Ciphermagic
// ============= AES-256-CBC =============

/// AES-256-CBC cipher with proper chaining
///
/// Unlike CTR mode, CBC is NOT symmetric — encryption and decryption
/// are different operations. This implementation handles CBC chaining
/// correctly across multiple blocks.
///
/// Key and IV are zeroized on drop.
pub struct AesCbc {
    key: [u8; 32],
    iv: [u8; 16],
}

impl Drop for AesCbc {
    fn drop(&mut self) {
        self.key.zeroize();
        self.iv.zeroize();
    }
}

impl AesCbc {
    /// AES block size
    const BLOCK_SIZE: usize = 16;

    /// Create new AES-CBC cipher with key and IV
    pub fn new(key: [u8; 32], iv: [u8; 16]) -> Self {
        Self { key, iv }
    }

    /// Create from slices
    pub fn from_slices(key: &[u8], iv: &[u8]) -> Result<Self> {
        if key.len() != 32 {
            return Err(ProxyError::InvalidKeyLength { expected: 32, got: key.len() });
@@ -79,32 +120,36 @@ impl AesCbc {
        })
    }

    /// Encrypt data using CBC mode
    pub fn encrypt(&self, data: &[u8]) -> Result<Vec<u8>> {
        if data.len() % 16 != 0 {
            return Err(ProxyError::Crypto(
                format!("CBC data must be aligned to 16 bytes, got {}", data.len())
            ));
        }

        if data.is_empty() {
            return Ok(Vec::new());
        }

        let mut buffer = data.to_vec();

        let mut encryptor = Aes256CbcEnc::new((&self.key).into(), (&self.iv).into());

        for chunk in buffer.chunks_mut(16) {
            encryptor.encrypt_block_mut(chunk.into());
        }

        Ok(buffer)
    /// Encrypt a single block using raw AES (no chaining)
    fn encrypt_block(&self, block: &[u8; 16], key_schedule: &aes::Aes256) -> [u8; 16] {
        use aes::cipher::BlockEncrypt;
        let mut output = *block;
        key_schedule.encrypt_block((&mut output).into());
        output
    }

    /// Decrypt data using CBC mode
    pub fn decrypt(&self, data: &[u8]) -> Result<Vec<u8>> {
        if data.len() % 16 != 0 {
    /// Decrypt a single block using raw AES (no chaining)
    fn decrypt_block(&self, block: &[u8; 16], key_schedule: &aes::Aes256) -> [u8; 16] {
        use aes::cipher::BlockDecrypt;
        let mut output = *block;
        key_schedule.decrypt_block((&mut output).into());
        output
    }

    /// XOR two 16-byte blocks
    fn xor_blocks(a: &[u8; 16], b: &[u8; 16]) -> [u8; 16] {
        let mut result = [0u8; 16];
        for i in 0..16 {
            result[i] = a[i] ^ b[i];
        }
        result
    }

    /// Encrypt data using CBC mode with proper chaining
    ///
    /// CBC Encryption: C[i] = AES_Encrypt(P[i] XOR C[i-1]), where C[-1] = IV
    pub fn encrypt(&self, data: &[u8]) -> Result<Vec<u8>> {
        if !data.len().is_multiple_of(Self::BLOCK_SIZE) {
            return Err(ProxyError::Crypto(
                format!("CBC data must be aligned to 16 bytes, got {}", data.len())
            ));
@@ -114,20 +159,57 @@ impl AesCbc {
            return Ok(Vec::new());
        }

        let mut buffer = data.to_vec();
        use aes::cipher::KeyInit;
        let key_schedule = aes::Aes256::new((&self.key).into());

        let mut decryptor = Aes256CbcDec::new((&self.key).into(), (&self.iv).into());
        let mut result = Vec::with_capacity(data.len());
        let mut prev_ciphertext = self.iv;

        for chunk in buffer.chunks_mut(16) {
            decryptor.decrypt_block_mut(chunk.into());
        for chunk in data.chunks(Self::BLOCK_SIZE) {
            let plaintext: [u8; 16] = chunk.try_into().unwrap();
            let xored = Self::xor_blocks(&plaintext, &prev_ciphertext);
            let ciphertext = self.encrypt_block(&xored, &key_schedule);
            prev_ciphertext = ciphertext;
            result.extend_from_slice(&ciphertext);
        }

        Ok(buffer)
        Ok(result)
    }

    /// Decrypt data using CBC mode with proper chaining
    ///
    /// CBC Decryption: P[i] = AES_Decrypt(C[i]) XOR C[i-1], where C[-1] = IV
    pub fn decrypt(&self, data: &[u8]) -> Result<Vec<u8>> {
        if !data.len().is_multiple_of(Self::BLOCK_SIZE) {
            return Err(ProxyError::Crypto(
                format!("CBC data must be aligned to 16 bytes, got {}", data.len())
            ));
        }

        if data.is_empty() {
            return Ok(Vec::new());
        }

        use aes::cipher::KeyInit;
        let key_schedule = aes::Aes256::new((&self.key).into());

        let mut result = Vec::with_capacity(data.len());
        let mut prev_ciphertext = self.iv;

        for chunk in data.chunks(Self::BLOCK_SIZE) {
            let ciphertext: [u8; 16] = chunk.try_into().unwrap();
            let decrypted = self.decrypt_block(&ciphertext, &key_schedule);
            let plaintext = Self::xor_blocks(&decrypted, &prev_ciphertext);
            prev_ciphertext = ciphertext;
            result.extend_from_slice(&plaintext);
        }

        Ok(result)
    }

    /// Encrypt data in-place
    pub fn encrypt_in_place(&self, data: &mut [u8]) -> Result<()> {
        if data.len() % 16 != 0 {
        if !data.len().is_multiple_of(Self::BLOCK_SIZE) {
            return Err(ProxyError::Crypto(
                format!("CBC data must be aligned to 16 bytes, got {}", data.len())
            ));
@@ -137,10 +219,22 @@ impl AesCbc {
            return Ok(());
        }

        let mut encryptor = Aes256CbcEnc::new((&self.key).into(), (&self.iv).into());
        use aes::cipher::KeyInit;
        let key_schedule = aes::Aes256::new((&self.key).into());

        for chunk in data.chunks_mut(16) {
            encryptor.encrypt_block_mut(chunk.into());
        let mut prev_ciphertext = self.iv;

        for i in (0..data.len()).step_by(Self::BLOCK_SIZE) {
            let block = &mut data[i..i + Self::BLOCK_SIZE];

            for j in 0..Self::BLOCK_SIZE {
                block[j] ^= prev_ciphertext[j];
            }

            let block_array: &mut [u8; 16] = block.try_into().unwrap();
            *block_array = self.encrypt_block(block_array, &key_schedule);

            prev_ciphertext = *block_array;
        }

        Ok(())
@@ -148,7 +242,7 @@ impl AesCbc {

    /// Decrypt data in-place
    pub fn decrypt_in_place(&self, data: &mut [u8]) -> Result<()> {
        if data.len() % 16 != 0 {
        if !data.len().is_multiple_of(Self::BLOCK_SIZE) {
            return Err(ProxyError::Crypto(
                format!("CBC data must be aligned to 16 bytes, got {}", data.len())
            ));
@@ -158,16 +252,32 @@ impl AesCbc {
            return Ok(());
        }

        let mut decryptor = Aes256CbcDec::new((&self.key).into(), (&self.iv).into());
        use aes::cipher::KeyInit;
        let key_schedule = aes::Aes256::new((&self.key).into());

        for chunk in data.chunks_mut(16) {
            decryptor.decrypt_block_mut(chunk.into());
        let mut prev_ciphertext = self.iv;

        for i in (0..data.len()).step_by(Self::BLOCK_SIZE) {
            let block = &mut data[i..i + Self::BLOCK_SIZE];

            let current_ciphertext: [u8; 16] = block.try_into().unwrap();

            let block_array: &mut [u8; 16] = block.try_into().unwrap();
            *block_array = self.decrypt_block(block_array, &key_schedule);

            for j in 0..Self::BLOCK_SIZE {
                block[j] ^= prev_ciphertext[j];
            }

            prev_ciphertext = current_ciphertext;
        }

        Ok(())
    }
}

// ============= Encryption Traits =============

/// Trait for unified encryption interface
pub trait Encryptor: Send + Sync {
    fn encrypt(&mut self, data: &[u8]) -> Vec<u8>;
@@ -209,6 +319,8 @@ impl Decryptor for PassthroughEncryptor {
mod tests {
    use super::*;

    // ============= AES-CTR Tests =============

    #[test]
    fn test_aes_ctr_roundtrip() {
        let key = [0u8; 32];
@@ -225,12 +337,32 @@ mod tests {
        assert_eq!(original.as_slice(), decrypted.as_slice());
    }

    #[test]
    fn test_aes_ctr_in_place() {
        let key = [0x42u8; 32];
        let iv = 999u128;

        let original = b"Test data for in-place encryption";
        let mut data = original.to_vec();

        let mut cipher = AesCtr::new(&key, iv);
        cipher.apply(&mut data);

        assert_ne!(&data[..], original);

        let mut cipher = AesCtr::new(&key, iv);
        cipher.apply(&mut data);

        assert_eq!(&data[..], original);
    }

    // ============= AES-CBC Tests =============

    #[test]
    fn test_aes_cbc_roundtrip() {
        let key = [0u8; 32];
        let iv = [0u8; 16];

        // Must be aligned to 16 bytes
        let original = [0u8; 32];

        let cipher = AesCbc::new(key, iv);
@@ -244,45 +376,48 @@ mod tests {
    fn test_aes_cbc_chaining_works() {
        let key = [0x42u8; 32];
        let iv = [0x00u8; 16];

        let plaintext = [0xAA_u8; 32];

        let plaintext = [0xAAu8; 32];

        let cipher = AesCbc::new(key, iv);
        let ciphertext = cipher.encrypt(&plaintext).unwrap();

        // CBC Corrections
        let block1 = &ciphertext[0..16];
        let block2 = &ciphertext[16..32];

        assert_ne!(block1, block2, "CBC chaining broken: identical plaintext blocks produced identical ciphertext");
        assert_ne!(
            block1, block2,
            "CBC chaining broken: identical plaintext blocks produced identical ciphertext"
        );
    }

    #[test]
    fn test_aes_cbc_known_vector() {
        let key = [0u8; 32];
        let iv = [0u8; 16];

        // 3 Datablocks
        let plaintext = [
            0x00, 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77,
            0x88, 0x99, 0xAA, 0xBB, 0xCC, 0xDD, 0xEE, 0xFF,
            // Block 2
            0x00, 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77,
            0x88, 0x99, 0xAA, 0xBB, 0xCC, 0xDD, 0xEE, 0xFF,
            // Block 3 - different
            0xFF, 0xEE, 0xDD, 0xCC, 0xBB, 0xAA, 0x99, 0x88,
            0x77, 0x66, 0x55, 0x44, 0x33, 0x22, 0x11, 0x00,
        ];
        let plaintext = [0u8; 16];

        let cipher = AesCbc::new(key, iv);
        let ciphertext = cipher.encrypt(&plaintext).unwrap();

        // Decrypt + Verify
        let decrypted = cipher.decrypt(&ciphertext).unwrap();
        assert_eq!(plaintext.as_slice(), decrypted.as_slice());

        // Verify Ciphertexts Block 1 != Block 2
        assert_ne!(&ciphertext[0..16], &ciphertext[16..32]);
        assert_ne!(ciphertext.as_slice(), plaintext.as_slice());
    }

    #[test]
    fn test_aes_cbc_multi_block() {
        let key = [0x12u8; 32];
        let iv = [0x34u8; 16];

        let plaintext: Vec<u8> = (0..80).collect();

        let cipher = AesCbc::new(key, iv);
        let ciphertext = cipher.encrypt(&plaintext).unwrap();
        let decrypted = cipher.decrypt(&ciphertext).unwrap();

        assert_eq!(plaintext, decrypted);
    }

    #[test]
@@ -290,8 +425,8 @@ mod tests {
        let key = [0x12u8; 32];
        let iv = [0x34u8; 16];

        let original = [0x56u8; 48]; // 3 blocks
        let mut buffer = original.clone();
        let original = [0x56u8; 48];
        let mut buffer = original;

        let cipher = AesCbc::new(key, iv);

|
||||
fn test_aes_cbc_unaligned_error() {
|
||||
let cipher = AesCbc::new([0u8; 32], [0u8; 16]);
|
||||
|
||||
// 15 bytes
|
||||
let result = cipher.encrypt(&[0u8; 15]);
|
||||
assert!(result.is_err());
|
||||
|
||||
// 17 bytes
|
||||
let result = cipher.encrypt(&[0u8; 17]);
|
||||
assert!(result.is_err());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_aes_cbc_avalanche_effect() {
|
||||
// Cipherplane
|
||||
|
||||
let key = [0xAB; 32];
|
||||
let iv = [0xCD; 16];
|
||||
|
||||
let mut plaintext1 = [0u8; 32];
|
||||
let plaintext1 = [0u8; 32];
|
||||
let mut plaintext2 = [0u8; 32];
|
||||
plaintext2[0] = 0x01; // Один бит отличается
|
||||
plaintext2[0] = 0x01;
|
||||
|
||||
let cipher = AesCbc::new(key, iv);
|
||||
|
||||
let ciphertext1 = cipher.encrypt(&plaintext1).unwrap();
|
||||
let ciphertext2 = cipher.encrypt(&plaintext2).unwrap();
|
||||
|
||||
// First Blocks Diff
|
||||
assert_ne!(&ciphertext1[0..16], &ciphertext2[0..16]);
|
||||
|
||||
// Second Blocks Diff
|
||||
assert_ne!(&ciphertext1[16..32], &ciphertext2[16..32]);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_aes_cbc_iv_matters() {
|
||||
let key = [0x55; 32];
|
||||
let plaintext = [0x77u8; 16];
|
||||
|
||||
let cipher1 = AesCbc::new(key, [0u8; 16]);
|
||||
let cipher2 = AesCbc::new(key, [1u8; 16]);
|
||||
|
||||
let ciphertext1 = cipher1.encrypt(&plaintext).unwrap();
|
||||
let ciphertext2 = cipher2.encrypt(&plaintext).unwrap();
|
||||
|
||||
assert_ne!(ciphertext1, ciphertext2);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_aes_cbc_deterministic() {
|
||||
let key = [0x99; 32];
|
||||
let iv = [0x88; 16];
|
||||
let plaintext = [0x77u8; 32];
|
||||
|
||||
let cipher = AesCbc::new(key, iv);
|
||||
|
||||
let ciphertext1 = cipher.encrypt(&plaintext).unwrap();
|
||||
let ciphertext2 = cipher.encrypt(&plaintext).unwrap();
|
||||
|
||||
assert_eq!(ciphertext1, ciphertext2);
|
||||
}
|
||||
|
||||
// ============= Zeroize Tests =============
|
||||
|
||||
#[test]
|
||||
fn test_aes_cbc_zeroize_on_drop() {
|
||||
let key = [0xAA; 32];
|
||||
let iv = [0xBB; 16];
|
||||
|
||||
let cipher = AesCbc::new(key, iv);
|
||||
// Verify key/iv are set
|
||||
assert_eq!(cipher.key, [0xAA; 32]);
|
||||
assert_eq!(cipher.iv, [0xBB; 16]);
|
||||
|
||||
drop(cipher);
|
||||
// After drop, key/iv are zeroized (can't observe directly,
|
||||
// but the Drop impl runs without panic)
|
||||
}
|
||||
|
||||
// ============= Error Handling Tests =============
|
||||
|
||||
#[test]
|
||||
fn test_invalid_key_length() {
|
||||
let result = AesCtr::from_key_iv(&[0u8; 16], &[0u8; 16]);
|
||||
assert!(result.is_err());
|
||||
|
||||
let result = AesCbc::from_slices(&[0u8; 16], &[0u8; 16]);
|
||||
assert!(result.is_err());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_invalid_iv_length() {
|
||||
let result = AesCtr::from_key_iv(&[0u8; 32], &[0u8; 8]);
|
||||
assert!(result.is_err());
|
||||
|
||||
let result = AesCbc::from_slices(&[0u8; 32], &[0u8; 8]);
|
||||
assert!(result.is_err());
|
||||
}
|
||||
}
|
||||
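The rewritten CBC methods follow the standard recurrences C[i] = E_K(P[i] XOR C[i-1]) and P[i] = D_K(C[i]) XOR C[i-1] with C[-1] = IV; decryption needs the previous *ciphertext* block rather than the previous plaintext, which is why `decrypt_in_place` copies `current_ciphertext` aside before overwriting the buffer. A minimal roundtrip using only the API shown above:

    use crate::crypto::AesCbc;

    let cipher = AesCbc::new([0x42u8; 32], [0x00u8; 16]);
    let plaintext = [0xAAu8; 32];                        // two identical 16-byte blocks
    let ciphertext = cipher.encrypt(&plaintext).unwrap();
    assert_ne!(&ciphertext[0..16], &ciphertext[16..32]); // chaining: identical blocks differ
    let roundtrip = cipher.decrypt(&ciphertext).unwrap();
    assert_eq!(roundtrip.as_slice(), plaintext.as_slice());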
@@ -1,3 +1,16 @@
|
||||
//! Cryptographic hash functions
|
||||
//!
|
||||
//! ## Protocol-required algorithms
|
||||
//!
|
||||
//! This module exposes MD5 and SHA-1 alongside SHA-256. These weaker
|
||||
//! hash functions are **required by the Telegram Middle Proxy protocol**
|
||||
//! (`derive_middleproxy_keys`) and cannot be replaced without breaking
|
||||
//! compatibility. They are NOT used for any security-sensitive purpose
|
||||
//! outside of that specific key derivation scheme mandated by Telegram.
|
||||
//!
|
||||
//! Static analysis tools (CodeQL, cargo-audit) may flag them — the
|
||||
//! usages are intentional and protocol-mandated.
|
||||
|
||||
use hmac::{Hmac, Mac};
|
||||
use sha2::Sha256;
|
||||
use md5::Md5;
|
||||
@@ -21,14 +34,16 @@ pub fn sha256_hmac(key: &[u8], data: &[u8]) -> [u8; 32] {
|
||||
mac.finalize().into_bytes().into()
|
||||
}
|
||||
|
||||
/// SHA-1
|
||||
/// SHA-1 — **protocol-required** by Telegram Middle Proxy key derivation.
|
||||
/// Not used for general-purpose hashing.
|
||||
pub fn sha1(data: &[u8]) -> [u8; 20] {
|
||||
let mut hasher = Sha1::new();
|
||||
hasher.update(data);
|
||||
hasher.finalize().into()
|
||||
}
|
||||
|
||||
/// MD5
|
||||
/// MD5 — **protocol-required** by Telegram Middle Proxy key derivation.
|
||||
/// Not used for general-purpose hashing.
|
||||
pub fn md5(data: &[u8]) -> [u8; 16] {
|
||||
let mut hasher = Md5::new();
|
||||
hasher.update(data);
|
||||
@@ -40,7 +55,61 @@ pub fn crc32(data: &[u8]) -> u32 {
|
||||
crc32fast::hash(data)
|
||||
}
|
||||
|
||||
/// Middle Proxy Keygen
|
||||
/// CRC32C (Castagnoli)
|
||||
pub fn crc32c(data: &[u8]) -> u32 {
|
||||
crc32c::crc32c(data)
|
||||
}
|
||||
|
||||
/// Build the exact prekey buffer used by Telegram Middle Proxy KDF.
|
||||
///
|
||||
/// Returned buffer layout (IPv4):
|
||||
/// nonce_srv | nonce_clt | clt_ts | srv_ip | clt_port | purpose | clt_ip | srv_port | secret | nonce_srv | [clt_v6 | srv_v6] | nonce_clt
|
||||
#[allow(clippy::too_many_arguments)]
|
||||
pub fn build_middleproxy_prekey(
|
||||
nonce_srv: &[u8; 16],
|
||||
nonce_clt: &[u8; 16],
|
||||
clt_ts: &[u8; 4],
|
||||
srv_ip: Option<&[u8]>,
|
||||
clt_port: &[u8; 2],
|
||||
purpose: &[u8],
|
||||
    clt_ip: Option<&[u8]>,
    srv_port: &[u8; 2],
    secret: &[u8],
    clt_ipv6: Option<&[u8; 16]>,
    srv_ipv6: Option<&[u8; 16]>,
) -> Vec<u8> {
    const EMPTY_IP: [u8; 4] = [0, 0, 0, 0];

    let srv_ip = srv_ip.unwrap_or(&EMPTY_IP);
    let clt_ip = clt_ip.unwrap_or(&EMPTY_IP);

    let mut s = Vec::with_capacity(256);
    s.extend_from_slice(nonce_srv);
    s.extend_from_slice(nonce_clt);
    s.extend_from_slice(clt_ts);
    s.extend_from_slice(srv_ip);
    s.extend_from_slice(clt_port);
    s.extend_from_slice(purpose);
    s.extend_from_slice(clt_ip);
    s.extend_from_slice(srv_port);
    s.extend_from_slice(secret);
    s.extend_from_slice(nonce_srv);

    if let (Some(clt_v6), Some(srv_v6)) = (clt_ipv6, srv_ipv6) {
        s.extend_from_slice(clt_v6);
        s.extend_from_slice(srv_v6);
    }

    s.extend_from_slice(nonce_clt);
    s
}

/// Middle Proxy key derivation
///
/// Uses MD5 + SHA-1 as mandated by the Telegram Middle Proxy protocol.
/// These algorithms are NOT replaceable here — changing them would break
/// interoperability with Telegram's middle proxy infrastructure.
#[allow(clippy::too_many_arguments)]
pub fn derive_middleproxy_keys(
    nonce_srv: &[u8; 16],
    nonce_clt: &[u8; 16],
@@ -54,30 +123,20 @@ pub fn derive_middleproxy_keys(
    clt_ipv6: Option<&[u8; 16]>,
    srv_ipv6: Option<&[u8; 16]>,
) -> ([u8; 32], [u8; 16]) {
-    const EMPTY_IP: [u8; 4] = [0, 0, 0, 0];
-
-    let srv_ip = srv_ip.unwrap_or(&EMPTY_IP);
-    let clt_ip = clt_ip.unwrap_or(&EMPTY_IP);
-
-    let mut s = Vec::with_capacity(256);
-    s.extend_from_slice(nonce_srv);
-    s.extend_from_slice(nonce_clt);
-    s.extend_from_slice(clt_ts);
-    s.extend_from_slice(srv_ip);
-    s.extend_from_slice(clt_port);
-    s.extend_from_slice(purpose);
-    s.extend_from_slice(clt_ip);
-    s.extend_from_slice(srv_port);
-    s.extend_from_slice(secret);
-    s.extend_from_slice(nonce_srv);
-
-    if let (Some(clt_v6), Some(srv_v6)) = (clt_ipv6, srv_ipv6) {
-        s.extend_from_slice(clt_v6);
-        s.extend_from_slice(srv_v6);
-    }
-
-    s.extend_from_slice(nonce_clt);
+    let s = build_middleproxy_prekey(
+        nonce_srv,
+        nonce_clt,
+        clt_ts,
+        srv_ip,
+        clt_port,
+        purpose,
+        clt_ip,
+        srv_port,
+        secret,
+        clt_ipv6,
+        srv_ipv6,
+    );

    let md5_1 = md5(&s[1..]);
    let sha1_sum = sha1(&s);
    let md5_2 = md5(&s[2..]);
@@ -87,4 +146,40 @@ pub fn derive_middleproxy_keys(
    key[12..].copy_from_slice(&sha1_sum);

    (key, md5_2)
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn middleproxy_prekey_sha_is_stable() {
        let nonce_srv = [0x11u8; 16];
        let nonce_clt = [0x22u8; 16];
        let clt_ts = 0x44332211u32.to_le_bytes();
        let srv_ip = Some([149u8, 154, 175, 50].as_ref());
        let clt_ip = Some([10u8, 0, 0, 1].as_ref());
        let clt_port = 0x1f90u16.to_le_bytes(); // 8080
        let srv_port = 0x22b8u16.to_le_bytes(); // 8888
        let secret = vec![0x55u8; 128];

        let prekey = build_middleproxy_prekey(
            &nonce_srv,
            &nonce_clt,
            &clt_ts,
            srv_ip,
            &clt_port,
            b"CLIENT",
            clt_ip,
            &srv_port,
            &secret,
            None,
            None,
        );
        let digest = sha256(&prekey);
        assert_eq!(
            hex::encode(digest),
            "934f5facdafd65a44d5c2df90d2f35ddc81faaaeb337949dfeef817c8a7c1e00"
        );
    }
}

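The hunk above deduplicates the prekey assembly into `build_middleproxy_prekey` and keeps only the digest step. As an illustrative sketch of the resulting layout — not part of the diff — assuming the module's `md5`/`sha1` helpers return `[u8; 16]` and `[u8; 20]` as the surrounding code implies:

```rust
use crate::crypto::hash::{md5, sha1};

// Illustrative only: mirrors the tail of derive_middleproxy_keys above.
// key = md5(prekey[1..])[0..12] ++ sha1(prekey)   (32 bytes total)
// iv  = md5(prekey[2..])                          (16 bytes)
fn assemble_key_iv(prekey: &[u8]) -> ([u8; 32], [u8; 16]) {
    let md5_1 = md5(&prekey[1..]);
    let sha1_sum = sha1(prekey);
    let md5_2 = md5(&prekey[2..]);

    let mut key = [0u8; 32];
    key[..12].copy_from_slice(&md5_1[..12]);
    key[12..].copy_from_slice(&sha1_sum);
    (key, md5_2)
}
```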
@@ -5,5 +5,7 @@ pub mod hash;
pub mod random;

pub use aes::{AesCtr, AesCbc};
-pub use hash::{sha256, sha256_hmac, sha1, md5, crc32};
-pub use random::{SecureRandom, SECURE_RANDOM};
+pub use hash::{
+    build_middleproxy_prekey, crc32, crc32c, derive_middleproxy_keys, sha256, sha256_hmac,
+};
+pub use random::SecureRandom;

@@ -1,55 +1,98 @@
//! Pseudorandom byte generation.

#![allow(deprecated)]
#![allow(dead_code)]

use rand::{Rng, RngCore, SeedableRng};
use rand::rngs::StdRng;
use parking_lot::Mutex;
use zeroize::Zeroize;
use crate::crypto::AesCtr;
use once_cell::sync::Lazy;

/// Global secure random instance
pub static SECURE_RANDOM: Lazy<SecureRandom> = Lazy::new(SecureRandom::new);

/// Cryptographically secure PRNG with AES-CTR
pub struct SecureRandom {
    inner: Mutex<SecureRandomInner>,
}

unsafe impl Send for SecureRandom {}
unsafe impl Sync for SecureRandom {}

struct SecureRandomInner {
    rng: StdRng,
    cipher: AesCtr,
    buffer: Vec<u8>,
    buffer_start: usize,
}

impl Drop for SecureRandomInner {
    fn drop(&mut self) {
        self.buffer.zeroize();
    }
}

impl SecureRandom {
    pub fn new() -> Self {
-        let mut rng = StdRng::from_entropy();
+        let mut seed_source = rand::rng();
+        let mut rng = StdRng::from_rng(&mut seed_source);

        let mut key = [0u8; 32];
        rng.fill_bytes(&mut key);
-        let iv: u128 = rng.gen();
+        let iv: u128 = rng.random();

+        let cipher = AesCtr::new(&key, iv);

        // Zeroize local key copy — cipher already consumed it
        key.zeroize();

        Self {
            inner: Mutex::new(SecureRandomInner {
                rng,
-                cipher: AesCtr::new(&key, iv),
+                cipher,
                buffer: Vec::with_capacity(1024),
                buffer_start: 0,
            }),
        }
    }

-    /// Generate random bytes
-    pub fn bytes(&self, len: usize) -> Vec<u8> {
+    /// Fill a caller-provided buffer with random bytes.
+    pub fn fill(&self, out: &mut [u8]) {
        let mut inner = self.inner.lock();
        const CHUNK_SIZE: usize = 512;

-        while inner.buffer.len() < len {
-            let mut chunk = vec![0u8; CHUNK_SIZE];
-            inner.rng.fill_bytes(&mut chunk);
-            inner.cipher.apply(&mut chunk);
-            inner.buffer.extend_from_slice(&chunk);

+        let mut written = 0usize;
+        while written < out.len() {
+            if inner.buffer_start >= inner.buffer.len() {
+                inner.buffer.clear();
+                inner.buffer_start = 0;
+            }

+            if inner.buffer.is_empty() {
+                let mut chunk = vec![0u8; CHUNK_SIZE];
+                inner.rng.fill_bytes(&mut chunk);
+                inner.cipher.apply(&mut chunk);
+                inner.buffer.extend_from_slice(&chunk);
+                inner.buffer_start = 0;
+            }

+            let available = inner.buffer.len().saturating_sub(inner.buffer_start);
+            let take = (out.len() - written).min(available);
+            let start = inner.buffer_start;
+            let end = start + take;
+            out[written..written + take].copy_from_slice(&inner.buffer[start..end]);
+            inner.buffer_start = end;
+            if inner.buffer_start >= inner.buffer.len() {
+                inner.buffer.clear();
+                inner.buffer_start = 0;
+            }
+            written += take;
+        }

-        inner.buffer.drain(..len).collect()
    }

+    /// Generate random bytes
+    pub fn bytes(&self, len: usize) -> Vec<u8> {
+        let mut out = vec![0u8; len];
+        self.fill(&mut out);
+        out
+    }

    /// Generate random number in range [0, max)
@@ -67,7 +110,7 @@ impl SecureRandom {
            return 0;
        }

-        let bytes_needed = (k + 7) / 8;
+        let bytes_needed = k.div_ceil(8);
        let bytes = self.bytes(bytes_needed.min(8));

        let mut result = 0u64;
@@ -78,7 +121,6 @@ impl SecureRandom {
            result |= (b as u64) << (i * 8);
        }

        // Mask extra bits
        if k < 64 {
            result &= (1u64 << k) - 1;
        }
@@ -107,13 +149,13 @@ impl SecureRandom {
    /// Generate random u32
    pub fn u32(&self) -> u32 {
        let mut inner = self.inner.lock();
-        inner.rng.gen()
+        inner.rng.random()
    }

    /// Generate random u64
    pub fn u64(&self) -> u64 {
        let mut inner = self.inner.lock();
-        inner.rng.gen()
+        inner.rng.random()
    }
}

@@ -162,12 +204,10 @@ mod tests {
    fn test_bits() {
        let rng = SecureRandom::new();

        // Single bit should be 0 or 1
        for _ in 0..100 {
            assert!(rng.bits(1) <= 1);
        }

        // 8 bits should be 0-255
        for _ in 0..100 {
            assert!(rng.bits(8) <= 255);
        }
@@ -185,10 +225,8 @@ mod tests {
            }
        }

        // Should have seen all items
        assert_eq!(seen.len(), 5);

        // Empty slice should return None
        let empty: Vec<i32> = vec![];
        assert!(rng.choose(&empty).is_none());
    }
@@ -201,12 +239,10 @@ mod tests {
        let mut shuffled = original.clone();
        rng.shuffle(&mut shuffled);

        // Should contain same elements
        let mut sorted = shuffled.clone();
        sorted.sort();
        assert_eq!(sorted, original);

        // Should be different order (with very high probability)
        assert_ne!(shuffled, original);
    }
}

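For orientation, a brief usage sketch of the reworked API above — `fill()` writes into a caller-owned buffer and `bytes()` is now a thin allocating wrapper over it. This is not part of the diff; it only exercises methods visible in it:

```rust
use crate::crypto::random::SecureRandom;

fn sketch() {
    let rng = SecureRandom::new();

    // In-place fill: no allocation, draws from the buffered AES-CTR stream.
    let mut nonce = [0u8; 16];
    rng.fill(&mut nonce);

    // Allocating variant, implemented on top of fill().
    let token = rng.bytes(32);
    assert_eq!(token.len(), 32);

    // Whole-word draws go straight to the inner StdRng.
    let _tag: u32 = rng.u32();
    assert!(rng.bits(1) <= 1);
}
```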
279 src/error.rs
@@ -1,8 +1,167 @@
//! Error Types

#![allow(dead_code)]

use std::fmt;
use std::net::SocketAddr;
use thiserror::Error;

// ============= Stream Errors =============

/// Errors specific to stream I/O operations
#[derive(Debug)]
pub enum StreamError {
    /// Partial read: got fewer bytes than expected
    PartialRead {
        expected: usize,
        got: usize,
    },
    /// Partial write: wrote fewer bytes than expected
    PartialWrite {
        expected: usize,
        written: usize,
    },
    /// Stream is in poisoned state and cannot be used
    Poisoned {
        reason: String,
    },
    /// Buffer overflow: attempted to buffer more than allowed
    BufferOverflow {
        limit: usize,
        attempted: usize,
    },
    /// Invalid frame format
    InvalidFrame {
        details: String,
    },
    /// Unexpected end of stream
    UnexpectedEof,
    /// Underlying I/O error
    Io(std::io::Error),
}

impl fmt::Display for StreamError {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        match self {
            Self::PartialRead { expected, got } => {
                write!(f, "partial read: expected {} bytes, got {}", expected, got)
            }
            Self::PartialWrite { expected, written } => {
                write!(f, "partial write: expected {} bytes, wrote {}", expected, written)
            }
            Self::Poisoned { reason } => {
                write!(f, "stream poisoned: {}", reason)
            }
            Self::BufferOverflow { limit, attempted } => {
                write!(f, "buffer overflow: limit {}, attempted {}", limit, attempted)
            }
            Self::InvalidFrame { details } => {
                write!(f, "invalid frame: {}", details)
            }
            Self::UnexpectedEof => {
                write!(f, "unexpected end of stream")
            }
            Self::Io(e) => {
                write!(f, "I/O error: {}", e)
            }
        }
    }
}

impl std::error::Error for StreamError {
    fn source(&self) -> Option<&(dyn std::error::Error + 'static)> {
        match self {
            Self::Io(e) => Some(e),
            _ => None,
        }
    }
}

impl From<std::io::Error> for StreamError {
    fn from(err: std::io::Error) -> Self {
        Self::Io(err)
    }
}

impl From<StreamError> for std::io::Error {
    fn from(err: StreamError) -> Self {
        match err {
            StreamError::Io(e) => e,
            StreamError::UnexpectedEof => {
                std::io::Error::new(std::io::ErrorKind::UnexpectedEof, err)
            }
            StreamError::Poisoned { .. } => {
                std::io::Error::other(err)
            }
            StreamError::BufferOverflow { .. } => {
                std::io::Error::new(std::io::ErrorKind::OutOfMemory, err)
            }
            StreamError::InvalidFrame { .. } => {
                std::io::Error::new(std::io::ErrorKind::InvalidData, err)
            }
            StreamError::PartialRead { .. } | StreamError::PartialWrite { .. } => {
                std::io::Error::other(err)
            }
        }
    }
}

// ============= Recoverable Trait =============

/// Trait for errors that may be recoverable
pub trait Recoverable {
    /// Check if error is recoverable (can retry operation)
    fn is_recoverable(&self) -> bool;

    /// Check if connection can continue after this error
    fn can_continue(&self) -> bool;
}

impl Recoverable for StreamError {
    fn is_recoverable(&self) -> bool {
        match self {
            Self::PartialRead { .. } | Self::PartialWrite { .. } => true,
            Self::Io(e) => matches!(
                e.kind(),
                std::io::ErrorKind::WouldBlock
                    | std::io::ErrorKind::Interrupted
                    | std::io::ErrorKind::TimedOut
            ),
            Self::Poisoned { .. }
            | Self::BufferOverflow { .. }
            | Self::InvalidFrame { .. }
            | Self::UnexpectedEof => false,
        }
    }

    fn can_continue(&self) -> bool {
        !matches!(self, Self::Poisoned { .. } | Self::UnexpectedEof | Self::BufferOverflow { .. })
    }
}

impl Recoverable for std::io::Error {
    fn is_recoverable(&self) -> bool {
        matches!(
            self.kind(),
            std::io::ErrorKind::WouldBlock
                | std::io::ErrorKind::Interrupted
                | std::io::ErrorKind::TimedOut
        )
    }

    fn can_continue(&self) -> bool {
        !matches!(
            self.kind(),
            std::io::ErrorKind::BrokenPipe
                | std::io::ErrorKind::ConnectionReset
                | std::io::ErrorKind::ConnectionAborted
                | std::io::ErrorKind::NotConnected
        )
    }
}

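As a usage sketch for the Recoverable trait above — `with_retries` is a hypothetical helper, not part of this diff — retrying only while the error is classified recoverable:

```rust
use crate::error::{Recoverable, StreamError};

// Hypothetical helper: retry an operation while is_recoverable() allows
// it, giving up after `max_retries` additional attempts.
fn with_retries<T>(
    mut op: impl FnMut() -> Result<T, StreamError>,
    max_retries: usize,
) -> Result<T, StreamError> {
    let mut attempts = 0;
    loop {
        match op() {
            Ok(v) => return Ok(v),
            Err(e) if e.is_recoverable() && attempts < max_retries => attempts += 1,
            Err(e) => return Err(e),
        }
    }
}
```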
// ============= Main Proxy Errors =============

#[derive(Error, Debug)]
pub enum ProxyError {
    // ============= Crypto Errors =============
@@ -13,6 +172,11 @@ pub enum ProxyError {
    #[error("Invalid key length: expected {expected}, got {got}")]
    InvalidKeyLength { expected: usize, got: usize },

    // ============= Stream Errors =============

    #[error("Stream error: {0}")]
    Stream(#[from] StreamError),

    // ============= Protocol Errors =============

    #[error("Invalid handshake: {0}")]
@@ -39,6 +203,12 @@ pub enum ProxyError {
    #[error("Sequence number mismatch: expected={expected}, got={got}")]
    SeqNoMismatch { expected: i32, got: i32 },

    #[error("TLS handshake failed: {reason}")]
    TlsHandshakeFailed { reason: String },

    #[error("Telegram handshake timeout")]
    TgHandshakeTimeout,

    // ============= Network Errors =============

    #[error("Connection timeout to {addr}")]
@@ -55,6 +225,9 @@ pub enum ProxyError {
    #[error("Invalid proxy protocol header")]
    InvalidProxyProtocol,

    #[error("Proxy error: {0}")]
    Proxy(String),

    // ============= Config Errors =============

    #[error("Config error: {0}")]
@@ -77,27 +250,53 @@ pub enum ProxyError {
    #[error("Unknown user")]
    UnknownUser,

    #[error("Rate limited")]
    RateLimited,

    // ============= General Errors =============

    #[error("Internal error: {0}")]
    Internal(String),
}

impl Recoverable for ProxyError {
    fn is_recoverable(&self) -> bool {
        match self {
            Self::Stream(e) => e.is_recoverable(),
            Self::Io(e) => e.is_recoverable(),
            Self::ConnectionTimeout { .. } => true,
            Self::RateLimited => true,
            _ => false,
        }
    }

    fn can_continue(&self) -> bool {
        match self {
            Self::Stream(e) => e.can_continue(),
            Self::Io(e) => e.can_continue(),
            _ => false,
        }
    }
}

/// Convenient Result type alias
pub type Result<T> = std::result::Result<T, ProxyError>;

/// Result type for stream operations
pub type StreamResult<T> = std::result::Result<T, StreamError>;

/// Result with optional bad client handling
#[derive(Debug)]
-pub enum HandshakeResult<T> {
+pub enum HandshakeResult<T, R, W> {
    /// Handshake succeeded
    Success(T),
-    /// Client failed validation, needs masking
-    BadClient,
+    /// Client failed validation, needs masking. Returns ownership of streams.
+    BadClient { reader: R, writer: W },
    /// Error occurred
    Error(ProxyError),
}

-impl<T> HandshakeResult<T> {
+impl<T, R, W> HandshakeResult<T, R, W> {
    /// Check if successful
    pub fn is_success(&self) -> bool {
        matches!(self, HandshakeResult::Success(_))
@@ -105,58 +304,87 @@ impl<T> HandshakeResult<T> {

    /// Check if bad client
    pub fn is_bad_client(&self) -> bool {
-        matches!(self, HandshakeResult::BadClient)
-    }
-
-    /// Convert to Result, treating BadClient as error
-    pub fn into_result(self) -> Result<T> {
-        match self {
-            HandshakeResult::Success(v) => Ok(v),
-            HandshakeResult::BadClient => Err(ProxyError::InvalidHandshake("Bad client".into())),
-            HandshakeResult::Error(e) => Err(e),
-        }
+        matches!(self, HandshakeResult::BadClient { .. })
    }

    /// Map the success value
-    pub fn map<U, F: FnOnce(T) -> U>(self, f: F) -> HandshakeResult<U> {
+    pub fn map<U, F: FnOnce(T) -> U>(self, f: F) -> HandshakeResult<U, R, W> {
        match self {
            HandshakeResult::Success(v) => HandshakeResult::Success(f(v)),
-            HandshakeResult::BadClient => HandshakeResult::BadClient,
+            HandshakeResult::BadClient { reader, writer } => HandshakeResult::BadClient { reader, writer },
            HandshakeResult::Error(e) => HandshakeResult::Error(e),
        }
    }
}

-impl<T> From<ProxyError> for HandshakeResult<T> {
+impl<T, R, W> From<ProxyError> for HandshakeResult<T, R, W> {
    fn from(err: ProxyError) -> Self {
        HandshakeResult::Error(err)
    }
}

-impl<T> From<std::io::Error> for HandshakeResult<T> {
+impl<T, R, W> From<std::io::Error> for HandshakeResult<T, R, W> {
    fn from(err: std::io::Error) -> Self {
        HandshakeResult::Error(ProxyError::Io(err))
    }
}

impl<T, R, W> From<StreamError> for HandshakeResult<T, R, W> {
    fn from(err: StreamError) -> Self {
        HandshakeResult::Error(ProxyError::Stream(err))
    }
}

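The added R/W parameters exist so a failed handshake can hand the raw streams back to the caller for masking rather than dropping them. A minimal consumption sketch — the handler itself is hypothetical, and the masking step is only indicated by a comment:

```rust
use crate::error::{HandshakeResult, ProxyError};

fn handle<T, R, W>(res: HandshakeResult<T, R, W>) -> Result<Option<T>, ProxyError> {
    match res {
        HandshakeResult::Success(v) => Ok(Some(v)),
        HandshakeResult::BadClient { reader, writer } => {
            // The caller regains ownership of both stream halves here and
            // would run its masking path before closing the connection.
            let _ = (reader, writer);
            Ok(None)
        }
        HandshakeResult::Error(e) => Err(e),
    }
}
```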
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_stream_error_display() {
        let err = StreamError::PartialRead { expected: 100, got: 50 };
        assert!(err.to_string().contains("100"));
        assert!(err.to_string().contains("50"));

        let err = StreamError::Poisoned { reason: "test".into() };
        assert!(err.to_string().contains("test"));
    }

    #[test]
    fn test_stream_error_recoverable() {
        assert!(StreamError::PartialRead { expected: 10, got: 5 }.is_recoverable());
        assert!(StreamError::PartialWrite { expected: 10, written: 5 }.is_recoverable());
        assert!(!StreamError::Poisoned { reason: "x".into() }.is_recoverable());
        assert!(!StreamError::UnexpectedEof.is_recoverable());
    }

    #[test]
    fn test_stream_error_can_continue() {
        assert!(!StreamError::Poisoned { reason: "x".into() }.can_continue());
        assert!(!StreamError::UnexpectedEof.can_continue());
        assert!(StreamError::PartialRead { expected: 10, got: 5 }.can_continue());
    }

    #[test]
    fn test_stream_error_to_io_error() {
        let stream_err = StreamError::UnexpectedEof;
        let io_err: std::io::Error = stream_err.into();
        assert_eq!(io_err.kind(), std::io::ErrorKind::UnexpectedEof);
    }

    #[test]
    fn test_handshake_result() {
-        let success: HandshakeResult<i32> = HandshakeResult::Success(42);
+        let success: HandshakeResult<i32, (), ()> = HandshakeResult::Success(42);
        assert!(success.is_success());
        assert!(!success.is_bad_client());

-        let bad: HandshakeResult<i32> = HandshakeResult::BadClient;
+        let bad: HandshakeResult<i32, (), ()> = HandshakeResult::BadClient { reader: (), writer: () };
        assert!(!bad.is_success());
        assert!(bad.is_bad_client());
    }

    #[test]
    fn test_handshake_result_map() {
-        let success: HandshakeResult<i32> = HandshakeResult::Success(42);
+        let success: HandshakeResult<i32, (), ()> = HandshakeResult::Success(42);
        let mapped = success.map(|x| x * 2);

        match mapped {
@@ -165,6 +393,15 @@ mod tests {
        }
    }

    #[test]
    fn test_proxy_error_recoverable() {
        let err = ProxyError::RateLimited;
        assert!(err.is_recoverable());

        let err = ProxyError::InvalidHandshake("bad".into());
        assert!(!err.is_recoverable());
    }

    #[test]
    fn test_error_display() {
        let err = ProxyError::ConnectionTimeout { addr: "1.2.3.4:443".into() };

625 src/ip_tracker.rs (new file)
@@ -0,0 +1,625 @@
// IP address tracking and per-user unique IP limiting.

#![allow(dead_code)]

use std::collections::HashMap;
use std::net::IpAddr;
use std::sync::Arc;
use std::sync::atomic::{AtomicU64, Ordering};
use std::time::{Duration, Instant};

use tokio::sync::RwLock;

use crate::config::UserMaxUniqueIpsMode;

#[derive(Debug, Clone)]
pub struct UserIpTracker {
    active_ips: Arc<RwLock<HashMap<String, HashMap<IpAddr, usize>>>>,
    recent_ips: Arc<RwLock<HashMap<String, HashMap<IpAddr, Instant>>>>,
    max_ips: Arc<RwLock<HashMap<String, usize>>>,
    limit_mode: Arc<RwLock<UserMaxUniqueIpsMode>>,
    limit_window: Arc<RwLock<Duration>>,
    last_compact_epoch_secs: Arc<AtomicU64>,
}

impl UserIpTracker {
    pub fn new() -> Self {
        Self {
            active_ips: Arc::new(RwLock::new(HashMap::new())),
            recent_ips: Arc::new(RwLock::new(HashMap::new())),
            max_ips: Arc::new(RwLock::new(HashMap::new())),
            limit_mode: Arc::new(RwLock::new(UserMaxUniqueIpsMode::ActiveWindow)),
            limit_window: Arc::new(RwLock::new(Duration::from_secs(30))),
            last_compact_epoch_secs: Arc::new(AtomicU64::new(0)),
        }
    }

    fn now_epoch_secs() -> u64 {
        std::time::SystemTime::now()
            .duration_since(std::time::UNIX_EPOCH)
            .unwrap_or_default()
            .as_secs()
    }

    async fn maybe_compact_empty_users(&self) {
        const COMPACT_INTERVAL_SECS: u64 = 60;
        let now_epoch_secs = Self::now_epoch_secs();
        let last_compact_epoch_secs = self.last_compact_epoch_secs.load(Ordering::Relaxed);
        if now_epoch_secs.saturating_sub(last_compact_epoch_secs) < COMPACT_INTERVAL_SECS {
            return;
        }
        if self
            .last_compact_epoch_secs
            .compare_exchange(
                last_compact_epoch_secs,
                now_epoch_secs,
                Ordering::AcqRel,
                Ordering::Relaxed,
            )
            .is_err()
        {
            return;
        }

        let mut active_ips = self.active_ips.write().await;
        let mut recent_ips = self.recent_ips.write().await;
        let mut users = Vec::<String>::with_capacity(active_ips.len().saturating_add(recent_ips.len()));
        users.extend(active_ips.keys().cloned());
        for user in recent_ips.keys() {
            if !active_ips.contains_key(user) {
                users.push(user.clone());
            }
        }

        for user in users {
            let active_empty = active_ips.get(&user).map(|ips| ips.is_empty()).unwrap_or(true);
            let recent_empty = recent_ips.get(&user).map(|ips| ips.is_empty()).unwrap_or(true);
            if active_empty && recent_empty {
                active_ips.remove(&user);
                recent_ips.remove(&user);
            }
        }
    }

    pub async fn set_limit_policy(&self, mode: UserMaxUniqueIpsMode, window_secs: u64) {
        {
            let mut current_mode = self.limit_mode.write().await;
            *current_mode = mode;
        }
        let mut current_window = self.limit_window.write().await;
        *current_window = Duration::from_secs(window_secs.max(1));
    }

    pub async fn set_user_limit(&self, username: &str, max_ips: usize) {
        let mut limits = self.max_ips.write().await;
        limits.insert(username.to_string(), max_ips);
    }

    pub async fn remove_user_limit(&self, username: &str) {
        let mut limits = self.max_ips.write().await;
        limits.remove(username);
    }

    pub async fn load_limits(&self, limits: &HashMap<String, usize>) {
        let mut max_ips = self.max_ips.write().await;
        max_ips.clone_from(limits);
    }

    fn prune_recent(user_recent: &mut HashMap<IpAddr, Instant>, now: Instant, window: Duration) {
        if user_recent.is_empty() {
            return;
        }
        user_recent.retain(|_, seen_at| now.duration_since(*seen_at) <= window);
    }

    pub async fn check_and_add(&self, username: &str, ip: IpAddr) -> Result<(), String> {
        self.maybe_compact_empty_users().await;
        let limit = {
            let max_ips = self.max_ips.read().await;
            max_ips.get(username).copied()
        };
        let mode = *self.limit_mode.read().await;
        let window = *self.limit_window.read().await;
        let now = Instant::now();

        let mut active_ips = self.active_ips.write().await;
        let user_active = active_ips
            .entry(username.to_string())
            .or_insert_with(HashMap::new);

        let mut recent_ips = self.recent_ips.write().await;
        let user_recent = recent_ips
            .entry(username.to_string())
            .or_insert_with(HashMap::new);
        Self::prune_recent(user_recent, now, window);

        if let Some(count) = user_active.get_mut(&ip) {
            *count = count.saturating_add(1);
            user_recent.insert(ip, now);
            return Ok(());
        }

        if let Some(limit) = limit {
            let active_limit_reached = user_active.len() >= limit;
            let recent_limit_reached = user_recent.len() >= limit;
            let deny = match mode {
                UserMaxUniqueIpsMode::ActiveWindow => active_limit_reached,
                UserMaxUniqueIpsMode::TimeWindow => recent_limit_reached,
                UserMaxUniqueIpsMode::Combined => active_limit_reached || recent_limit_reached,
            };

            if deny {
                return Err(format!(
                    "IP limit reached for user '{}': active={}/{} recent={}/{} mode={:?}",
                    username,
                    user_active.len(),
                    limit,
                    user_recent.len(),
                    limit,
                    mode
                ));
            }
        }

        user_active.insert(ip, 1);
        user_recent.insert(ip, now);
        Ok(())
    }

    pub async fn remove_ip(&self, username: &str, ip: IpAddr) {
        self.maybe_compact_empty_users().await;
        let mut active_ips = self.active_ips.write().await;
        if let Some(user_ips) = active_ips.get_mut(username) {
            if let Some(count) = user_ips.get_mut(&ip) {
                if *count > 1 {
                    *count -= 1;
                } else {
                    user_ips.remove(&ip);
                }
            }
            if user_ips.is_empty() {
                active_ips.remove(username);
            }
        }
    }

    pub async fn get_recent_counts_for_users(&self, users: &[String]) -> HashMap<String, usize> {
        let window = *self.limit_window.read().await;
        let now = Instant::now();
        let recent_ips = self.recent_ips.read().await;

        let mut counts = HashMap::with_capacity(users.len());
        for user in users {
            let count = if let Some(user_recent) = recent_ips.get(user) {
                user_recent
                    .values()
                    .filter(|seen_at| now.duration_since(**seen_at) <= window)
                    .count()
            } else {
                0
            };
            counts.insert(user.clone(), count);
        }
        counts
    }

    pub async fn get_active_ips_for_users(&self, users: &[String]) -> HashMap<String, Vec<IpAddr>> {
        let active_ips = self.active_ips.read().await;
        let mut out = HashMap::with_capacity(users.len());
        for user in users {
            let mut ips = active_ips
                .get(user)
                .map(|per_ip| per_ip.keys().copied().collect::<Vec<_>>())
                .unwrap_or_else(Vec::new);
            ips.sort();
            out.insert(user.clone(), ips);
        }
        out
    }

    pub async fn get_recent_ips_for_users(&self, users: &[String]) -> HashMap<String, Vec<IpAddr>> {
        let window = *self.limit_window.read().await;
        let now = Instant::now();
        let recent_ips = self.recent_ips.read().await;

        let mut out = HashMap::with_capacity(users.len());
        for user in users {
            let mut ips = if let Some(user_recent) = recent_ips.get(user) {
                user_recent
                    .iter()
                    .filter(|(_, seen_at)| now.duration_since(**seen_at) <= window)
                    .map(|(ip, _)| *ip)
                    .collect::<Vec<_>>()
            } else {
                Vec::new()
            };
            ips.sort();
            out.insert(user.clone(), ips);
        }
        out
    }

    pub async fn get_active_ip_count(&self, username: &str) -> usize {
        let active_ips = self.active_ips.read().await;
        active_ips.get(username).map(|ips| ips.len()).unwrap_or(0)
    }

    pub async fn get_active_ips(&self, username: &str) -> Vec<IpAddr> {
        let active_ips = self.active_ips.read().await;
        active_ips
            .get(username)
            .map(|ips| ips.keys().copied().collect())
            .unwrap_or_else(Vec::new)
    }

    pub async fn get_stats(&self) -> Vec<(String, usize, usize)> {
        let active_ips = self.active_ips.read().await;
        let max_ips = self.max_ips.read().await;

        let mut stats = Vec::new();
        for (username, user_ips) in active_ips.iter() {
            let limit = max_ips.get(username).copied().unwrap_or(0);
            stats.push((username.clone(), user_ips.len(), limit));
        }

        stats.sort_by(|a, b| a.0.cmp(&b.0));
        stats
    }

    pub async fn clear_user_ips(&self, username: &str) {
        let mut active_ips = self.active_ips.write().await;
        active_ips.remove(username);
        drop(active_ips);

        let mut recent_ips = self.recent_ips.write().await;
        recent_ips.remove(username);
    }

    pub async fn clear_all(&self) {
        let mut active_ips = self.active_ips.write().await;
        active_ips.clear();
        drop(active_ips);

        let mut recent_ips = self.recent_ips.write().await;
        recent_ips.clear();
    }

    pub async fn is_ip_active(&self, username: &str, ip: IpAddr) -> bool {
        let active_ips = self.active_ips.read().await;
        active_ips
            .get(username)
            .map(|ips| ips.contains_key(&ip))
            .unwrap_or(false)
    }

    pub async fn get_user_limit(&self, username: &str) -> Option<usize> {
        let max_ips = self.max_ips.read().await;
        max_ips.get(username).copied()
    }

    pub async fn format_stats(&self) -> String {
        let stats = self.get_stats().await;

        if stats.is_empty() {
            return String::from("No active users");
        }

        let mut output = String::from("User IP Statistics:\n");
        output.push_str("==================\n");

        for (username, active_count, limit) in stats {
            output.push_str(&format!(
                "User: {:<20} Active IPs: {}/{}\n",
                username,
                active_count,
                if limit > 0 {
                    limit.to_string()
                } else {
                    "unlimited".to_string()
                }
            ));

            let ips = self.get_active_ips(&username).await;
            for ip in ips {
                output.push_str(&format!("  - {}\n", ip));
            }
        }

        output
    }
}

impl Default for UserIpTracker {
    fn default() -> Self {
        Self::new()
    }
}

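A sketch of how a connection handler might wire the tracker above into its accept and disconnect paths — `serve_connection` is hypothetical; only tracker methods defined in this file are used:

```rust
use std::net::IpAddr;

use crate::ip_tracker::UserIpTracker;

// Hypothetical accept-path guard: admit the connection only if the
// user's unique-IP policy allows it, and release the slot on disconnect.
async fn serve_connection(tracker: &UserIpTracker, user: &str, ip: IpAddr) {
    if let Err(reason) = tracker.check_and_add(user, ip).await {
        tracing::warn!(%user, %ip, %reason, "connection rejected by IP limit");
        return;
    }

    // ... proxy traffic here ...

    tracker.remove_ip(user, ip).await;
}
```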
#[cfg(test)]
mod tests {
    use super::*;
    use std::net::{IpAddr, Ipv4Addr, Ipv6Addr};

    fn test_ipv4(oct1: u8, oct2: u8, oct3: u8, oct4: u8) -> IpAddr {
        IpAddr::V4(Ipv4Addr::new(oct1, oct2, oct3, oct4))
    }

    fn test_ipv6() -> IpAddr {
        IpAddr::V6(Ipv6Addr::new(0x2001, 0xdb8, 0, 0, 0, 0, 0, 1))
    }

    #[tokio::test]
    async fn test_basic_ip_limit() {
        let tracker = UserIpTracker::new();
        tracker.set_user_limit("test_user", 2).await;

        let ip1 = test_ipv4(192, 168, 1, 1);
        let ip2 = test_ipv4(192, 168, 1, 2);
        let ip3 = test_ipv4(192, 168, 1, 3);

        assert!(tracker.check_and_add("test_user", ip1).await.is_ok());
        assert!(tracker.check_and_add("test_user", ip2).await.is_ok());
        assert!(tracker.check_and_add("test_user", ip3).await.is_err());

        assert_eq!(tracker.get_active_ip_count("test_user").await, 2);
    }

    #[tokio::test]
    async fn test_active_window_rejects_new_ip_and_keeps_existing_session() {
        let tracker = UserIpTracker::new();
        tracker.set_user_limit("test_user", 1).await;
        tracker
            .set_limit_policy(UserMaxUniqueIpsMode::ActiveWindow, 30)
            .await;

        let ip1 = test_ipv4(10, 10, 10, 1);
        let ip2 = test_ipv4(10, 10, 10, 2);

        assert!(tracker.check_and_add("test_user", ip1).await.is_ok());
        assert!(tracker.is_ip_active("test_user", ip1).await);
        assert!(tracker.check_and_add("test_user", ip2).await.is_err());

        // Existing session remains active; only new unique IP is denied.
        assert!(tracker.is_ip_active("test_user", ip1).await);
        assert_eq!(tracker.get_active_ip_count("test_user").await, 1);
    }

    #[tokio::test]
    async fn test_reconnection_from_same_ip() {
        let tracker = UserIpTracker::new();
        tracker.set_user_limit("test_user", 2).await;

        let ip1 = test_ipv4(192, 168, 1, 1);

        assert!(tracker.check_and_add("test_user", ip1).await.is_ok());
        assert!(tracker.check_and_add("test_user", ip1).await.is_ok());
        assert_eq!(tracker.get_active_ip_count("test_user").await, 1);
    }

    #[tokio::test]
    async fn test_same_ip_disconnect_keeps_active_while_other_session_alive() {
        let tracker = UserIpTracker::new();
        tracker.set_user_limit("test_user", 2).await;

        let ip1 = test_ipv4(192, 168, 1, 1);

        assert!(tracker.check_and_add("test_user", ip1).await.is_ok());
        assert!(tracker.check_and_add("test_user", ip1).await.is_ok());
        assert_eq!(tracker.get_active_ip_count("test_user").await, 1);

        tracker.remove_ip("test_user", ip1).await;
        assert_eq!(tracker.get_active_ip_count("test_user").await, 1);

        tracker.remove_ip("test_user", ip1).await;
        assert_eq!(tracker.get_active_ip_count("test_user").await, 0);
    }

    #[tokio::test]
    async fn test_ip_removal() {
        let tracker = UserIpTracker::new();
        tracker.set_user_limit("test_user", 2).await;

        let ip1 = test_ipv4(192, 168, 1, 1);
        let ip2 = test_ipv4(192, 168, 1, 2);
        let ip3 = test_ipv4(192, 168, 1, 3);

        assert!(tracker.check_and_add("test_user", ip1).await.is_ok());
        assert!(tracker.check_and_add("test_user", ip2).await.is_ok());
        assert!(tracker.check_and_add("test_user", ip3).await.is_err());

        tracker.remove_ip("test_user", ip1).await;

        assert!(tracker.check_and_add("test_user", ip3).await.is_ok());
        assert_eq!(tracker.get_active_ip_count("test_user").await, 2);
    }

    #[tokio::test]
    async fn test_no_limit() {
        let tracker = UserIpTracker::new();

        let ip1 = test_ipv4(192, 168, 1, 1);
        let ip2 = test_ipv4(192, 168, 1, 2);
        let ip3 = test_ipv4(192, 168, 1, 3);

        assert!(tracker.check_and_add("test_user", ip1).await.is_ok());
        assert!(tracker.check_and_add("test_user", ip2).await.is_ok());
        assert!(tracker.check_and_add("test_user", ip3).await.is_ok());

        assert_eq!(tracker.get_active_ip_count("test_user").await, 3);
    }

    #[tokio::test]
    async fn test_multiple_users() {
        let tracker = UserIpTracker::new();
        tracker.set_user_limit("user1", 2).await;
        tracker.set_user_limit("user2", 1).await;

        let ip1 = test_ipv4(192, 168, 1, 1);
        let ip2 = test_ipv4(192, 168, 1, 2);

        assert!(tracker.check_and_add("user1", ip1).await.is_ok());
        assert!(tracker.check_and_add("user1", ip2).await.is_ok());

        assert!(tracker.check_and_add("user2", ip1).await.is_ok());
        assert!(tracker.check_and_add("user2", ip2).await.is_err());
    }

    #[tokio::test]
    async fn test_ipv6_support() {
        let tracker = UserIpTracker::new();
        tracker.set_user_limit("test_user", 2).await;

        let ipv4 = test_ipv4(192, 168, 1, 1);
        let ipv6 = test_ipv6();

        assert!(tracker.check_and_add("test_user", ipv4).await.is_ok());
        assert!(tracker.check_and_add("test_user", ipv6).await.is_ok());

        assert_eq!(tracker.get_active_ip_count("test_user").await, 2);
    }

    #[tokio::test]
    async fn test_get_active_ips() {
        let tracker = UserIpTracker::new();
        tracker.set_user_limit("test_user", 3).await;

        let ip1 = test_ipv4(192, 168, 1, 1);
        let ip2 = test_ipv4(192, 168, 1, 2);

        tracker.check_and_add("test_user", ip1).await.unwrap();
        tracker.check_and_add("test_user", ip2).await.unwrap();

        let active_ips = tracker.get_active_ips("test_user").await;
        assert_eq!(active_ips.len(), 2);
        assert!(active_ips.contains(&ip1));
        assert!(active_ips.contains(&ip2));
    }

    #[tokio::test]
    async fn test_stats() {
        let tracker = UserIpTracker::new();
        tracker.set_user_limit("user1", 3).await;
        tracker.set_user_limit("user2", 2).await;

        let ip1 = test_ipv4(192, 168, 1, 1);
        let ip2 = test_ipv4(192, 168, 1, 2);

        tracker.check_and_add("user1", ip1).await.unwrap();
        tracker.check_and_add("user2", ip2).await.unwrap();

        let stats = tracker.get_stats().await;
        assert_eq!(stats.len(), 2);

        assert!(stats.iter().any(|(name, _, _)| name == "user1"));
        assert!(stats.iter().any(|(name, _, _)| name == "user2"));
    }

    #[tokio::test]
    async fn test_clear_user_ips() {
        let tracker = UserIpTracker::new();
        let ip1 = test_ipv4(192, 168, 1, 1);

        tracker.check_and_add("test_user", ip1).await.unwrap();
        assert_eq!(tracker.get_active_ip_count("test_user").await, 1);

        tracker.clear_user_ips("test_user").await;
        assert_eq!(tracker.get_active_ip_count("test_user").await, 0);
    }

    #[tokio::test]
    async fn test_is_ip_active() {
        let tracker = UserIpTracker::new();
        let ip1 = test_ipv4(192, 168, 1, 1);
        let ip2 = test_ipv4(192, 168, 1, 2);

        tracker.check_and_add("test_user", ip1).await.unwrap();

        assert!(tracker.is_ip_active("test_user", ip1).await);
        assert!(!tracker.is_ip_active("test_user", ip2).await);
    }

    #[tokio::test]
    async fn test_load_limits_from_config() {
        let tracker = UserIpTracker::new();

        let mut config_limits = HashMap::new();
        config_limits.insert("user1".to_string(), 5);
        config_limits.insert("user2".to_string(), 3);

        tracker.load_limits(&config_limits).await;

        assert_eq!(tracker.get_user_limit("user1").await, Some(5));
        assert_eq!(tracker.get_user_limit("user2").await, Some(3));
        assert_eq!(tracker.get_user_limit("user3").await, None);
    }

    #[tokio::test]
    async fn test_load_limits_replaces_previous_map() {
        let tracker = UserIpTracker::new();

        let mut first = HashMap::new();
        first.insert("user1".to_string(), 2);
        first.insert("user2".to_string(), 3);
        tracker.load_limits(&first).await;

        let mut second = HashMap::new();
        second.insert("user2".to_string(), 5);
        tracker.load_limits(&second).await;

        assert_eq!(tracker.get_user_limit("user1").await, None);
        assert_eq!(tracker.get_user_limit("user2").await, Some(5));
    }

    #[tokio::test]
    async fn test_time_window_mode_blocks_recent_ip_churn() {
        let tracker = UserIpTracker::new();
        tracker.set_user_limit("test_user", 1).await;
        tracker
            .set_limit_policy(UserMaxUniqueIpsMode::TimeWindow, 30)
            .await;

        let ip1 = test_ipv4(10, 0, 0, 1);
        let ip2 = test_ipv4(10, 0, 0, 2);

        assert!(tracker.check_and_add("test_user", ip1).await.is_ok());
        tracker.remove_ip("test_user", ip1).await;
        assert!(tracker.check_and_add("test_user", ip2).await.is_err());
    }

    #[tokio::test]
    async fn test_combined_mode_enforces_active_and_recent_limits() {
        let tracker = UserIpTracker::new();
        tracker.set_user_limit("test_user", 1).await;
        tracker
            .set_limit_policy(UserMaxUniqueIpsMode::Combined, 30)
            .await;

        let ip1 = test_ipv4(10, 0, 1, 1);
        let ip2 = test_ipv4(10, 0, 1, 2);

        assert!(tracker.check_and_add("test_user", ip1).await.is_ok());
        assert!(tracker.check_and_add("test_user", ip2).await.is_err());

        tracker.remove_ip("test_user", ip1).await;
        assert!(tracker.check_and_add("test_user", ip2).await.is_err());
    }

    #[tokio::test]
    async fn test_time_window_expires() {
        let tracker = UserIpTracker::new();
        tracker.set_user_limit("test_user", 1).await;
        tracker
            .set_limit_policy(UserMaxUniqueIpsMode::TimeWindow, 1)
            .await;

        let ip1 = test_ipv4(10, 1, 0, 1);
        let ip2 = test_ipv4(10, 1, 0, 2);

        assert!(tracker.check_and_add("test_user", ip1).await.is_ok());
        tracker.remove_ip("test_user", ip1).await;
        assert!(tracker.check_and_add("test_user", ip2).await.is_err());

        tokio::time::sleep(Duration::from_millis(1100)).await;
        assert!(tracker.check_and_add("test_user", ip2).await.is_ok());
    }
}
2293 src/main.rs (diff suppressed: too large)
1903 src/metrics.rs (new file; diff suppressed: too large)
197 src/network/dns_overrides.rs (new file)
@@ -0,0 +1,197 @@
//! Runtime DNS overrides for `host:port` targets.

use std::collections::HashMap;
use std::net::{IpAddr, Ipv6Addr, SocketAddr};
use std::sync::{OnceLock, RwLock};

use crate::error::{ProxyError, Result};

type OverrideMap = HashMap<(String, u16), IpAddr>;

static DNS_OVERRIDES: OnceLock<RwLock<OverrideMap>> = OnceLock::new();

fn overrides_store() -> &'static RwLock<OverrideMap> {
    DNS_OVERRIDES.get_or_init(|| RwLock::new(HashMap::new()))
}

fn parse_ip_spec(ip_spec: &str) -> Result<IpAddr> {
    if ip_spec.starts_with('[') && ip_spec.ends_with(']') {
        let inner = &ip_spec[1..ip_spec.len() - 1];
        let ipv6 = inner.parse::<Ipv6Addr>().map_err(|_| {
            ProxyError::Config(format!(
                "network.dns_overrides IPv6 override is invalid: '{ip_spec}'"
            ))
        })?;
        return Ok(IpAddr::V6(ipv6));
    }

    let ip = ip_spec.parse::<IpAddr>().map_err(|_| {
        ProxyError::Config(format!(
            "network.dns_overrides IP is invalid: '{ip_spec}'"
        ))
    })?;
    if matches!(ip, IpAddr::V6(_)) {
        return Err(ProxyError::Config(format!(
            "network.dns_overrides IPv6 must be bracketed: '{ip_spec}'"
        )));
    }
    Ok(ip)
}

fn parse_entry(entry: &str) -> Result<((String, u16), IpAddr)> {
    let trimmed = entry.trim();
    if trimmed.is_empty() {
        return Err(ProxyError::Config(
            "network.dns_overrides entry cannot be empty".to_string(),
        ));
    }

    let first_sep = trimmed.find(':').ok_or_else(|| {
        ProxyError::Config(format!(
            "network.dns_overrides entry must use host:port:ip format: '{trimmed}'"
        ))
    })?;
    let second_sep = trimmed[first_sep + 1..]
        .find(':')
        .map(|idx| first_sep + 1 + idx)
        .ok_or_else(|| {
            ProxyError::Config(format!(
                "network.dns_overrides entry must use host:port:ip format: '{trimmed}'"
            ))
        })?;

    let host = trimmed[..first_sep].trim();
    let port_str = trimmed[first_sep + 1..second_sep].trim();
    let ip_str = trimmed[second_sep + 1..].trim();

    if host.is_empty() {
        return Err(ProxyError::Config(format!(
            "network.dns_overrides host cannot be empty: '{trimmed}'"
        )));
    }
    if host.contains(':') {
        return Err(ProxyError::Config(format!(
            "network.dns_overrides host must be a domain name without ':' in this format: '{trimmed}'"
        )));
    }

    let port = port_str.parse::<u16>().map_err(|_| {
        ProxyError::Config(format!(
            "network.dns_overrides port is invalid: '{trimmed}'"
        ))
    })?;
    let ip = parse_ip_spec(ip_str)?;

    Ok(((host.to_ascii_lowercase(), port), ip))
}

fn parse_entries(entries: &[String]) -> Result<OverrideMap> {
    let mut parsed = HashMap::new();
    for entry in entries {
        let (key, ip) = parse_entry(entry)?;
        parsed.insert(key, ip);
    }
    Ok(parsed)
}

/// Validate `network.dns_overrides` entries without updating runtime state.
pub fn validate_entries(entries: &[String]) -> Result<()> {
    let _ = parse_entries(entries)?;
    Ok(())
}

/// Replace runtime DNS overrides with a new validated snapshot.
pub fn install_entries(entries: &[String]) -> Result<()> {
    let parsed = parse_entries(entries)?;
    let mut guard = overrides_store()
        .write()
        .map_err(|_| ProxyError::Config("network.dns_overrides runtime lock is poisoned".to_string()))?;
    *guard = parsed;
    Ok(())
}

/// Resolve a hostname override for `(host, port)` if present.
pub fn resolve(host: &str, port: u16) -> Option<IpAddr> {
    let key = (host.to_ascii_lowercase(), port);
    overrides_store()
        .read()
        .ok()
        .and_then(|guard| guard.get(&key).copied())
}

/// Resolve a hostname override and construct a socket address when present.
pub fn resolve_socket_addr(host: &str, port: u16) -> Option<SocketAddr> {
    resolve(host, port).map(|ip| SocketAddr::new(ip, port))
}

/// Parse a runtime endpoint in `host:port` format.
///
/// Supports:
/// - `example.com:443`
/// - `[2001:db8::1]:443`
pub fn split_host_port(endpoint: &str) -> Option<(String, u16)> {
    if endpoint.starts_with('[') {
        let bracket_end = endpoint.find(']')?;
        if endpoint.as_bytes().get(bracket_end + 1) != Some(&b':') {
            return None;
        }
        let host = endpoint[1..bracket_end].trim();
        let port = endpoint[bracket_end + 2..].trim().parse::<u16>().ok()?;
        if host.is_empty() {
            return None;
        }
        return Some((host.to_ascii_lowercase(), port));
    }

    let split_idx = endpoint.rfind(':')?;
    let host = endpoint[..split_idx].trim();
    let port = endpoint[split_idx + 1..].trim().parse::<u16>().ok()?;
    if host.is_empty() || host.contains(':') {
        return None;
    }

    Some((host.to_ascii_lowercase(), port))
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn validate_accepts_ipv4_and_bracketed_ipv6() {
        let entries = vec![
            "example.com:443:127.0.0.1".to_string(),
            "example.net:8443:[2001:db8::10]".to_string(),
        ];
        assert!(validate_entries(&entries).is_ok());
    }

    #[test]
    fn validate_rejects_unbracketed_ipv6() {
        let entries = vec!["example.net:443:2001:db8::10".to_string()];
        let err = validate_entries(&entries).unwrap_err().to_string();
        assert!(err.contains("must be bracketed"));
    }

    #[test]
    fn install_and_resolve_are_case_insensitive_for_host() {
        let entries = vec!["MyPetrovich.ru:8443:127.0.0.1".to_string()];
        install_entries(&entries).unwrap();

        let resolved = resolve("mypetrovich.ru", 8443);
        assert_eq!(resolved, Some("127.0.0.1".parse().unwrap()));
    }

    #[test]
    fn split_host_port_parses_supported_shapes() {
        assert_eq!(
            split_host_port("example.com:443"),
            Some(("example.com".to_string(), 443))
        );
        assert_eq!(
            split_host_port("[2001:db8::1]:443"),
            Some(("2001:db8::1".to_string(), 443))
        );
        assert_eq!(split_host_port("2001:db8::1:443"), None);
    }
}
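A usage sketch for the override module above (the hostname is a made-up example); entries follow the host:port:ip shape enforced by parse_entry, with IPv6 targets bracketed:

```rust
use crate::network::dns_overrides::{install_entries, resolve_socket_addr, split_host_port};

fn sketch() -> crate::error::Result<()> {
    // Hypothetical override: pin example.org:443 to a fixed IPv6 address.
    install_entries(&["example.org:443:[2001:db8::10]".to_string()])?;

    // split_host_port lowercases the host; resolve_socket_addr returns
    // Some(addr) only when an override matches the (host, port) pair.
    if let Some((host, port)) = split_host_port("EXAMPLE.org:443") {
        assert!(resolve_socket_addr(&host, port).is_some());
    }
    Ok(())
}
```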
5 src/network/mod.rs (new file)
@@ -0,0 +1,5 @@
pub mod dns_overrides;
pub mod probe;
pub mod stun;

pub use stun::IpFamily;
370 src/network/probe.rs (new file)
@@ -0,0 +1,370 @@
|
||||
#![allow(dead_code)]
|
||||
|
||||
use std::collections::HashMap;
|
||||
use std::net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr, UdpSocket};
|
||||
use std::time::Duration;
|
||||
|
||||
use tokio::task::JoinSet;
|
||||
use tokio::time::timeout;
|
||||
use tracing::{debug, info, warn};
|
||||
|
||||
use crate::config::NetworkConfig;
|
||||
use crate::error::Result;
|
||||
use crate::network::stun::{stun_probe_dual, DualStunResult, IpFamily, StunProbeResult};
|
||||
|
||||
#[derive(Debug, Clone, Default)]
|
||||
pub struct NetworkProbe {
|
||||
pub detected_ipv4: Option<Ipv4Addr>,
|
||||
pub detected_ipv6: Option<Ipv6Addr>,
|
||||
pub reflected_ipv4: Option<SocketAddr>,
|
||||
pub reflected_ipv6: Option<SocketAddr>,
|
||||
pub ipv4_is_bogon: bool,
|
||||
pub ipv6_is_bogon: bool,
|
||||
pub ipv4_nat_detected: bool,
|
||||
pub ipv6_nat_detected: bool,
|
||||
pub ipv4_usable: bool,
|
||||
pub ipv6_usable: bool,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Default)]
|
||||
pub struct NetworkDecision {
|
||||
pub ipv4_dc: bool,
|
||||
pub ipv6_dc: bool,
|
||||
pub ipv4_me: bool,
|
||||
pub ipv6_me: bool,
|
||||
pub effective_prefer: u8,
|
||||
pub effective_multipath: bool,
|
||||
}
|
||||
|
||||
impl NetworkDecision {
|
||||
pub fn prefer_ipv6(&self) -> bool {
|
||||
self.effective_prefer == 6
|
||||
}
|
||||
|
||||
pub fn me_families(&self) -> Vec<IpFamily> {
|
||||
let mut res = Vec::new();
|
||||
if self.ipv4_me {
|
||||
res.push(IpFamily::V4);
|
||||
}
|
||||
if self.ipv6_me {
|
||||
res.push(IpFamily::V6);
|
||||
}
|
||||
res
|
||||
}
|
||||
}
|
||||
|
||||
const STUN_BATCH_TIMEOUT: Duration = Duration::from_secs(5);
|
||||
|
||||
pub async fn run_probe(
|
||||
config: &NetworkConfig,
|
||||
nat_probe: bool,
|
||||
stun_nat_probe_concurrency: usize,
|
||||
) -> Result<NetworkProbe> {
|
||||
let mut probe = NetworkProbe::default();
|
||||
|
||||
probe.detected_ipv4 = detect_local_ip_v4();
|
||||
probe.detected_ipv6 = detect_local_ip_v6();
|
||||
|
||||
probe.ipv4_is_bogon = probe.detected_ipv4.map(is_bogon_v4).unwrap_or(false);
|
||||
probe.ipv6_is_bogon = probe.detected_ipv6.map(is_bogon_v6).unwrap_or(false);
|
||||
|
||||
let stun_res = if nat_probe && config.stun_use {
|
||||
let servers = collect_stun_servers(config);
|
||||
if servers.is_empty() {
|
||||
warn!("STUN probe is enabled but network.stun_servers is empty");
|
||||
DualStunResult::default()
|
||||
} else {
|
||||
probe_stun_servers_parallel(
|
||||
&servers,
|
||||
stun_nat_probe_concurrency.max(1),
|
||||
)
|
||||
.await
|
||||
}
|
||||
} else if nat_probe {
|
||||
info!("STUN probe is disabled by network.stun_use=false");
|
||||
DualStunResult::default()
|
||||
} else {
|
||||
DualStunResult::default()
|
||||
};
|
||||
probe.reflected_ipv4 = stun_res.v4.map(|r| r.reflected_addr);
|
||||
probe.reflected_ipv6 = stun_res.v6.map(|r| r.reflected_addr);
|
||||
|
||||
// If STUN is blocked but IPv4 is private, try HTTP public-IP fallback.
|
||||
if nat_probe
|
||||
&& probe.reflected_ipv4.is_none()
|
||||
&& probe.detected_ipv4.map(is_bogon_v4).unwrap_or(false)
|
||||
{
|
||||
if let Some(public_ip) = detect_public_ipv4_http(&config.http_ip_detect_urls).await {
|
||||
probe.reflected_ipv4 = Some(SocketAddr::new(IpAddr::V4(public_ip), 0));
|
||||
info!(public_ip = %public_ip, "STUN unavailable, using HTTP public IPv4 fallback");
|
||||
}
|
||||
}
|
||||
|
||||
probe.ipv4_nat_detected = match (probe.detected_ipv4, probe.reflected_ipv4) {
|
||||
(Some(det), Some(reflected)) => det != reflected.ip(),
|
||||
_ => false,
|
||||
};
|
||||
probe.ipv6_nat_detected = match (probe.detected_ipv6, probe.reflected_ipv6) {
|
||||
(Some(det), Some(reflected)) => det != reflected.ip(),
|
||||
_ => false,
|
||||
};
|
||||
|
||||
probe.ipv4_usable = config.ipv4
|
||||
&& probe.detected_ipv4.is_some()
|
||||
&& (!probe.ipv4_is_bogon || probe.reflected_ipv4.map(|r| !is_bogon(r.ip())).unwrap_or(false));
|
||||
|
||||
let ipv6_enabled = config.ipv6.unwrap_or(probe.detected_ipv6.is_some());
|
||||
probe.ipv6_usable = ipv6_enabled
|
||||
&& probe.detected_ipv6.is_some()
|
||||
&& (!probe.ipv6_is_bogon || probe.reflected_ipv6.map(|r| !is_bogon(r.ip())).unwrap_or(false));
|
||||
|
||||
Ok(probe)
|
||||
}
|
||||
|
||||
async fn detect_public_ipv4_http(urls: &[String]) -> Option<Ipv4Addr> {
|
||||
let client = reqwest::Client::builder()
|
||||
.timeout(Duration::from_secs(3))
|
||||
.build()
|
||||
.ok()?;
|
||||
|
||||
for url in urls {
|
||||
let response = match client.get(url).send().await {
|
||||
Ok(response) => response,
|
||||
Err(_) => continue,
|
||||
};
|
||||
|
||||
let body = match response.text().await {
|
||||
Ok(body) => body,
|
||||
Err(_) => continue,
|
||||
};
|
||||
|
||||
let Ok(ip) = body.trim().parse::<Ipv4Addr>() else {
|
||||
continue;
|
||||
};
|
||||
if !is_bogon_v4(ip) {
|
||||
return Some(ip);
|
||||
}
|
||||
}
|
||||
|
||||
None
|
||||
}
|
||||
|
||||
fn collect_stun_servers(config: &NetworkConfig) -> Vec<String> {
|
||||
let mut out = Vec::new();
|
||||
for s in &config.stun_servers {
|
||||
if !s.is_empty() && !out.contains(s) {
|
||||
out.push(s.clone());
|
||||
}
|
||||
}
|
||||
out
|
||||
}
|
||||
|
||||
async fn probe_stun_servers_parallel(
|
||||
servers: &[String],
|
||||
concurrency: usize,
|
||||
) -> DualStunResult {
|
||||
let mut join_set = JoinSet::new();
|
||||
let mut next_idx = 0usize;
|
||||
let mut best_v4_by_ip: HashMap<IpAddr, (usize, StunProbeResult)> = HashMap::new();
|
||||
let mut best_v6_by_ip: HashMap<IpAddr, (usize, StunProbeResult)> = HashMap::new();
|
||||
|
||||
while next_idx < servers.len() || !join_set.is_empty() {
|
||||
while next_idx < servers.len() && join_set.len() < concurrency {
|
||||
let stun_addr = servers[next_idx].clone();
|
||||
next_idx += 1;
|
||||
join_set.spawn(async move {
|
||||
let res = timeout(STUN_BATCH_TIMEOUT, stun_probe_dual(&stun_addr)).await;
|
||||
(stun_addr, res)
|
||||
});
|
||||
}
|
||||
|
||||
let Some(task) = join_set.join_next().await else {
|
||||
break;
|
||||
};
|
||||
|
||||
match task {
|
||||
Ok((stun_addr, Ok(Ok(result)))) => {
|
||||
if let Some(v4) = result.v4 {
|
||||
let entry = best_v4_by_ip.entry(v4.reflected_addr.ip()).or_insert((0, v4));
|
||||
entry.0 += 1;
|
||||
}
|
||||
if let Some(v6) = result.v6 {
|
||||
let entry = best_v6_by_ip.entry(v6.reflected_addr.ip()).or_insert((0, v6));
|
||||
entry.0 += 1;
|
||||
}
|
||||
if result.v4.is_some() || result.v6.is_some() {
|
||||
debug!(stun = %stun_addr, "STUN server responded within probe timeout");
|
||||
}
|
||||
}
|
||||
Ok((stun_addr, Ok(Err(e)))) => {
|
||||
debug!(error = %e, stun = %stun_addr, "STUN probe failed");
|
||||
}
|
||||
Ok((stun_addr, Err(_))) => {
|
||||
debug!(stun = %stun_addr, "STUN probe timeout");
|
||||
}
|
||||
Err(e) => {
|
||||
debug!(error = %e, "STUN probe task join failed");
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
let mut out = DualStunResult::default();
|
||||
if let Some((_, best)) = best_v4_by_ip
|
||||
.into_values()
|
||||
.max_by_key(|(count, _)| *count)
|
||||
{
|
||||
info!("STUN-Quorum reached, IP: {}", best.reflected_addr.ip());
|
||||
out.v4 = Some(best);
|
||||
}
|
||||
if let Some((_, best)) = best_v6_by_ip
|
||||
.into_values()
|
||||
.max_by_key(|(count, _)| *count)
|
||||
{
|
||||
info!("STUN-Quorum reached, IP: {}", best.reflected_addr.ip());
|
||||
out.v6 = Some(best);
|
||||
}
|
||||
out
|
||||
}
|
||||
|
||||
pub fn decide_network_capabilities(config: &NetworkConfig, probe: &NetworkProbe) -> NetworkDecision {
|
||||
let ipv4_dc = config.ipv4 && probe.detected_ipv4.is_some();
|
||||
let ipv6_dc = config.ipv6.unwrap_or(probe.detected_ipv6.is_some()) && probe.detected_ipv6.is_some();
|
||||
|
||||
let ipv4_me = config.ipv4
|
||||
&& probe.detected_ipv4.is_some()
|
||||
&& (!probe.ipv4_is_bogon || probe.reflected_ipv4.is_some());
|
||||
|
||||
let ipv6_enabled = config.ipv6.unwrap_or(probe.detected_ipv6.is_some());
|
||||
let ipv6_me = ipv6_enabled
|
||||
&& probe.detected_ipv6.is_some()
|
||||
&& (!probe.ipv6_is_bogon || probe.reflected_ipv6.is_some());
|
||||
|
||||
let effective_prefer = match config.prefer {
|
||||
6 if ipv6_me || ipv6_dc => 6,
|
||||
4 if ipv4_me || ipv4_dc => 4,
|
||||
6 => {
|
||||
warn!("prefer=6 requested but IPv6 unavailable; falling back to IPv4");
|
||||
4
|
||||
}
|
||||
_ => 4,
|
||||
};
|
||||
|
||||
let me_families = ipv4_me as u8 + ipv6_me as u8;
|
||||
let effective_multipath = config.multipath && me_families >= 2;
|
||||
|
||||
NetworkDecision {
|
||||
ipv4_dc,
|
||||
ipv6_dc,
|
||||
ipv4_me,
|
||||
ipv6_me,
|
||||
effective_prefer,
|
||||
effective_multipath,
|
||||
}
|
||||
}
|
||||
|
||||
fn detect_local_ip_v4() -> Option<Ipv4Addr> {
|
||||
let socket = UdpSocket::bind("0.0.0.0:0").ok()?;
|
||||
socket.connect("8.8.8.8:80").ok()?;
|
||||
match socket.local_addr().ok()?.ip() {
|
||||
IpAddr::V4(v4) => Some(v4),
|
||||
_ => None,
|
||||
}
|
||||
}
|
||||
|
||||
fn detect_local_ip_v6() -> Option<Ipv6Addr> {
|
||||
let socket = UdpSocket::bind("[::]:0").ok()?;
|
||||
socket.connect("[2001:4860:4860::8888]:80").ok()?;
|
||||
match socket.local_addr().ok()?.ip() {
|
||||
IpAddr::V6(v6) => Some(v6),
|
||||
_ => None,
|
||||
}
|
||||
}

pub fn is_bogon(ip: IpAddr) -> bool {
    match ip {
        IpAddr::V4(v4) => is_bogon_v4(v4),
        IpAddr::V6(v6) => is_bogon_v6(v6),
    }
}

pub fn is_bogon_v4(ip: Ipv4Addr) -> bool {
    let octets = ip.octets();
    if ip.is_private() || ip.is_loopback() || ip.is_link_local() {
        return true;
    }
    // 0.0.0.0/8 ("this network")
    if octets[0] == 0 {
        return true;
    }
    // 100.64.0.0/10 (carrier-grade NAT, RFC 6598)
    if octets[0] == 100 && (octets[1] & 0xC0) == 64 {
        return true;
    }
    // 192.0.0.0/24 (IETF protocol assignments)
    if octets[0] == 192 && octets[1] == 0 && octets[2] == 0 {
        return true;
    }
    // 192.0.2.0/24 (TEST-NET-1)
    if octets[0] == 192 && octets[1] == 0 && octets[2] == 2 {
        return true;
    }
    // 198.18.0.0/15 (benchmarking, RFC 2544)
    if octets[0] == 198 && (octets[1] & 0xFE) == 18 {
        return true;
    }
    // 198.51.100.0/24 (TEST-NET-2)
    if octets[0] == 198 && octets[1] == 51 && octets[2] == 100 {
        return true;
    }
    // 203.0.113.0/24 (TEST-NET-3)
    if octets[0] == 203 && octets[1] == 0 && octets[2] == 113 {
        return true;
    }
    if ip.is_multicast() {
        return true;
    }
    // 240.0.0.0/4 (reserved for future use)
    if octets[0] >= 240 {
        return true;
    }
    if ip.is_broadcast() {
        return true;
    }
    false
}

pub fn is_bogon_v6(ip: Ipv6Addr) -> bool {
    if ip.is_unspecified() || ip.is_loopback() || ip.is_unique_local() {
        return true;
    }
    let segs = ip.segments();
    // fe80::/10 (link-local)
    if (segs[0] & 0xFFC0) == 0xFE80 {
        return true;
    }
    // ::ffff:0:0/96 (IPv4-mapped)
    if segs[0..5] == [0, 0, 0, 0, 0] && segs[5] == 0xFFFF {
        return true;
    }
    // 100::/64 (discard-only, RFC 6666)
    if segs[0] == 0x0100 && segs[1..4] == [0, 0, 0] {
        return true;
    }
    // 2001:db8::/32 (documentation)
    if segs[0] == 0x2001 && segs[1] == 0x0db8 {
        return true;
    }
    // 2002::/16 (6to4)
    if segs[0] == 0x2002 {
        return true;
    }
    if ip.is_multicast() {
        return true;
    }
    false
}

pub fn log_probe_result(probe: &NetworkProbe, decision: &NetworkDecision) {
    info!(
        ipv4 = probe.detected_ipv4.as_ref().map(|v| v.to_string()).unwrap_or_else(|| "-".into()),
        ipv6 = probe.detected_ipv6.as_ref().map(|v| v.to_string()).unwrap_or_else(|| "-".into()),
        reflected_v4 = probe.reflected_ipv4.as_ref().map(|v| v.ip().to_string()).unwrap_or_else(|| "-".into()),
        reflected_v6 = probe.reflected_ipv6.as_ref().map(|v| v.ip().to_string()).unwrap_or_else(|| "-".into()),
        ipv4_bogon = probe.ipv4_is_bogon,
        ipv6_bogon = probe.ipv6_is_bogon,
        ipv4_me = decision.ipv4_me,
        ipv6_me = decision.ipv6_me,
        ipv4_dc = decision.ipv4_dc,
        ipv6_dc = decision.ipv6_dc,
        prefer = decision.effective_prefer,
        multipath = decision.effective_multipath,
        "Network capabilities resolved"
    );
}

src/network/stun.rs (new file, 234 lines)
@@ -0,0 +1,234 @@
#![allow(unreachable_code)]
#![allow(dead_code)]

use std::net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr};

use tokio::net::{lookup_host, UdpSocket};
use tokio::time::{timeout, Duration, sleep};

use crate::error::{ProxyError, Result};
use crate::network::dns_overrides::{resolve, split_host_port};

#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
pub enum IpFamily {
    V4,
    V6,
}

#[derive(Debug, Clone, Copy)]
pub struct StunProbeResult {
    pub local_addr: SocketAddr,
    pub reflected_addr: SocketAddr,
    pub family: IpFamily,
}

#[derive(Debug, Default, Clone)]
pub struct DualStunResult {
    pub v4: Option<StunProbeResult>,
    pub v6: Option<StunProbeResult>,
}

pub async fn stun_probe_dual(stun_addr: &str) -> Result<DualStunResult> {
    let (v4, v6) = tokio::join!(
        stun_probe_family(stun_addr, IpFamily::V4),
        stun_probe_family(stun_addr, IpFamily::V6),
    );

    Ok(DualStunResult {
        v4: v4?,
        v6: v6?,
    })
}
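
// Illustrative usage (not part of the diff; "stun.example.org:3478" is a placeholder):
//     let probed = stun_probe_dual("stun.example.org:3478").await?;
//     if let Some(v4) = probed.v4 {
//         println!("reflected IPv4: {}", v4.reflected_addr.ip());
//     }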

pub async fn stun_probe_family(stun_addr: &str, family: IpFamily) -> Result<Option<StunProbeResult>> {
    stun_probe_family_with_bind(stun_addr, family, None).await
}

pub async fn stun_probe_family_with_bind(
    stun_addr: &str,
    family: IpFamily,
    bind_ip: Option<IpAddr>,
) -> Result<Option<StunProbeResult>> {
    use rand::RngCore;

    let bind_addr = match (family, bind_ip) {
        (IpFamily::V4, Some(IpAddr::V4(ip))) => SocketAddr::new(IpAddr::V4(ip), 0),
        (IpFamily::V6, Some(IpAddr::V6(ip))) => SocketAddr::new(IpAddr::V6(ip), 0),
        (IpFamily::V4, Some(IpAddr::V6(_))) | (IpFamily::V6, Some(IpAddr::V4(_))) => {
            return Ok(None);
        }
        (IpFamily::V4, None) => SocketAddr::new(IpAddr::V4(Ipv4Addr::UNSPECIFIED), 0),
        (IpFamily::V6, None) => SocketAddr::new(IpAddr::V6(Ipv6Addr::UNSPECIFIED), 0),
    };

    let socket = match UdpSocket::bind(bind_addr).await {
        Ok(socket) => socket,
        Err(_) if bind_ip.is_some() => return Ok(None),
        Err(e) => return Err(ProxyError::Proxy(format!("STUN bind failed: {e}"))),
    };

    let target_addr = resolve_stun_addr(stun_addr, family).await?;
    if let Some(addr) = target_addr {
        match socket.connect(addr).await {
            Ok(()) => {}
            Err(e) if family == IpFamily::V6 && matches!(
                e.kind(),
                std::io::ErrorKind::NetworkUnreachable
                    | std::io::ErrorKind::HostUnreachable
                    | std::io::ErrorKind::Unsupported
                    | std::io::ErrorKind::NetworkDown
            ) => return Ok(None),
            Err(e) => return Err(ProxyError::Proxy(format!("STUN connect failed: {e}"))),
        }
    } else {
        return Ok(None);
    }

    let mut req = [0u8; 20];
    req[0..2].copy_from_slice(&0x0001u16.to_be_bytes()); // Binding Request
    req[2..4].copy_from_slice(&0u16.to_be_bytes()); // length
    req[4..8].copy_from_slice(&0x2112A442u32.to_be_bytes()); // magic cookie
    rand::rng().fill_bytes(&mut req[8..20]); // transaction ID

    let mut buf = [0u8; 256];
    let mut attempt = 0;
    let mut backoff = Duration::from_secs(1);
    loop {
        socket
            .send(&req)
            .await
            .map_err(|e| ProxyError::Proxy(format!("STUN send failed: {e}")))?;

        let recv_res = timeout(Duration::from_secs(3), socket.recv(&mut buf)).await;
        let n = match recv_res {
            Ok(Ok(n)) => n,
            Ok(Err(e)) => return Err(ProxyError::Proxy(format!("STUN recv failed: {e}"))),
            Err(_) => {
                attempt += 1;
                if attempt >= 3 {
                    return Ok(None);
                }
                sleep(backoff).await;
                backoff *= 2;
                continue;
            }
        };

        if n < 20 {
            return Ok(None);
        }

        let magic = 0x2112A442u32.to_be_bytes();
        let txid = &req[8..20];
        let mut idx = 20;
        while idx + 4 <= n {
            let atype = u16::from_be_bytes(buf[idx..idx + 2].try_into().unwrap());
            let alen = u16::from_be_bytes(buf[idx + 2..idx + 4].try_into().unwrap()) as usize;
            idx += 4;
            if idx + alen > n {
                break;
            }

            match atype {
                0x0020 /* XOR-MAPPED-ADDRESS */ | 0x0001 /* MAPPED-ADDRESS */ => {
                    if alen < 8 {
                        break;
                    }
                    let family_byte = buf[idx + 1];
                    let port_bytes = [buf[idx + 2], buf[idx + 3]];
                    let len_check = match family_byte {
                        0x01 => 4,
                        0x02 => 16,
                        _ => 0,
                    };
                    if len_check == 0 || alen < 4 + len_check {
                        break;
                    }

                    let raw_ip = &buf[idx + 4..idx + 4 + len_check];
                    let mut port = u16::from_be_bytes(port_bytes);

                    let reflected_ip = if atype == 0x0020 {
                        port ^= ((magic[0] as u16) << 8) | magic[1] as u16;
                        match family_byte {
                            0x01 => {
                                let ip = [
                                    raw_ip[0] ^ magic[0],
                                    raw_ip[1] ^ magic[1],
                                    raw_ip[2] ^ magic[2],
                                    raw_ip[3] ^ magic[3],
                                ];
                                IpAddr::V4(Ipv4Addr::new(ip[0], ip[1], ip[2], ip[3]))
                            }
                            0x02 => {
                                let mut ip = [0u8; 16];
                                let xor_key = [magic.as_slice(), txid].concat();
                                for (i, b) in raw_ip.iter().enumerate().take(16) {
                                    ip[i] = *b ^ xor_key[i];
                                }
                                IpAddr::V6(Ipv6Addr::from(ip))
                            }
                            _ => {
                                idx += (alen + 3) & !3;
                                continue;
                            }
                        }
                    } else {
                        match family_byte {
                            0x01 => IpAddr::V4(Ipv4Addr::new(raw_ip[0], raw_ip[1], raw_ip[2], raw_ip[3])),
                            0x02 => IpAddr::V6(Ipv6Addr::from(<[u8; 16]>::try_from(raw_ip).unwrap())),
                            _ => {
                                idx += (alen + 3) & !3;
                                continue;
                            }
                        }
                    };

                    let reflected_addr = SocketAddr::new(reflected_ip, port);
                    let local_addr = socket
                        .local_addr()
                        .map_err(|e| ProxyError::Proxy(format!("STUN local_addr failed: {e}")))?;

                    return Ok(Some(StunProbeResult {
                        local_addr,
                        reflected_addr,
                        family,
                    }));
                }
                _ => {}
            }

            idx += (alen + 3) & !3;
        }
    }

    Ok(None)
}
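
// Illustrative note (not part of the diff): XOR-MAPPED-ADDRESS decoding, worked
// through. Per RFC 5389 §15.2 the port is XORed with the top 16 bits of the magic
// cookie and an IPv4 address with the full cookie, e.g.:
//     wire port 0x3126 ^ 0x2112         = 0x1034 (4148)
//     wire IPv4 0x5E12A443 ^ 0x2112A442 = 0x7F000001 (127.0.0.1)
// For IPv6 the 16 address bytes are XORed with cookie || transaction-id, which is
// exactly the `xor_key` built in the loop above.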

async fn resolve_stun_addr(stun_addr: &str, family: IpFamily) -> Result<Option<SocketAddr>> {
    if let Ok(addr) = stun_addr.parse::<SocketAddr>() {
        return Ok(match (addr.is_ipv4(), family) {
            (true, IpFamily::V4) | (false, IpFamily::V6) => Some(addr),
            _ => None,
        });
    }

    if let Some((host, port)) = split_host_port(stun_addr)
        && let Some(ip) = resolve(&host, port)
    {
        let addr = SocketAddr::new(ip, port);
        return Ok(match (addr.is_ipv4(), family) {
            (true, IpFamily::V4) | (false, IpFamily::V6) => Some(addr),
            _ => None,
        });
    }

    let mut addrs = lookup_host(stun_addr)
        .await
        .map_err(|e| ProxyError::Proxy(format!("STUN resolve failed: {e}")))?;

    let target = addrs
        .find(|a| matches!((a.is_ipv4(), family), (true, IpFamily::V4) | (false, IpFamily::V6)));
    Ok(target)
}

@@ -1,13 +1,17 @@
//! Protocol constants and datacenter addresses

-use std::net::{IpAddr, Ipv4Addr, Ipv6Addr};
-use once_cell::sync::Lazy;
+#![allow(dead_code)]
+
+use std::net::{IpAddr, Ipv4Addr};
+
+use crate::crypto::SecureRandom;
+use std::sync::LazyLock;

// ============= Telegram Datacenters =============

pub const TG_DATACENTER_PORT: u16 = 443;

-pub static TG_DATACENTERS_V4: Lazy<Vec<IpAddr>> = Lazy::new(|| {
+pub static TG_DATACENTERS_V4: LazyLock<Vec<IpAddr>> = LazyLock::new(|| {
    vec![
        IpAddr::V4(Ipv4Addr::new(149, 154, 175, 50)),
        IpAddr::V4(Ipv4Addr::new(149, 154, 167, 51)),
@@ -17,7 +21,7 @@ pub static TG_DATACENTERS_V4: Lazy<Vec<IpAddr>> = Lazy::new(|| {
    ]
});

-pub static TG_DATACENTERS_V6: Lazy<Vec<IpAddr>> = Lazy::new(|| {
+pub static TG_DATACENTERS_V6: LazyLock<Vec<IpAddr>> = LazyLock::new(|| {
    vec![
        IpAddr::V6("2001:b28:f23d:f001::a".parse().unwrap()),
        IpAddr::V6("2001:67c:04e8:f002::a".parse().unwrap()),
@@ -29,8 +33,8 @@ pub static TG_DATACENTERS_V6: Lazy<Vec<IpAddr>> = Lazy::new(|| {

// ============= Middle Proxies (for advertising) =============

-pub static TG_MIDDLE_PROXIES_V4: Lazy<std::collections::HashMap<i32, Vec<(IpAddr, u16)>>> =
-    Lazy::new(|| {
+pub static TG_MIDDLE_PROXIES_V4: LazyLock<std::collections::HashMap<i32, Vec<(IpAddr, u16)>>> =
+    LazyLock::new(|| {
        let mut m = std::collections::HashMap::new();
        m.insert(1, vec![(IpAddr::V4(Ipv4Addr::new(149, 154, 175, 50)), 8888)]);
        m.insert(-1, vec![(IpAddr::V4(Ipv4Addr::new(149, 154, 175, 50)), 8888)]);
@@ -45,8 +49,8 @@ pub static TG_MIDDLE_PROXIES_V4: Lazy<std::collections::HashMap<i32, Vec<(IpAddr
        m
    });

-pub static TG_MIDDLE_PROXIES_V6: Lazy<std::collections::HashMap<i32, Vec<(IpAddr, u16)>>> =
-    Lazy::new(|| {
+pub static TG_MIDDLE_PROXIES_V6: LazyLock<std::collections::HashMap<i32, Vec<(IpAddr, u16)>>> =
+    LazyLock::new(|| {
        let mut m = std::collections::HashMap::new();
        m.insert(1, vec![(IpAddr::V6("2001:b28:f23d:f001::d".parse().unwrap()), 8888)]);
        m.insert(-1, vec![(IpAddr::V6("2001:b28:f23d:f001::d".parse().unwrap()), 8888)]);
@@ -151,7 +155,32 @@ pub const TLS_RECORD_ALERT: u8 = 0x15;
/// Maximum TLS record size
pub const MAX_TLS_RECORD_SIZE: usize = 16384;
-/// Maximum TLS chunk size (with overhead)
-pub const MAX_TLS_CHUNK_SIZE: usize = 16384 + 24;
+/// RFC 8446 §5.2 allows up to 16384 + 256 bytes of ciphertext
+pub const MAX_TLS_CHUNK_SIZE: usize = 16384 + 256;
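// Illustrative note (not part of the diff): the 256-byte allowance is the
// TLSCiphertext expansion bound from RFC 8446 §5.2 (padding, AEAD tag, and the
// inner content-type byte), so 16384 + 256 = 16640 bytes maximum on the wire.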

/// Secure Intermediate payload is expected to be 4-byte aligned.
pub fn is_valid_secure_payload_len(data_len: usize) -> bool {
    data_len.is_multiple_of(4)
}

/// Compute Secure Intermediate payload length from wire length.
/// Secure mode strips up to 3 random tail bytes by truncating to 4-byte boundary.
pub fn secure_payload_len_from_wire_len(wire_len: usize) -> Option<usize> {
    if wire_len < 4 {
        return None;
    }
    Some(wire_len - (wire_len % 4))
}

/// Generate padding length for Secure Intermediate protocol.
/// Data must be 4-byte aligned; padding is 1..=3 so total is never divisible by 4.
pub fn secure_padding_len(data_len: usize, rng: &SecureRandom) -> usize {
    debug_assert!(
        is_valid_secure_payload_len(data_len),
        "Secure payload must be 4-byte aligned, got {data_len}"
    );
    rng.range(3) + 1
}
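
// Illustrative note (not part of the diff): the alignment round-trip, worked
// through. For data_len = 16, secure_padding_len returns 1, 2, or 3, so the wire
// length is 17, 18, or 19; the receiver recovers 16 via wire_len - (wire_len % 4).
// Because padding is never 0 here, a padded secure frame is never 4-byte aligned.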

// ============= Timeouts =============

@@ -167,7 +196,8 @@ pub const DEFAULT_ACK_TIMEOUT_SECS: u64 = 300;
// ============= Buffer Sizes =============

/// Default buffer size
-pub const DEFAULT_BUFFER_SIZE: usize = 65536;
+pub const DEFAULT_BUFFER_SIZE: usize = 16384;

/// Small buffer size for bad client handling
pub const SMALL_BUFFER_SIZE: usize = 8192;

@@ -201,6 +231,16 @@ pub static RESERVED_NONCE_CONTINUES: &[[u8; 4]] = &[
// ============= RPC Constants (for Middle Proxy) =============

-/// RPC Proxy Request
+/// RPC Flags (from Erlang mtp_rpc.erl)
+pub const RPC_FLAG_NOT_ENCRYPTED: u32 = 0x2;
+pub const RPC_FLAG_HAS_AD_TAG: u32 = 0x8;
+pub const RPC_FLAG_MAGIC: u32 = 0x1000;
+pub const RPC_FLAG_EXTMODE2: u32 = 0x20000;
+pub const RPC_FLAG_PAD: u32 = 0x8000000;
+pub const RPC_FLAG_INTERMEDIATE: u32 = 0x20000000;
+pub const RPC_FLAG_ABRIDGED: u32 = 0x40000000;
+pub const RPC_FLAG_QUICKACK: u32 = 0x80000000;

pub const RPC_PROXY_REQ: [u8; 4] = [0xee, 0xf1, 0xce, 0x36];
/// RPC Proxy Answer
pub const RPC_PROXY_ANS: [u8; 4] = [0x0d, 0xda, 0x03, 0x44];
@@ -227,7 +267,60 @@ pub mod rpc_flags {
    pub const FLAG_QUICKACK: u32 = 0x80000000;
}

-#[cfg(test)]

// ============= Middle-End Proxy Servers =============
pub const ME_PROXY_PORT: u16 = 8888;

pub static TG_MIDDLE_PROXIES_FLAT_V4: LazyLock<Vec<(IpAddr, u16)>> = LazyLock::new(|| {
    vec![
        (IpAddr::V4(Ipv4Addr::new(149, 154, 175, 50)), 8888),
        (IpAddr::V4(Ipv4Addr::new(149, 154, 161, 144)), 8888),
        (IpAddr::V4(Ipv4Addr::new(149, 154, 175, 100)), 8888),
        (IpAddr::V4(Ipv4Addr::new(91, 108, 4, 136)), 8888),
        (IpAddr::V4(Ipv4Addr::new(91, 108, 56, 183)), 8888),
    ]
});

// ============= RPC Constants (u32 native endian) =============
// From mtproto-common.h + net-tcp-rpc-common.h + mtproto-proxy.c

pub const RPC_NONCE_U32: u32 = 0x7acb87aa;
pub const RPC_HANDSHAKE_U32: u32 = 0x7682eef5;
pub const RPC_HANDSHAKE_ERROR_U32: u32 = 0x6a27beda;
pub const TL_PROXY_TAG_U32: u32 = 0xdb1e26ae; // mtproto-proxy.c:121

// mtproto-common.h
pub const RPC_PROXY_REQ_U32: u32 = 0x36cef1ee;
pub const RPC_PROXY_ANS_U32: u32 = 0x4403da0d;
pub const RPC_CLOSE_CONN_U32: u32 = 0x1fcf425d;
pub const RPC_CLOSE_EXT_U32: u32 = 0x5eb634a2;
pub const RPC_SIMPLE_ACK_U32: u32 = 0x3bac409b;
pub const RPC_PING_U32: u32 = 0x5730a2df;
pub const RPC_PONG_U32: u32 = 0x8430eaa7;

pub const RPC_CRYPTO_NONE_U32: u32 = 0;
pub const RPC_CRYPTO_AES_U32: u32 = 1;

pub mod proxy_flags {
    pub const FLAG_HAS_AD_TAG: u32 = 1;
    pub const FLAG_NOT_ENCRYPTED: u32 = 0x2;
    pub const FLAG_HAS_AD_TAG2: u32 = 0x8;
    pub const FLAG_MAGIC: u32 = 0x1000;
    pub const FLAG_EXTMODE2: u32 = 0x20000;
    pub const FLAG_PAD: u32 = 0x8000000;
    pub const FLAG_INTERMEDIATE: u32 = 0x20000000;
    pub const FLAG_ABRIDGED: u32 = 0x40000000;
    pub const FLAG_QUICKACK: u32 = 0x80000000;
}

pub mod rpc_crypto_flags {
    pub const USE_CRC32C: u32 = 0x800;
}

pub const ME_CONNECT_TIMEOUT_SECS: u64 = 5;
pub const ME_HANDSHAKE_TIMEOUT_SECS: u64 = 10;

+#[cfg(test)]
mod tests {
    use super::*;

@@ -258,4 +351,43 @@ mod tests {
        assert_eq!(TG_DATACENTERS_V4.len(), 5);
        assert_eq!(TG_DATACENTERS_V6.len(), 5);
    }
-}

    #[test]
    fn secure_padding_never_produces_aligned_total() {
        let rng = SecureRandom::new();
        for data_len in (0..1000).step_by(4) {
            for _ in 0..100 {
                let padding = secure_padding_len(data_len, &rng);
                assert!(
                    padding <= 3,
                    "padding out of range: data_len={data_len}, padding={padding}"
                );
                assert_ne!(
                    (data_len + padding) % 4,
                    0,
                    "invariant violated: data_len={data_len}, padding={padding}, total={}",
                    data_len + padding
                );
            }
        }
    }

    #[test]
    fn secure_wire_len_roundtrip_for_aligned_payload() {
        for payload_len in (4..4096).step_by(4) {
            for padding in 0..=3usize {
                let wire_len = payload_len + padding;
                let recovered = secure_payload_len_from_wire_len(wire_len);
                assert_eq!(recovered, Some(payload_len));
            }
        }
    }

    #[test]
    fn secure_wire_len_rejects_too_short_frames() {
        assert_eq!(secure_payload_len_from_wire_len(0), None);
        assert_eq!(secure_payload_len_from_wire_len(1), None);
        assert_eq!(secure_payload_len_from_wire_len(2), None);
        assert_eq!(secure_payload_len_from_wire_len(3), None);
    }
}

@@ -1,5 +1,7 @@
//! MTProto frame types and metadata

+#![allow(dead_code)]
+
use std::collections::HashMap;

/// Extra metadata associated with a frame
@@ -83,7 +85,7 @@ impl FrameMode {
pub fn validate_message_length(len: usize) -> bool {
    use super::constants::{MIN_MSG_LEN, MAX_MSG_LEN, PADDING_FILLER};

-    len >= MIN_MSG_LEN && len <= MAX_MSG_LEN && len % PADDING_FILLER.len() == 0
+    (MIN_MSG_LEN..=MAX_MSG_LEN).contains(&len) && len.is_multiple_of(PADDING_FILLER.len())
}

#[cfg(test)]

@@ -5,7 +5,11 @@ pub mod frame;
pub mod obfuscation;
pub mod tls;

+#[allow(unused_imports)]
pub use constants::*;
+#[allow(unused_imports)]
pub use frame::*;
+#[allow(unused_imports)]
pub use obfuscation::*;
+#[allow(unused_imports)]
pub use tls::*;
@@ -1,10 +1,14 @@
//! MTProto Obfuscation

+#![allow(dead_code)]
+
+use zeroize::Zeroize;
use crate::crypto::{sha256, AesCtr};
use crate::error::Result;
use super::constants::*;

/// Obfuscation parameters from handshake
+///
+/// Key material is zeroized on drop.
#[derive(Debug, Clone)]
pub struct ObfuscationParams {
    /// Key for decrypting client -> proxy traffic
@@ -21,25 +25,31 @@ pub struct ObfuscationParams {
    pub dc_idx: i16,
}

+impl Drop for ObfuscationParams {
+    fn drop(&mut self) {
+        self.decrypt_key.zeroize();
+        self.decrypt_iv.zeroize();
+        self.encrypt_key.zeroize();
+        self.encrypt_iv.zeroize();
+    }
+}
+
impl ObfuscationParams {
    /// Parse obfuscation parameters from handshake bytes
    /// Returns None if handshake doesn't match any user secret
    pub fn from_handshake(
        handshake: &[u8; HANDSHAKE_LEN],
-        secrets: &[(String, Vec<u8>)], // (username, secret_bytes)
+        secrets: &[(String, Vec<u8>)],
    ) -> Option<(Self, String)> {
        // Extract prekey and IV for decryption
        let dec_prekey_iv = &handshake[SKIP_LEN..SKIP_LEN + PREKEY_LEN + IV_LEN];
        let dec_prekey = &dec_prekey_iv[..PREKEY_LEN];
        let dec_iv_bytes = &dec_prekey_iv[PREKEY_LEN..];

        // Reversed for encryption direction
        let enc_prekey_iv: Vec<u8> = dec_prekey_iv.iter().rev().copied().collect();
        let enc_prekey = &enc_prekey_iv[..PREKEY_LEN];
        let enc_iv_bytes = &enc_prekey_iv[PREKEY_LEN..];

        for (username, secret) in secrets {
            // Derive decryption key
            let mut dec_key_input = Vec::with_capacity(PREKEY_LEN + secret.len());
            dec_key_input.extend_from_slice(dec_prekey);
            dec_key_input.extend_from_slice(secret);
@@ -47,26 +57,22 @@ impl ObfuscationParams {

            let decrypt_iv = u128::from_be_bytes(dec_iv_bytes.try_into().unwrap());

-            // Create decryptor and decrypt handshake
            let mut decryptor = AesCtr::new(&decrypt_key, decrypt_iv);
            let decrypted = decryptor.decrypt(handshake);

-            // Check protocol tag
            let tag_bytes: [u8; 4] = decrypted[PROTO_TAG_POS..PROTO_TAG_POS + 4]
                .try_into()
                .unwrap();

            let proto_tag = match ProtoTag::from_bytes(tag_bytes) {
                Some(tag) => tag,
-                None => continue, // Try next secret
+                None => continue,
            };

-            // Extract DC index
            let dc_idx = i16::from_le_bytes(
                decrypted[DC_IDX_POS..DC_IDX_POS + 2].try_into().unwrap()
            );

-            // Derive encryption key
            let mut enc_key_input = Vec::with_capacity(PREKEY_LEN + secret.len());
            enc_key_input.extend_from_slice(enc_prekey);
            enc_key_input.extend_from_slice(secret);
@@ -123,18 +129,15 @@ pub fn generate_nonce<R: FnMut(usize) -> Vec<u8>>(mut random_bytes: R) -> [u8; H

/// Check if nonce is valid (not matching reserved patterns)
pub fn is_valid_nonce(nonce: &[u8; HANDSHAKE_LEN]) -> bool {
-    // Check first byte
    if RESERVED_NONCE_FIRST_BYTES.contains(&nonce[0]) {
        return false;
    }

-    // Check first 4 bytes
    let first_four: [u8; 4] = nonce[..4].try_into().unwrap();
    if RESERVED_NONCE_BEGINNINGS.contains(&first_four) {
        return false;
    }

-    // Check bytes 4-7
    let continue_four: [u8; 4] = nonce[4..8].try_into().unwrap();
    if RESERVED_NONCE_CONTINUES.contains(&continue_four) {
        return false;
@@ -147,12 +150,10 @@ pub fn is_valid_nonce(nonce: &[u8; HANDSHAKE_LEN]) -> bool {
pub fn prepare_tg_nonce(
    nonce: &mut [u8; HANDSHAKE_LEN],
    proto_tag: ProtoTag,
-    enc_key_iv: Option<&[u8]>, // For fast mode
+    enc_key_iv: Option<&[u8]>,
) {
-    // Set protocol tag
    nonce[PROTO_TAG_POS..PROTO_TAG_POS + 4].copy_from_slice(&proto_tag.to_bytes());

    // For fast mode, copy the reversed enc_key_iv
    if let Some(key_iv) = enc_key_iv {
        let reversed: Vec<u8> = key_iv.iter().rev().copied().collect();
        nonce[SKIP_LEN..SKIP_LEN + KEY_LEN + IV_LEN].copy_from_slice(&reversed);
@@ -160,15 +161,19 @@ pub fn prepare_tg_nonce(
}

-/// Encrypt the outgoing nonce for Telegram
+/// Legacy helper — **do not use**.
+/// WARNING: logic diverges from Python/C reference (SHA256 of 48 bytes, IV from head).
+/// Kept only to avoid breaking external callers; prefer `encrypt_tg_nonce_with_ciphers`.
+#[deprecated(
+    note = "Incorrect MTProto obfuscation KDF; use proxy::handshake::encrypt_tg_nonce_with_ciphers"
+)]
pub fn encrypt_nonce(nonce: &[u8; HANDSHAKE_LEN]) -> Vec<u8> {
    // Derive encryption key from the nonce itself
    let key_iv = &nonce[SKIP_LEN..SKIP_LEN + KEY_LEN + IV_LEN];
    let enc_key = sha256(key_iv);
    let enc_iv = u128::from_be_bytes(key_iv[..IV_LEN].try_into().unwrap());

    let mut encryptor = AesCtr::new(&enc_key, enc_iv);

    // Only encrypt from PROTO_TAG_POS onwards
    let mut result = nonce.to_vec();
    let encrypted_part = encryptor.encrypt(&nonce[PROTO_TAG_POS..]);
    result[PROTO_TAG_POS..].copy_from_slice(&encrypted_part);
@@ -182,22 +187,18 @@ mod tests {

    #[test]
    fn test_is_valid_nonce() {
-        // Valid nonce
        let mut valid = [0x42u8; HANDSHAKE_LEN];
        valid[4..8].copy_from_slice(&[1, 2, 3, 4]);
        assert!(is_valid_nonce(&valid));

-        // Invalid: starts with 0xef
        let mut invalid = [0x00u8; HANDSHAKE_LEN];
        invalid[0] = 0xef;
        assert!(!is_valid_nonce(&invalid));

-        // Invalid: starts with HEAD
        let mut invalid = [0x00u8; HANDSHAKE_LEN];
        invalid[..4].copy_from_slice(b"HEAD");
        assert!(!is_valid_nonce(&invalid));

-        // Invalid: bytes 4-7 are zeros
        let mut invalid = [0x42u8; HANDSHAKE_LEN];
        invalid[4..8].copy_from_slice(&[0, 0, 0, 0]);
        assert!(!is_valid_nonce(&invalid));
@@ -214,4 +215,4 @@ mod tests {
        assert!(is_valid_nonce(&nonce));
        assert_eq!(nonce.len(), HANDSHAKE_LEN);
    }
}
}

@@ -1,14 +1,27 @@
//! Fake TLS 1.3 Handshake
//!
//! This module handles the fake TLS 1.3 handshake used by MTProto proxy
//! for domain fronting. The handshake looks like valid TLS 1.3 but
//! actually carries MTProto authentication data.

-use crate::crypto::{sha256_hmac, random::SECURE_RANDOM};
-use crate::error::{ProxyError, Result};
+#![allow(dead_code)]
+
+use crate::crypto::{sha256_hmac, SecureRandom};
+#[cfg(test)]
+use crate::error::ProxyError;
use super::constants::*;
use std::time::{SystemTime, UNIX_EPOCH};
+use num_bigint::BigUint;
+use num_traits::One;

// ============= Public Constants =============

/// TLS handshake digest length
pub const TLS_DIGEST_LEN: usize = 32;

/// Position of digest in TLS ClientHello
pub const TLS_DIGEST_POS: usize = 11;

/// Length to store for replay protection (first 16 bytes of digest)
pub const TLS_DIGEST_HALF_LEN: usize = 16;

@@ -16,6 +29,27 @@ pub const TLS_DIGEST_HALF_LEN: usize = 16;
pub const TIME_SKEW_MIN: i64 = -20 * 60; // 20 minutes before
pub const TIME_SKEW_MAX: i64 = 10 * 60; // 10 minutes after

// ============= Private Constants =============

/// TLS Extension types
mod extension_type {
    pub const KEY_SHARE: u16 = 0x0033;
    pub const SUPPORTED_VERSIONS: u16 = 0x002b;
    pub const ALPN: u16 = 0x0010;
}

/// TLS Cipher Suites
mod cipher_suite {
    pub const TLS_AES_128_GCM_SHA256: [u8; 2] = [0x13, 0x01];
}

/// TLS Named Curves
mod named_curve {
    pub const X25519: u16 = 0x001d;
}

// ============= TLS Validation Result =============

/// Result of validating TLS handshake
#[derive(Debug)]
pub struct TlsValidation {
@@ -29,7 +63,219 @@ pub struct TlsValidation {
    pub timestamp: u32,
}

// ============= TLS Extension Builder =============

/// Builder for TLS extensions with correct length calculation
#[derive(Clone)]
struct TlsExtensionBuilder {
    extensions: Vec<u8>,
}

impl TlsExtensionBuilder {
    fn new() -> Self {
        Self {
            extensions: Vec::with_capacity(128),
        }
    }

    /// Add Key Share extension with X25519 key
    fn add_key_share(&mut self, public_key: &[u8; 32]) -> &mut Self {
        // Extension type: key_share (0x0033)
        self.extensions.extend_from_slice(&extension_type::KEY_SHARE.to_be_bytes());

        // Key share entry: curve (2) + key_len (2) + key (32) = 36 bytes
        // Extension data length
        let entry_len: u16 = 2 + 2 + 32; // curve + length + key
        self.extensions.extend_from_slice(&entry_len.to_be_bytes());

        // Named curve: x25519
        self.extensions.extend_from_slice(&named_curve::X25519.to_be_bytes());

        // Key length
        self.extensions.extend_from_slice(&(32u16).to_be_bytes());

        // Key data
        self.extensions.extend_from_slice(public_key);

        self
    }

    /// Add Supported Versions extension
    fn add_supported_versions(&mut self, version: u16) -> &mut Self {
        // Extension type: supported_versions (0x002b)
        self.extensions.extend_from_slice(&extension_type::SUPPORTED_VERSIONS.to_be_bytes());

        // Extension data: length (2) + version (2)
        self.extensions.extend_from_slice(&(2u16).to_be_bytes());

        // Selected version
        self.extensions.extend_from_slice(&version.to_be_bytes());

        self
    }

    /// Add ALPN extension with a single selected protocol.
    fn add_alpn(&mut self, proto: &[u8]) -> &mut Self {
        // Extension type: ALPN (0x0010)
        self.extensions.extend_from_slice(&extension_type::ALPN.to_be_bytes());

        // ALPN extension format:
        //   extension_data length (2 bytes)
        //   protocols length (2 bytes)
        //   protocol name length (1 byte)
        //   protocol name bytes
        let proto_len = proto.len() as u8;
        let list_len: u16 = 1 + proto_len as u16;
        let ext_len: u16 = 2 + list_len;

        self.extensions.extend_from_slice(&ext_len.to_be_bytes());
        self.extensions.extend_from_slice(&list_len.to_be_bytes());
        self.extensions.push(proto_len);
        self.extensions.extend_from_slice(proto);
        self
    }
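
    // Illustrative note (not part of the diff): for proto = b"h2", add_alpn appends
    // 00 10 (type) 00 05 (ext_len) 00 03 (list_len) 02 (name_len) 68 32 ("h2"),
    // i.e. list_len = 1 + 2 and ext_len = 2 + list_len.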

    /// Build final extensions with length prefix
    fn build(self) -> Vec<u8> {
        let mut result = Vec::with_capacity(2 + self.extensions.len());

        // Extensions length (2 bytes)
        let len = self.extensions.len() as u16;
        result.extend_from_slice(&len.to_be_bytes());

        // Extensions data
        result.extend_from_slice(&self.extensions);

        result
    }

    /// Get current extensions without length prefix (for calculation)
    #[allow(dead_code)]
    fn as_bytes(&self) -> &[u8] {
        &self.extensions
    }
}

// ============= ServerHello Builder =============

/// Builder for TLS ServerHello with correct structure
struct ServerHelloBuilder {
    /// Random bytes (32 bytes, will contain digest)
    random: [u8; 32],
    /// Session ID (echoed from ClientHello)
    session_id: Vec<u8>,
    /// Cipher suite
    cipher_suite: [u8; 2],
    /// Compression method
    compression: u8,
    /// Extensions
    extensions: TlsExtensionBuilder,
    /// Selected ALPN protocol (if any)
    alpn: Option<Vec<u8>>,
}

impl ServerHelloBuilder {
    fn new(session_id: Vec<u8>) -> Self {
        Self {
            random: [0u8; 32],
            session_id,
            cipher_suite: cipher_suite::TLS_AES_128_GCM_SHA256,
            compression: 0x00,
            extensions: TlsExtensionBuilder::new(),
            alpn: None,
        }
    }

    fn with_x25519_key(mut self, key: &[u8; 32]) -> Self {
        self.extensions.add_key_share(key);
        self
    }

    fn with_tls13_version(mut self) -> Self {
        // TLS 1.3 = 0x0304
        self.extensions.add_supported_versions(0x0304);
        self
    }

    fn with_alpn(mut self, proto: Option<Vec<u8>>) -> Self {
        self.alpn = proto;
        self
    }

    /// Build ServerHello message (without record header)
    fn build_message(&self) -> Vec<u8> {
        let mut ext_builder = self.extensions.clone();
        if let Some(ref alpn) = self.alpn {
            ext_builder.add_alpn(alpn);
        }
        let extensions = ext_builder.extensions.clone();
        let extensions_len = extensions.len() as u16;

        // Calculate total length
        let body_len = 2 + // version
            32 + // random
            1 + self.session_id.len() + // session_id length + data
            2 + // cipher suite
            1 + // compression
            2 + extensions.len(); // extensions length + data

        let mut message = Vec::with_capacity(4 + body_len);

        // Handshake header
        message.push(0x02); // ServerHello message type

        // 3-byte length
        let len_bytes = (body_len as u32).to_be_bytes();
        message.extend_from_slice(&len_bytes[1..4]);

        // Server version (TLS 1.2 in header, actual version in extension)
        message.extend_from_slice(&TLS_VERSION);

        // Random (32 bytes) - placeholder, will be replaced with digest
        message.extend_from_slice(&self.random);

        // Session ID
        message.push(self.session_id.len() as u8);
        message.extend_from_slice(&self.session_id);

        // Cipher suite
        message.extend_from_slice(&self.cipher_suite);

        // Compression method
        message.push(self.compression);

        // Extensions length
        message.extend_from_slice(&extensions_len.to_be_bytes());

        // Extensions data
        message.extend_from_slice(&extensions);

        message
    }

    /// Build complete ServerHello TLS record
    fn build_record(&self) -> Vec<u8> {
        let message = self.build_message();

        let mut record = Vec::with_capacity(5 + message.len());

        // TLS record header
        record.push(TLS_RECORD_HANDSHAKE);
        record.extend_from_slice(&TLS_VERSION);
        record.extend_from_slice(&(message.len() as u16).to_be_bytes());

        // Message
        record.extend_from_slice(&message);

        record
    }
}

// ============= Public Functions =============

/// Validate TLS ClientHello against user secrets
///
/// Returns validation result if a matching user is found.
pub fn validate_tls_handshake(
    handshake: &[u8],
    secrets: &[(String, Vec<u8>)],
@@ -86,9 +332,10 @@ pub fn validate_tls_handshake(
        // Check time skew
        if !ignore_time_skew {
-            // Allow very small timestamps (boot time instead of unix time)
-            let is_boot_time = timestamp < 60 * 60 * 24 * 1000;
+            // This is a quirk in some clients that use uptime instead of real time
+            let is_boot_time = timestamp < 60 * 60 * 24 * 1000; // < ~2.7 years in seconds

-            if !is_boot_time && (time_diff < TIME_SKEW_MIN || time_diff > TIME_SKEW_MAX) {
+            if !is_boot_time && !(TIME_SKEW_MIN..=TIME_SKEW_MAX).contains(&time_diff) {
                continue;
            }
        }
@@ -104,89 +351,248 @@ pub fn validate_tls_handshake(
    None
}

fn curve25519_prime() -> BigUint {
    (BigUint::one() << 255) - BigUint::from(19u32)
}

/// Generate a fake X25519 public key for TLS
-/// This generates a value that looks like a valid X25519 key
-pub fn gen_fake_x25519_key() -> [u8; 32] {
-    // For simplicity, just generate random 32 bytes
-    // In real X25519, this would be a point on the curve
-    let bytes = SECURE_RANDOM.bytes(32);
-    bytes.try_into().unwrap()
+///
+/// Produces a quadratic residue mod p = 2^255 - 19 by computing n² mod p,
+/// which matches Python/C behavior and avoids DPI fingerprinting.
+pub fn gen_fake_x25519_key(rng: &SecureRandom) -> [u8; 32] {
+    let mut n_bytes = [0u8; 32];
+    n_bytes.copy_from_slice(&rng.bytes(32));
+
+    let n = BigUint::from_bytes_le(&n_bytes);
+    let p = curve25519_prime();
+    let pk = (&n * &n) % &p;
+
+    let mut out = pk.to_bytes_le();
+    out.resize(32, 0);
+    let mut result = [0u8; 32];
+    result.copy_from_slice(&out[..32]);
+    result
}
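
// Illustrative note (not part of the diff): why squaring matters. For an odd prime
// p, exactly half of the nonzero residues mod p are quadratic residues, so a
// uniformly random 32-byte string fails a Legendre-symbol check about half the
// time — a statistical tell for DPI. n² mod p passes it always, matching what the
// Python/C reference proxies emit (see the quadratic-residue test further down).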

/// Build TLS ServerHello response
///
/// This builds a complete TLS 1.3-like response including:
/// - ServerHello record with extensions
/// - Change Cipher Spec record
/// - Fake encrypted certificate (Application Data record)
///
/// The response includes an HMAC digest that the client can verify.
pub fn build_server_hello(
    secret: &[u8],
    client_digest: &[u8; TLS_DIGEST_LEN],
    session_id: &[u8],
    fake_cert_len: usize,
+    rng: &SecureRandom,
+    alpn: Option<Vec<u8>>,
+    new_session_tickets: u8,
) -> Vec<u8> {
-    let x25519_key = gen_fake_x25519_key();
+    const MIN_APP_DATA: usize = 64;
+    const MAX_APP_DATA: usize = 16640; // RFC 8446 §5.2 upper bound
+    let fake_cert_len = fake_cert_len.clamp(MIN_APP_DATA, MAX_APP_DATA);
+    let x25519_key = gen_fake_x25519_key(rng);

-    // TLS extensions
-    let mut extensions = Vec::new();
-    extensions.extend_from_slice(&[0x00, 0x2e]); // Extension length placeholder
-    extensions.extend_from_slice(&[0x00, 0x33, 0x00, 0x24]); // Key share extension
-    extensions.extend_from_slice(&[0x00, 0x1d, 0x00, 0x20]); // X25519 curve
-    extensions.extend_from_slice(&x25519_key);
-    extensions.extend_from_slice(&[0x00, 0x2b, 0x00, 0x02, 0x03, 0x04]); // Supported versions
+    // Build ServerHello
+    let server_hello = ServerHelloBuilder::new(session_id.to_vec())
+        .with_x25519_key(&x25519_key)
+        .with_tls13_version()
+        .with_alpn(alpn)
+        .build_record();

-    // ServerHello body
-    let mut srv_hello = Vec::new();
-    srv_hello.extend_from_slice(&TLS_VERSION);
-    srv_hello.extend_from_slice(&[0u8; TLS_DIGEST_LEN]); // Placeholder for digest
-    srv_hello.push(session_id.len() as u8);
-    srv_hello.extend_from_slice(session_id);
-    srv_hello.extend_from_slice(&[0x13, 0x01]); // TLS_AES_128_GCM_SHA256
-    srv_hello.push(0x00); // No compression
-    srv_hello.extend_from_slice(&extensions);
+    // Build Change Cipher Spec record
+    let change_cipher_spec = [
+        TLS_RECORD_CHANGE_CIPHER,
+        TLS_VERSION[0], TLS_VERSION[1],
+        0x00, 0x01, // length = 1
+        0x01, // CCS byte
+    ];

-    // Build complete packet
-    let mut hello_pkt = Vec::new();
+    // Build fake certificate (Application Data record)
+    let fake_cert = rng.bytes(fake_cert_len);
+    let mut app_data_record = Vec::with_capacity(5 + fake_cert_len);
+    app_data_record.push(TLS_RECORD_APPLICATION);
+    app_data_record.extend_from_slice(&TLS_VERSION);
+    app_data_record.extend_from_slice(&(fake_cert_len as u16).to_be_bytes());
+    // Fill ApplicationData with fully random bytes of desired length to avoid
+    // deterministic DPI fingerprints (fixed inner content type markers).
+    app_data_record.extend_from_slice(&fake_cert);

-    // ServerHello record
-    hello_pkt.push(TLS_RECORD_HANDSHAKE);
-    hello_pkt.extend_from_slice(&TLS_VERSION);
-    hello_pkt.extend_from_slice(&((srv_hello.len() + 4) as u16).to_be_bytes());
-    hello_pkt.push(0x02); // ServerHello message type
-    let len_bytes = (srv_hello.len() as u32).to_be_bytes();
-    hello_pkt.extend_from_slice(&len_bytes[1..4]); // 3-byte length
-    hello_pkt.extend_from_slice(&srv_hello);
-
-    // Change Cipher Spec record
-    hello_pkt.extend_from_slice(&[
-        TLS_RECORD_CHANGE_CIPHER,
-        TLS_VERSION[0], TLS_VERSION[1],
-        0x00, 0x01, 0x01
-    ]);
-
-    // Application Data record (fake certificate)
-    let fake_cert = SECURE_RANDOM.bytes(fake_cert_len);
-    hello_pkt.push(TLS_RECORD_APPLICATION);
-    hello_pkt.extend_from_slice(&TLS_VERSION);
-    hello_pkt.extend_from_slice(&(fake_cert.len() as u16).to_be_bytes());
-    hello_pkt.extend_from_slice(&fake_cert);
+    // Build optional NewSessionTicket records (TLS 1.3 handshake messages are encrypted;
+    // here we mimic with opaque ApplicationData records of plausible size).
+    let mut tickets = Vec::new();
+    if new_session_tickets > 0 {
+        for _ in 0..new_session_tickets {
+            let ticket_len: usize = rng.range(48) + 48; // 48-95 bytes
+            let mut record = Vec::with_capacity(5 + ticket_len);
+            record.push(TLS_RECORD_APPLICATION);
+            record.extend_from_slice(&TLS_VERSION);
+            record.extend_from_slice(&(ticket_len as u16).to_be_bytes());
+            record.extend_from_slice(&rng.bytes(ticket_len));
+            tickets.push(record);
+        }
+    }

+    // Combine all records
+    let mut response = Vec::with_capacity(
+        server_hello.len() + change_cipher_spec.len() + app_data_record.len() + tickets.iter().map(|r| r.len()).sum::<usize>()
+    );
+    response.extend_from_slice(&server_hello);
+    response.extend_from_slice(&change_cipher_spec);
+    response.extend_from_slice(&app_data_record);
+    for t in &tickets {
+        response.extend_from_slice(t);
+    }

    // Compute HMAC for the response
-    let mut hmac_input = Vec::with_capacity(TLS_DIGEST_LEN + hello_pkt.len());
+    let mut hmac_input = Vec::with_capacity(TLS_DIGEST_LEN + response.len());
    hmac_input.extend_from_slice(client_digest);
-    hmac_input.extend_from_slice(&hello_pkt);
+    hmac_input.extend_from_slice(&response);
    let response_digest = sha256_hmac(secret, &hmac_input);

-    // Insert computed digest
-    // Position: after record header (5) + message type/length (4) + version (2) = 11
-    hello_pkt[TLS_DIGEST_POS..TLS_DIGEST_POS + TLS_DIGEST_LEN]
+    // Insert computed digest into ServerHello
+    // Position: record header (5) + message type (1) + length (3) + version (2) = 11
+    response[TLS_DIGEST_POS..TLS_DIGEST_POS + TLS_DIGEST_LEN]
        .copy_from_slice(&response_digest);

-    hello_pkt
+    response
}
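
// Illustrative note (not part of the diff): on the wire the response is the record
// sequence a client (or a middlebox) sees as
//     0x16 ServerHello | 0x14 ChangeCipherSpec | 0x17 AppData (fake cert)
//     | 0x17 AppData × new_session_tickets
// with the HMAC-SHA256 digest spliced into the ServerHello random at offset 11.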

/// Extract SNI (server_name) from a TLS ClientHello.
pub fn extract_sni_from_client_hello(handshake: &[u8]) -> Option<String> {
    if handshake.len() < 43 || handshake[0] != TLS_RECORD_HANDSHAKE {
        return None;
    }

    let mut pos = 5; // after record header
    if handshake.get(pos).copied()? != 0x01 {
        return None; // not ClientHello
    }

    // Handshake length bytes
    pos += 4; // type + len (3)

    // version (2) + random (32)
    pos += 2 + 32;
    if pos + 1 > handshake.len() {
        return None;
    }

    let session_id_len = *handshake.get(pos)? as usize;
    pos += 1 + session_id_len;
    if pos + 2 > handshake.len() {
        return None;
    }

    let cipher_suites_len = u16::from_be_bytes([handshake[pos], handshake[pos + 1]]) as usize;
    pos += 2 + cipher_suites_len;
    if pos + 1 > handshake.len() {
        return None;
    }

    let comp_len = *handshake.get(pos)? as usize;
    pos += 1 + comp_len;
    if pos + 2 > handshake.len() {
        return None;
    }

    let ext_len = u16::from_be_bytes([handshake[pos], handshake[pos + 1]]) as usize;
    pos += 2;
    let ext_end = pos + ext_len;
    if ext_end > handshake.len() {
        return None;
    }

    while pos + 4 <= ext_end {
        let etype = u16::from_be_bytes([handshake[pos], handshake[pos + 1]]);
        let elen = u16::from_be_bytes([handshake[pos + 2], handshake[pos + 3]]) as usize;
        pos += 4;
        if pos + elen > ext_end {
            break;
        }
        if etype == 0x0000 && elen >= 5 {
            // server_name extension
            let list_len = u16::from_be_bytes([handshake[pos], handshake[pos + 1]]) as usize;
            let mut sn_pos = pos + 2;
            let sn_end = std::cmp::min(sn_pos + list_len, pos + elen);
            while sn_pos + 3 <= sn_end {
                let name_type = handshake[sn_pos];
                let name_len = u16::from_be_bytes([handshake[sn_pos + 1], handshake[sn_pos + 2]]) as usize;
                sn_pos += 3;
                if sn_pos + name_len > sn_end {
                    break;
                }
                if name_type == 0 && name_len > 0
                    && let Ok(host) = std::str::from_utf8(&handshake[sn_pos..sn_pos + name_len])
                {
                    return Some(host.to_string());
                }
                sn_pos += name_len;
            }
        }
        pos += elen;
    }

    None
}
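
// Illustrative note (not part of the diff): the server_name extension body parsed
// above, for "example.com" (11 bytes), looks like
//     00 0e (server_name_list length = 14) 00 (name_type: host_name)
//     00 0b (name length = 11) "example.com"
// which is also what build_client_hello_with_exts constructs in the tests below.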

/// Extract ALPN protocol list from ClientHello, return in offered order.
pub fn extract_alpn_from_client_hello(handshake: &[u8]) -> Vec<Vec<u8>> {
    let mut pos = 5; // after record header
    if handshake.get(pos) != Some(&0x01) {
        return Vec::new();
    }
    pos += 4; // type + len
    pos += 2 + 32; // version + random
    if pos >= handshake.len() { return Vec::new(); }
    let session_id_len = *handshake.get(pos).unwrap_or(&0) as usize;
    pos += 1 + session_id_len;
    if pos + 2 > handshake.len() { return Vec::new(); }
    let cipher_len = u16::from_be_bytes([handshake[pos], handshake[pos+1]]) as usize;
    pos += 2 + cipher_len;
    if pos >= handshake.len() { return Vec::new(); }
    let comp_len = *handshake.get(pos).unwrap_or(&0) as usize;
    pos += 1 + comp_len;
    if pos + 2 > handshake.len() { return Vec::new(); }
    let ext_len = u16::from_be_bytes([handshake[pos], handshake[pos+1]]) as usize;
    pos += 2;
    let ext_end = pos + ext_len;
    if ext_end > handshake.len() { return Vec::new(); }
    let mut out = Vec::new();
    while pos + 4 <= ext_end {
        let etype = u16::from_be_bytes([handshake[pos], handshake[pos+1]]);
        let elen = u16::from_be_bytes([handshake[pos+2], handshake[pos+3]]) as usize;
        pos += 4;
        if pos + elen > ext_end { break; }
        if etype == extension_type::ALPN && elen >= 3 {
            let list_len = u16::from_be_bytes([handshake[pos], handshake[pos+1]]) as usize;
            let mut lp = pos + 2;
            let list_end = (pos + 2).saturating_add(list_len).min(pos + elen);
            while lp < list_end {
                let plen = handshake[lp] as usize;
                lp += 1;
                if lp + plen > list_end { break; }
                out.push(handshake[lp..lp+plen].to_vec());
                lp += plen;
            }
            break;
        }
        pos += elen;
    }
    out
}
|
||||
|
||||
|
||||
/// Check if bytes look like a TLS ClientHello
|
||||
pub fn is_tls_handshake(first_bytes: &[u8]) -> bool {
|
||||
if first_bytes.len() < 3 {
|
||||
return false;
|
||||
}
|
||||
|
||||
// TLS record header: 0x16 0x03 0x01
|
||||
// TLS record header: 0x16 (handshake) 0x03 0x01 (TLS 1.0)
|
||||
first_bytes[0] == TLS_RECORD_HANDSHAKE
|
||||
&& first_bytes[1] == 0x03
|
||||
&& first_bytes[2] == 0x01
|
||||
@@ -206,6 +612,61 @@ pub fn parse_tls_record_header(header: &[u8; 5]) -> Option<(u8, u16)> {
|
||||
Some((record_type, length))
|
||||
}
|
||||
|
||||
/// Validate a ServerHello response structure
|
||||
///
|
||||
/// This is useful for testing that our ServerHello is well-formed.
|
||||
#[cfg(test)]
|
||||
fn validate_server_hello_structure(data: &[u8]) -> Result<(), ProxyError> {
|
||||
if data.len() < 5 {
|
||||
return Err(ProxyError::InvalidTlsRecord {
|
||||
record_type: 0,
|
||||
version: [0, 0],
|
||||
});
|
||||
}
|
||||
|
||||
// Check record header
|
||||
if data[0] != TLS_RECORD_HANDSHAKE {
|
||||
return Err(ProxyError::InvalidTlsRecord {
|
||||
record_type: data[0],
|
||||
version: [data[1], data[2]],
|
||||
});
|
||||
}
|
||||
|
||||
// Check version
|
||||
if data[1..3] != TLS_VERSION {
|
||||
return Err(ProxyError::InvalidTlsRecord {
|
||||
record_type: data[0],
|
||||
version: [data[1], data[2]],
|
||||
});
|
||||
}
|
||||
|
||||
// Check record length
|
||||
let record_len = u16::from_be_bytes([data[3], data[4]]) as usize;
|
||||
if data.len() < 5 + record_len {
|
||||
return Err(ProxyError::InvalidHandshake(
|
||||
format!("ServerHello record truncated: expected {}, got {}",
|
||||
5 + record_len, data.len())
|
||||
));
|
||||
}
|
||||
|
||||
// Check message type
|
||||
if data[5] != 0x02 {
|
||||
return Err(ProxyError::InvalidHandshake(
|
||||
format!("Expected ServerHello (0x02), got 0x{:02x}", data[5])
|
||||
));
|
||||
}
|
||||
|
||||
// Parse message length
|
||||
let msg_len = u32::from_be_bytes([0, data[6], data[7], data[8]]) as usize;
|
||||
if msg_len + 4 != record_len {
|
||||
return Err(ProxyError::InvalidHandshake(
|
||||
format!("Message length mismatch: {} + 4 != {}", msg_len, record_len)
|
||||
));
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
@@ -234,11 +695,263 @@ mod tests {
|
||||
|
||||
#[test]
|
||||
fn test_gen_fake_x25519_key() {
|
||||
let key1 = gen_fake_x25519_key();
|
||||
let key2 = gen_fake_x25519_key();
|
||||
let rng = SecureRandom::new();
|
||||
let key1 = gen_fake_x25519_key(&rng);
|
||||
let key2 = gen_fake_x25519_key(&rng);
|
||||
|
||||
assert_eq!(key1.len(), 32);
|
||||
assert_eq!(key2.len(), 32);
|
||||
assert_ne!(key1, key2); // Should be random
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_fake_x25519_key_is_quadratic_residue() {
|
||||
let rng = SecureRandom::new();
|
||||
let key = gen_fake_x25519_key(&rng);
|
||||
let p = curve25519_prime();
|
||||
let k_num = BigUint::from_bytes_le(&key);
|
||||
let exponent = (&p - BigUint::one()) >> 1;
|
||||
let legendre = k_num.modpow(&exponent, &p);
|
||||
assert_eq!(legendre, BigUint::one());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_tls_extension_builder() {
|
||||
let key = [0x42u8; 32];
|
||||
|
||||
let mut builder = TlsExtensionBuilder::new();
|
||||
builder.add_key_share(&key);
|
||||
builder.add_supported_versions(0x0304);
|
||||
|
||||
let result = builder.build();
|
||||
|
||||
// Check length prefix
|
||||
let len = u16::from_be_bytes([result[0], result[1]]) as usize;
|
||||
assert_eq!(len, result.len() - 2);
|
||||
|
||||
// Check key_share extension is present
|
||||
assert!(result.len() > 40); // At least key share
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_server_hello_builder() {
|
||||
let session_id = vec![0x01, 0x02, 0x03, 0x04];
|
||||
let key = [0x55u8; 32];
|
||||
|
||||
let builder = ServerHelloBuilder::new(session_id.clone())
|
||||
.with_x25519_key(&key)
|
||||
.with_tls13_version();
|
||||
|
||||
let record = builder.build_record();
|
||||
|
||||
// Validate structure
|
||||
validate_server_hello_structure(&record).expect("Invalid ServerHello structure");
|
||||
|
||||
// Check record type
|
||||
assert_eq!(record[0], TLS_RECORD_HANDSHAKE);
|
||||
|
||||
// Check version
|
||||
assert_eq!(&record[1..3], &TLS_VERSION);
|
||||
|
||||
// Check message type (ServerHello = 0x02)
|
||||
assert_eq!(record[5], 0x02);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_build_server_hello_structure() {
|
||||
let secret = b"test secret";
|
||||
let client_digest = [0x42u8; 32];
|
||||
let session_id = vec![0xAA; 32];
|
||||
|
||||
let rng = SecureRandom::new();
|
||||
let response = build_server_hello(secret, &client_digest, &session_id, 2048, &rng, None, 0);
|
||||
|
||||
// Should have at least 3 records
|
||||
assert!(response.len() > 100);
|
||||
|
||||
// First record should be ServerHello
|
||||
assert_eq!(response[0], TLS_RECORD_HANDSHAKE);
|
||||
|
||||
// Validate ServerHello structure
|
||||
validate_server_hello_structure(&response).expect("Invalid ServerHello");
|
||||
|
||||
// Find Change Cipher Spec
|
||||
let server_hello_len = 5 + u16::from_be_bytes([response[3], response[4]]) as usize;
|
||||
let ccs_start = server_hello_len;
|
||||
|
||||
assert!(response.len() > ccs_start + 6);
|
||||
assert_eq!(response[ccs_start], TLS_RECORD_CHANGE_CIPHER);
|
||||
|
||||
// Find Application Data
|
||||
let ccs_len = 5 + u16::from_be_bytes([response[ccs_start + 3], response[ccs_start + 4]]) as usize;
|
||||
let app_start = ccs_start + ccs_len;
|
||||
|
||||
assert!(response.len() > app_start + 5);
|
||||
assert_eq!(response[app_start], TLS_RECORD_APPLICATION);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_build_server_hello_digest() {
|
||||
let secret = b"test secret key here";
|
||||
let client_digest = [0x42u8; 32];
|
||||
let session_id = vec![0xAA; 32];
|
||||
|
||||
let rng = SecureRandom::new();
|
||||
let response1 = build_server_hello(secret, &client_digest, &session_id, 1024, &rng, None, 0);
|
||||
let response2 = build_server_hello(secret, &client_digest, &session_id, 1024, &rng, None, 0);
|
||||
|
||||
// Digest position should have non-zero data
|
||||
let digest1 = &response1[TLS_DIGEST_POS..TLS_DIGEST_POS + TLS_DIGEST_LEN];
|
||||
assert!(!digest1.iter().all(|&b| b == 0));
|
||||
|
||||
// Different calls should have different digests (due to random cert)
|
||||
let digest2 = &response2[TLS_DIGEST_POS..TLS_DIGEST_POS + TLS_DIGEST_LEN];
|
||||
assert_ne!(digest1, digest2);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_server_hello_extensions_length() {
|
||||
let session_id = vec![0x01; 32];
|
||||
        let key = [0x55u8; 32];

        let builder = ServerHelloBuilder::new(session_id)
            .with_x25519_key(&key)
            .with_tls13_version();

        let record = builder.build_record();

        // Parse to find extensions
        let msg_start = 5; // After record header
        let msg_len = u32::from_be_bytes([0, record[6], record[7], record[8]]) as usize;

        // Skip to session ID
        let session_id_pos = msg_start + 4 + 2 + 32; // header(4) + version(2) + random(32)
        let session_id_len = record[session_id_pos] as usize;

        // Skip to extensions
        let ext_len_pos = session_id_pos + 1 + session_id_len + 2 + 1; // session_id + cipher(2) + compression(1)
        let ext_len = u16::from_be_bytes([record[ext_len_pos], record[ext_len_pos + 1]]) as usize;

        // Verify extensions length matches actual data
        let extensions_data = &record[ext_len_pos + 2..msg_start + 4 + msg_len];
        assert_eq!(ext_len, extensions_data.len(),
            "Extension length mismatch: declared {}, actual {}", ext_len, extensions_data.len());
    }

    #[test]
    fn test_validate_tls_handshake_format() {
        // Build a minimal ClientHello-like structure
        let mut handshake = vec![0u8; 100];

        // Put a valid-looking digest at position 11
        handshake[TLS_DIGEST_POS..TLS_DIGEST_POS + TLS_DIGEST_LEN]
            .copy_from_slice(&[0x42; 32]);

        // Session ID length
        handshake[TLS_DIGEST_POS + TLS_DIGEST_LEN] = 32;

        // This won't validate (wrong HMAC) but shouldn't panic
        let secrets = vec![("test".to_string(), b"secret".to_vec())];
        let result = validate_tls_handshake(&handshake, &secrets, true);

        // Should return None (no match) but not panic
        assert!(result.is_none());
    }

    fn build_client_hello_with_exts(exts: Vec<(u16, Vec<u8>)>, host: &str) -> Vec<u8> {
        let mut body = Vec::new();
        body.extend_from_slice(&TLS_VERSION); // legacy version
        body.extend_from_slice(&[0u8; 32]); // random
        body.push(0); // session id len
        body.extend_from_slice(&2u16.to_be_bytes()); // cipher suites len
        body.extend_from_slice(&[0x13, 0x01]); // TLS_AES_128_GCM_SHA256
        body.push(1); // compression len
        body.push(0); // null compression

        // Build SNI extension
        let host_bytes = host.as_bytes();
        let mut sni_ext = Vec::new();
        sni_ext.extend_from_slice(&(host_bytes.len() as u16 + 3).to_be_bytes());
        sni_ext.push(0);
        sni_ext.extend_from_slice(&(host_bytes.len() as u16).to_be_bytes());
        sni_ext.extend_from_slice(host_bytes);

        let mut ext_blob = Vec::new();
        for (typ, data) in exts {
            ext_blob.extend_from_slice(&typ.to_be_bytes());
            ext_blob.extend_from_slice(&(data.len() as u16).to_be_bytes());
            ext_blob.extend_from_slice(&data);
        }
        // SNI last
        ext_blob.extend_from_slice(&0x0000u16.to_be_bytes());
        ext_blob.extend_from_slice(&(sni_ext.len() as u16).to_be_bytes());
        ext_blob.extend_from_slice(&sni_ext);

        body.extend_from_slice(&(ext_blob.len() as u16).to_be_bytes());
        body.extend_from_slice(&ext_blob);

        let mut handshake = Vec::new();
        handshake.push(0x01); // ClientHello
        let len_bytes = (body.len() as u32).to_be_bytes();
        handshake.extend_from_slice(&len_bytes[1..4]);
        handshake.extend_from_slice(&body);

        let mut record = Vec::new();
        record.push(TLS_RECORD_HANDSHAKE);
        record.extend_from_slice(&[0x03, 0x01]);
        record.extend_from_slice(&(handshake.len() as u16).to_be_bytes());
        record.extend_from_slice(&handshake);
        record
    }

    #[test]
    fn test_extract_sni_with_grease_extension() {
        // GREASE type 0x0a0a with zero length before SNI
        let ch = build_client_hello_with_exts(vec![(0x0a0a, Vec::new())], "example.com");
        let sni = extract_sni_from_client_hello(&ch);
        assert_eq!(sni.as_deref(), Some("example.com"));
    }

    #[test]
    fn test_extract_sni_tolerates_empty_unknown_extension() {
        let ch = build_client_hello_with_exts(vec![(0x1234, Vec::new())], "test.local");
        let sni = extract_sni_from_client_hello(&ch);
        assert_eq!(sni.as_deref(), Some("test.local"));
    }

    #[test]
    fn test_extract_alpn_single() {
        let mut alpn_data = Vec::new();
        // list length = 3 (1 length byte + "h2")
        alpn_data.extend_from_slice(&3u16.to_be_bytes());
        alpn_data.push(2);
        alpn_data.extend_from_slice(b"h2");
        let ch = build_client_hello_with_exts(vec![(0x0010, alpn_data)], "alpn.test");
        let alpn = extract_alpn_from_client_hello(&ch);
        let alpn_str: Vec<String> = alpn
            .iter()
            .map(|p| std::str::from_utf8(p).unwrap().to_string())
            .collect();
        assert_eq!(alpn_str, vec!["h2"]);
    }

    #[test]
    fn test_extract_alpn_multiple() {
        let mut alpn_data = Vec::new();
        // list length = 11 (sum of per-proto lengths including length bytes)
        alpn_data.extend_from_slice(&11u16.to_be_bytes());
        alpn_data.push(2);
        alpn_data.extend_from_slice(b"h2");
        alpn_data.push(4);
        alpn_data.extend_from_slice(b"spdy");
        alpn_data.push(2);
        alpn_data.extend_from_slice(b"h3");
        let ch = build_client_hello_with_exts(vec![(0x0010, alpn_data)], "alpn.test");
        let alpn = extract_alpn_from_client_hello(&ch);
        let alpn_str: Vec<String> = alpn
            .iter()
            .map(|p| std::str::from_utf8(p).unwrap().to_string())
            .collect();
        assert_eq!(alpn_str, vec!["h2", "spdy", "h3"]);
    }
}
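// Editorial sketch (not part of the diff above): the two ALPN tests build the
// extension body by hand, and the nested lengths are easy to get wrong. The
// hypothetical helper below produces the same bytes the tests assemble: a u16
// big-endian list length followed by one length byte per protocol name.
fn encode_alpn_extension(protocols: &[&[u8]]) -> Vec<u8> {
    let list_len: usize = protocols.iter().map(|p| 1 + p.len()).sum();
    let mut out = Vec::with_capacity(2 + list_len);
    out.extend_from_slice(&(list_len as u16).to_be_bytes());
    for p in protocols {
        out.push(p.len() as u8); // one length byte per protocol name
        out.extend_from_slice(p);
    }
    out
}
// encode_alpn_extension(&[b"h2", b"spdy", b"h3"]) yields the same 13 bytes
// that test_extract_alpn_multiple writes manually (list length 11 = 3+5+3).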
(File diff suppressed because it is too large)

194	src/proxy/direct_relay.rs	Normal file
@@ -0,0 +1,194 @@
|
||||
use std::fs::OpenOptions;
|
||||
use std::io::Write;
|
||||
use std::net::SocketAddr;
|
||||
use std::sync::Arc;
|
||||
|
||||
use tokio::io::{AsyncRead, AsyncWrite, AsyncWriteExt};
|
||||
use tokio::net::TcpStream;
|
||||
use tracing::{debug, info, warn};
|
||||
|
||||
use crate::config::ProxyConfig;
|
||||
use crate::crypto::SecureRandom;
|
||||
use crate::error::Result;
|
||||
use crate::protocol::constants::*;
|
||||
use crate::proxy::handshake::{HandshakeSuccess, encrypt_tg_nonce_with_ciphers, generate_tg_nonce};
|
||||
use crate::proxy::relay::relay_bidirectional;
|
||||
use crate::stats::Stats;
|
||||
use crate::stream::{BufferPool, CryptoReader, CryptoWriter};
|
||||
use crate::transport::UpstreamManager;
|
||||
|
||||
pub(crate) async fn handle_via_direct<R, W>(
|
||||
client_reader: CryptoReader<R>,
|
||||
client_writer: CryptoWriter<W>,
|
||||
success: HandshakeSuccess,
|
||||
upstream_manager: Arc<UpstreamManager>,
|
||||
stats: Arc<Stats>,
|
||||
config: Arc<ProxyConfig>,
|
||||
buffer_pool: Arc<BufferPool>,
|
||||
rng: Arc<SecureRandom>,
|
||||
) -> Result<()>
|
||||
where
|
||||
R: AsyncRead + Unpin + Send + 'static,
|
||||
W: AsyncWrite + Unpin + Send + 'static,
|
||||
{
|
||||
let user = &success.user;
|
||||
let dc_addr = get_dc_addr_static(success.dc_idx, &config)?;
|
||||
|
||||
debug!(
|
||||
user = %user,
|
||||
peer = %success.peer,
|
||||
dc = success.dc_idx,
|
||||
dc_addr = %dc_addr,
|
||||
proto = ?success.proto_tag,
|
||||
mode = "direct",
|
||||
"Connecting to Telegram DC"
|
||||
);
|
||||
|
||||
let tg_stream = upstream_manager
|
||||
.connect(dc_addr, Some(success.dc_idx), user.strip_prefix("scope_").filter(|s| !s.is_empty()))
|
||||
.await?;
|
||||
|
||||
debug!(peer = %success.peer, dc_addr = %dc_addr, "Connected, performing TG handshake");
|
||||
|
||||
let (tg_reader, tg_writer) =
|
||||
do_tg_handshake_static(tg_stream, &success, &config, rng.as_ref()).await?;
|
||||
|
||||
debug!(peer = %success.peer, "TG handshake complete, starting relay");
|
||||
|
||||
stats.increment_user_connects(user);
|
||||
stats.increment_user_curr_connects(user);
|
||||
stats.increment_current_connections_direct();
|
||||
|
||||
let relay_result = relay_bidirectional(
|
||||
client_reader,
|
||||
client_writer,
|
||||
tg_reader,
|
||||
tg_writer,
|
||||
user,
|
||||
Arc::clone(&stats),
|
||||
buffer_pool,
|
||||
)
|
||||
.await;
|
||||
|
||||
stats.decrement_current_connections_direct();
|
||||
stats.decrement_user_curr_connects(user);
|
||||
|
||||
match &relay_result {
|
||||
Ok(()) => debug!(user = %user, "Direct relay completed"),
|
||||
Err(e) => debug!(user = %user, error = %e, "Direct relay ended with error"),
|
||||
}
|
||||
|
||||
relay_result
|
||||
}
|
||||
|
||||
fn get_dc_addr_static(dc_idx: i16, config: &ProxyConfig) -> Result<SocketAddr> {
|
||||
let prefer_v6 = config.network.prefer == 6 && config.network.ipv6.unwrap_or(true);
|
||||
let datacenters = if prefer_v6 {
|
||||
&*TG_DATACENTERS_V6
|
||||
} else {
|
||||
&*TG_DATACENTERS_V4
|
||||
};
|
||||
|
||||
let num_dcs = datacenters.len();
|
||||
|
||||
let dc_key = dc_idx.to_string();
|
||||
if let Some(addrs) = config.dc_overrides.get(&dc_key) {
|
||||
let mut parsed = Vec::new();
|
||||
for addr_str in addrs {
|
||||
match addr_str.parse::<SocketAddr>() {
|
||||
Ok(addr) => parsed.push(addr),
|
||||
Err(_) => warn!(dc_idx = dc_idx, addr_str = %addr_str, "Invalid DC override address in config, ignoring"),
|
||||
}
|
||||
}
|
||||
|
||||
if let Some(addr) = parsed
|
||||
.iter()
|
||||
.find(|a| a.is_ipv6() == prefer_v6)
|
||||
.or_else(|| parsed.first())
|
||||
.copied()
|
||||
{
|
||||
debug!(dc_idx = dc_idx, addr = %addr, count = parsed.len(), "Using DC override from config");
|
||||
return Ok(addr);
|
||||
}
|
||||
}
|
||||
|
||||
let abs_dc = dc_idx.unsigned_abs() as usize;
|
||||
if abs_dc >= 1 && abs_dc <= num_dcs {
|
||||
return Ok(SocketAddr::new(datacenters[abs_dc - 1], TG_DATACENTER_PORT));
|
||||
}
|
||||
|
||||
// Unknown DC requested by client without override: log and fall back.
|
||||
if !config.dc_overrides.contains_key(&dc_key) {
|
||||
warn!(dc_idx = dc_idx, "Requested non-standard DC with no override; falling back to default cluster");
|
||||
if config.general.unknown_dc_file_log_enabled
|
||||
&& let Some(path) = &config.general.unknown_dc_log_path
|
||||
&& let Ok(handle) = tokio::runtime::Handle::try_current()
|
||||
{
|
||||
let path = path.clone();
|
||||
handle.spawn_blocking(move || {
|
||||
if let Ok(mut file) = OpenOptions::new().create(true).append(true).open(path) {
|
||||
let _ = writeln!(file, "dc_idx={dc_idx}");
|
||||
}
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
let default_dc = config.default_dc.unwrap_or(2) as usize;
|
||||
let fallback_idx = if default_dc >= 1 && default_dc <= num_dcs {
|
||||
default_dc - 1
|
||||
} else {
|
||||
1
|
||||
};
|
||||
|
||||
info!(
|
||||
original_dc = dc_idx,
|
||||
fallback_dc = (fallback_idx + 1) as u16,
|
||||
fallback_addr = %datacenters[fallback_idx],
|
||||
"Special DC ---> default_cluster"
|
||||
);
|
||||
|
||||
Ok(SocketAddr::new(
|
||||
datacenters[fallback_idx],
|
||||
TG_DATACENTER_PORT,
|
||||
))
|
||||
}
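// Editorial restatement (illustrative; `pick_override_addr` is not part of
// this module): the override lookup above prefers an address whose family
// matches the configured preference and otherwise takes the first parsed
// entry. Distilled, the selection rule is:
fn pick_override_addr(parsed: &[std::net::SocketAddr], prefer_v6: bool) -> Option<std::net::SocketAddr> {
    parsed
        .iter()
        .find(|a| a.is_ipv6() == prefer_v6) // family match wins
        .or_else(|| parsed.first()) // otherwise first valid entry
        .copied()
}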

async fn do_tg_handshake_static(
    mut stream: TcpStream,
    success: &HandshakeSuccess,
    config: &ProxyConfig,
    rng: &SecureRandom,
) -> Result<(
    CryptoReader<tokio::net::tcp::OwnedReadHalf>,
    CryptoWriter<tokio::net::tcp::OwnedWriteHalf>,
)> {
    let (nonce, _tg_enc_key, _tg_enc_iv, _tg_dec_key, _tg_dec_iv) = generate_tg_nonce(
        success.proto_tag,
        success.dc_idx,
        &success.dec_key,
        success.dec_iv,
        &success.enc_key,
        success.enc_iv,
        rng,
        config.general.fast_mode,
    );

    let (encrypted_nonce, tg_encryptor, tg_decryptor) = encrypt_tg_nonce_with_ciphers(&nonce);

    debug!(
        peer = %success.peer,
        nonce_head = %hex::encode(&nonce[..16]),
        "Sending nonce to Telegram"
    );

    stream.write_all(&encrypted_nonce).await?;
    stream.flush().await?;

    let (read_half, write_half) = stream.into_split();

    let max_pending = config.general.crypto_pending_buffer;
    Ok((
        CryptoReader::new(read_half, tg_decryptor),
        CryptoWriter::new(write_half, tg_encryptor, max_pending),
    ))
}
@@ -1,19 +1,53 @@
-//! MTProto Handshake Magics
+//! MTProto Handshake

#![allow(dead_code)]

use std::net::SocketAddr;
use std::sync::Arc;
use std::time::Duration;
use tokio::io::{AsyncRead, AsyncWrite, AsyncWriteExt};
-use tracing::{debug, warn, trace, info};
+use tracing::{debug, warn, trace};
use zeroize::Zeroize;

-use crate::crypto::{sha256, AesCtr};
-use crate::crypto::random::SECURE_RANDOM;
+use crate::crypto::{sha256, AesCtr, SecureRandom};
+use rand::Rng;
use crate::protocol::constants::*;
use crate::protocol::tls;
use crate::stream::{FakeTlsReader, FakeTlsWriter, CryptoReader, CryptoWriter};
use crate::error::{ProxyError, HandshakeResult};
use crate::stats::ReplayChecker;
use crate::config::ProxyConfig;
use crate::tls_front::{TlsFrontCache, emulator};

fn decode_user_secrets(
    config: &ProxyConfig,
    preferred_user: Option<&str>,
) -> Vec<(String, Vec<u8>)> {
    let mut secrets = Vec::with_capacity(config.access.users.len());

    if let Some(preferred) = preferred_user
        && let Some(secret_hex) = config.access.users.get(preferred)
        && let Ok(bytes) = hex::decode(secret_hex)
    {
        secrets.push((preferred.to_string(), bytes));
    }

    for (name, secret_hex) in &config.access.users {
        if preferred_user.is_some_and(|preferred| preferred == name.as_str()) {
            continue;
        }
        if let Ok(bytes) = hex::decode(secret_hex) {
            secrets.push((name.clone(), bytes));
        }
    }

    secrets
}
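// Usage sketch (illustrative, assuming the config shape above): with a
// preferred user, that user's secret is pushed first and the loop over the
// remaining users skips it, so it is never duplicated.
//
// let secrets = decode_user_secrets(&config, Some("alice"));
// assert_eq!(secrets.first().map(|(n, _)| n.as_str()), Some("alice"));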

/// Result of successful handshake
///
/// Key material (`dec_key`, `dec_iv`, `enc_key`, `enc_iv`) is
/// zeroized on drop.
#[derive(Debug, Clone)]
pub struct HandshakeSuccess {
    /// Authenticated user name
@@ -34,6 +68,15 @@ pub struct HandshakeSuccess {
    pub is_tls: bool,
}

impl Drop for HandshakeSuccess {
    fn drop(&mut self) {
        self.dec_key.zeroize();
        self.dec_iv.zeroize();
        self.enc_key.zeroize();
        self.enc_iv.zeroize();
    }
}

/// Handle fake TLS handshake
pub async fn handle_tls_handshake<R, W>(
    handshake: &[u8],
@@ -42,84 +85,149 @@ pub async fn handle_tls_handshake<R, W>(
    peer: SocketAddr,
    config: &ProxyConfig,
    replay_checker: &ReplayChecker,
-) -> HandshakeResult<(FakeTlsReader<R>, FakeTlsWriter<W>, String)>
+    rng: &SecureRandom,
+    tls_cache: Option<Arc<TlsFrontCache>>,
+) -> HandshakeResult<(FakeTlsReader<R>, FakeTlsWriter<W>, String), R, W>
where
    R: AsyncRead + Unpin,
    W: AsyncWrite + Unpin,
{
    debug!(peer = %peer, handshake_len = handshake.len(), "Processing TLS handshake");

    // Check minimum length
    if handshake.len() < tls::TLS_DIGEST_POS + tls::TLS_DIGEST_LEN + 1 {
        debug!(peer = %peer, "TLS handshake too short");
-        return HandshakeResult::BadClient;
+        return HandshakeResult::BadClient { reader, writer };
    }

    // Extract digest for replay check
    let digest = &handshake[tls::TLS_DIGEST_POS..tls::TLS_DIGEST_POS + tls::TLS_DIGEST_LEN];
    let digest_half = &digest[..tls::TLS_DIGEST_HALF_LEN];

    // Check for replay
-    if replay_checker.check_tls_digest(digest_half) {
-        warn!(peer = %peer, "TLS replay attack detected");
-        return HandshakeResult::BadClient;
+    if replay_checker.check_and_add_tls_digest(digest_half) {
+        warn!(peer = %peer, "TLS replay attack detected (duplicate digest)");
+        return HandshakeResult::BadClient { reader, writer };
    }

-    // Build secrets list
-    let secrets: Vec<(String, Vec<u8>)> = config.users.iter()
-        .filter_map(|(name, hex)| {
-            hex::decode(hex).ok().map(|bytes| (name.clone(), bytes))
-        })
-        .collect();
-
-    debug!(peer = %peer, num_users = secrets.len(), "Validating TLS handshake against users");

    // Validate handshake
+    let secrets = decode_user_secrets(config, None);

    let validation = match tls::validate_tls_handshake(
        handshake,
        &secrets,
-        config.ignore_time_skew,
+        config.access.ignore_time_skew,
    ) {
        Some(v) => v,
        None => {
-            debug!(peer = %peer, "TLS handshake validation failed - no matching user");
-            return HandshakeResult::BadClient;
+            debug!(
+                peer = %peer,
+                ignore_time_skew = config.access.ignore_time_skew,
+                "TLS handshake validation failed - no matching user or time skew"
+            );
+            return HandshakeResult::BadClient { reader, writer };
        }
    };

    // Get secret for response
    let secret = match secrets.iter().find(|(name, _)| *name == validation.user) {
        Some((_, s)) => s,
-        None => return HandshakeResult::BadClient,
+        None => return HandshakeResult::BadClient { reader, writer },
    };

-    // Build and send response
-    let response = tls::build_server_hello(
-        secret,
-        &validation.digest,
-        &validation.session_id,
-        config.fake_cert_len,
-    );

    let cached = if config.censorship.tls_emulation {
        if let Some(cache) = tls_cache.as_ref() {
            let selected_domain = if let Some(sni) = tls::extract_sni_from_client_hello(handshake) {
                if cache.contains_domain(&sni).await {
                    sni
                } else {
                    config.censorship.tls_domain.clone()
                }
            } else {
                config.censorship.tls_domain.clone()
            };
            let cached_entry = cache.get(&selected_domain).await;
            let use_full_cert_payload = cache
                .take_full_cert_budget_for_ip(
                    peer.ip(),
                    Duration::from_secs(config.censorship.tls_full_cert_ttl_secs),
                )
                .await;
            Some((cached_entry, use_full_cert_payload))
        } else {
            None
        }
    } else {
        None
    };

    let alpn_list = if config.censorship.alpn_enforce {
        tls::extract_alpn_from_client_hello(handshake)
    } else {
        Vec::new()
    };
    let selected_alpn = if config.censorship.alpn_enforce {
        if alpn_list.iter().any(|p| p == b"h2") {
            Some(b"h2".to_vec())
        } else if alpn_list.iter().any(|p| p == b"http/1.1") {
            Some(b"http/1.1".to_vec())
        } else {
            None
        }
    } else {
        None
    };

    let response = if let Some((cached_entry, use_full_cert_payload)) = cached {
        emulator::build_emulated_server_hello(
            secret,
            &validation.digest,
            &validation.session_id,
            &cached_entry,
            use_full_cert_payload,
            rng,
            selected_alpn.clone(),
            config.censorship.tls_new_session_tickets,
        )
    } else {
        tls::build_server_hello(
            secret,
            &validation.digest,
            &validation.session_id,
            config.censorship.fake_cert_len,
            rng,
            selected_alpn.clone(),
            config.censorship.tls_new_session_tickets,
        )
    };

    // Optional anti-fingerprint delay before sending ServerHello.
    if config.censorship.server_hello_delay_max_ms > 0 {
        let min = config.censorship.server_hello_delay_min_ms;
        let max = config.censorship.server_hello_delay_max_ms.max(min);
        let delay_ms = if max == min {
            max
        } else {
            rand::rng().random_range(min..=max)
        };
        if delay_ms > 0 {
            tokio::time::sleep(std::time::Duration::from_millis(delay_ms)).await;
        }
    }

    debug!(peer = %peer, response_len = response.len(), "Sending TLS ServerHello");

    if let Err(e) = writer.write_all(&response).await {
        warn!(peer = %peer, error = %e, "Failed to write TLS ServerHello");
        return HandshakeResult::Error(ProxyError::Io(e));
    }

    if let Err(e) = writer.flush().await {
        warn!(peer = %peer, error = %e, "Failed to flush TLS ServerHello");
        return HandshakeResult::Error(ProxyError::Io(e));
    }

-    // Record for replay protection
-    replay_checker.add_tls_digest(digest_half);
-
-    info!(
+    debug!(
        peer = %peer,
        user = %validation.user,
        "TLS handshake successful"
    );

    HandshakeResult::Success((
        FakeTlsReader::new(reader),
        FakeTlsWriter::new(writer),
@@ -136,111 +244,81 @@ pub async fn handle_mtproto_handshake<R, W>(
    config: &ProxyConfig,
    replay_checker: &ReplayChecker,
    is_tls: bool,
-) -> HandshakeResult<(CryptoReader<R>, CryptoWriter<W>, HandshakeSuccess)>
+    preferred_user: Option<&str>,
+) -> HandshakeResult<(CryptoReader<R>, CryptoWriter<W>, HandshakeSuccess), R, W>
where
    R: AsyncRead + Unpin + Send,
    W: AsyncWrite + Unpin + Send,
{
    trace!(peer = %peer, handshake = ?hex::encode(handshake), "MTProto handshake bytes");

    // Extract prekey and IV
    let dec_prekey_iv = &handshake[SKIP_LEN..SKIP_LEN + PREKEY_LEN + IV_LEN];

    debug!(
        peer = %peer,
        dec_prekey_iv = %hex::encode(dec_prekey_iv),
        "Extracted prekey+IV from handshake"
    );

    // Check for replay
-    if replay_checker.check_handshake(dec_prekey_iv) {
+    if replay_checker.check_and_add_handshake(dec_prekey_iv) {
        warn!(peer = %peer, "MTProto replay attack detected");
-        return HandshakeResult::BadClient;
+        return HandshakeResult::BadClient { reader, writer };
    }

    // Reversed for encryption direction
    let enc_prekey_iv: Vec<u8> = dec_prekey_iv.iter().rev().copied().collect();

-    // Try each user's secret
-    for (user, secret_hex) in &config.users {
-        let secret = match hex::decode(secret_hex) {
-            Ok(s) => s,
-            Err(_) => continue,
-        };
-
+    let decoded_users = decode_user_secrets(config, preferred_user);
+
+    for (user, secret) in decoded_users {
        // Derive decryption key
        let dec_prekey = &dec_prekey_iv[..PREKEY_LEN];
        let dec_iv_bytes = &dec_prekey_iv[PREKEY_LEN..];

        let mut dec_key_input = Vec::with_capacity(PREKEY_LEN + secret.len());
        dec_key_input.extend_from_slice(dec_prekey);
        dec_key_input.extend_from_slice(&secret);
        let dec_key = sha256(&dec_key_input);

        let dec_iv = u128::from_be_bytes(dec_iv_bytes.try_into().unwrap());

        // Decrypt handshake to check protocol tag
        let mut decryptor = AesCtr::new(&dec_key, dec_iv);
        let decrypted = decryptor.decrypt(handshake);

        trace!(
            peer = %peer,
            user = %user,
            decrypted_tail = %hex::encode(&decrypted[PROTO_TAG_POS..]),
            "Decrypted handshake tail"
        );

        // Check protocol tag
        let tag_bytes: [u8; 4] = decrypted[PROTO_TAG_POS..PROTO_TAG_POS + 4]
            .try_into()
            .unwrap();

        let proto_tag = match ProtoTag::from_bytes(tag_bytes) {
            Some(tag) => tag,
-            None => {
-                trace!(peer = %peer, user = %user, tag = %hex::encode(tag_bytes), "Invalid proto tag");
-                continue;
-            }
+            None => continue,
        };

        debug!(peer = %peer, user = %user, proto = ?proto_tag, "Found valid proto tag");

        // Check if mode is enabled
        let mode_ok = match proto_tag {
            ProtoTag::Secure => {
-                if is_tls { config.modes.tls } else { config.modes.secure }
+                if is_tls {
+                    config.general.modes.tls || config.general.modes.secure
+                } else {
+                    config.general.modes.secure || config.general.modes.tls
+                }
            }
-            ProtoTag::Intermediate | ProtoTag::Abridged => config.modes.classic,
+            ProtoTag::Intermediate | ProtoTag::Abridged => config.general.modes.classic,
        };

        if !mode_ok {
            debug!(peer = %peer, user = %user, proto = ?proto_tag, "Mode not enabled");
            continue;
        }

        // Extract DC index
        let dc_idx = i16::from_le_bytes(
            decrypted[DC_IDX_POS..DC_IDX_POS + 2].try_into().unwrap()
        );

        // Derive encryption key
        let enc_prekey = &enc_prekey_iv[..PREKEY_LEN];
        let enc_iv_bytes = &enc_prekey_iv[PREKEY_LEN..];

        let mut enc_key_input = Vec::with_capacity(PREKEY_LEN + secret.len());
        enc_key_input.extend_from_slice(enc_prekey);
        enc_key_input.extend_from_slice(&secret);
        let enc_key = sha256(&enc_key_input);

        let enc_iv = u128::from_be_bytes(enc_iv_bytes.try_into().unwrap());

-        // Record for replay protection
-        replay_checker.add_handshake(dec_prekey_iv);

        // Create new cipher instances
        let decryptor = AesCtr::new(&dec_key, dec_iv);
        let encryptor = AesCtr::new(&enc_key, enc_iv);

        let success = HandshakeSuccess {
            user: user.clone(),
            dc_idx,
@@ -252,8 +330,8 @@ where
            peer,
            is_tls,
        };

-        info!(
+        debug!(
            peer = %peer,
            user = %user,
            dc = dc_idx,
@@ -261,151 +339,170 @@ where
            tls = is_tls,
            "MTProto handshake successful"
        );

+        let max_pending = config.general.crypto_pending_buffer;
        return HandshakeResult::Success((
            CryptoReader::new(reader, decryptor),
-            CryptoWriter::new(writer, encryptor),
+            CryptoWriter::new(writer, encryptor, max_pending),
            success,
        ));
    }

    debug!(peer = %peer, "MTProto handshake: no matching user found");
-    HandshakeResult::BadClient
+    HandshakeResult::BadClient { reader, writer }
}
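// Editorial sketch of the key schedule used above (illustrative;
// `derive_keys_sketch` is not part of this module). Assuming sha256() and
// PREKEY_LEN = 32 / IV_LEN = 16 from this crate: both directions hash a
// 32-byte prekey concatenated with the user secret, and the encrypt-direction
// prekey+IV is simply the byte-reversed decrypt-direction prekey+IV.
fn derive_keys_sketch(dec_prekey_iv: &[u8; 48], secret: &[u8]) -> ([u8; 32], [u8; 32]) {
    // Reverse the full prekey+IV slice to get the opposite direction.
    let enc_prekey_iv: Vec<u8> = dec_prekey_iv.iter().rev().copied().collect();

    let mut dec_in = dec_prekey_iv[..32].to_vec();
    dec_in.extend_from_slice(secret);

    let mut enc_in = enc_prekey_iv[..32].to_vec();
    enc_in.extend_from_slice(secret);

    (sha256(&dec_in), sha256(&enc_in))
}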

/// Generate nonce for Telegram connection
///
/// In FAST MODE: we use the same keys for TG as for client, but reversed.
/// This means: client's enc_key becomes TG's dec_key and vice versa.
pub fn generate_tg_nonce(
    proto_tag: ProtoTag,
-    client_dec_key: &[u8; 32],
-    client_dec_iv: u128,
+    dc_idx: i16,
+    _client_dec_key: &[u8; 32],
+    _client_dec_iv: u128,
+    client_enc_key: &[u8; 32],
+    client_enc_iv: u128,
+    rng: &SecureRandom,
    fast_mode: bool,
) -> ([u8; HANDSHAKE_LEN], [u8; 32], u128, [u8; 32], u128) {
    loop {
-        let bytes = SECURE_RANDOM.bytes(HANDSHAKE_LEN);
+        let bytes = rng.bytes(HANDSHAKE_LEN);
        let mut nonce: [u8; HANDSHAKE_LEN] = bytes.try_into().unwrap();

        // Check reserved patterns
-        if RESERVED_NONCE_FIRST_BYTES.contains(&nonce[0]) {
-            continue;
-        }
+        if RESERVED_NONCE_FIRST_BYTES.contains(&nonce[0]) { continue; }

        let first_four: [u8; 4] = nonce[..4].try_into().unwrap();
-        if RESERVED_NONCE_BEGINNINGS.contains(&first_four) {
-            continue;
-        }
+        if RESERVED_NONCE_BEGINNINGS.contains(&first_four) { continue; }

        let continue_four: [u8; 4] = nonce[4..8].try_into().unwrap();
-        if RESERVED_NONCE_CONTINUES.contains(&continue_four) {
-            continue;
-        }
-
-        // Set protocol tag
+        if RESERVED_NONCE_CONTINUES.contains(&continue_four) { continue; }

        nonce[PROTO_TAG_POS..PROTO_TAG_POS + 4].copy_from_slice(&proto_tag.to_bytes());

-        // Fast mode: copy client's dec_key+iv (this becomes TG's enc direction)
-        // In fast mode, we make TG use the same keys as client but swapped:
-        // - What we decrypt FROM TG = what we encrypt TO client (so no re-encryption needed)
-        // - What we encrypt TO TG = what we decrypt FROM client
+        // CRITICAL: write dc_idx so upstream DC knows where to route
+        nonce[DC_IDX_POS..DC_IDX_POS + 2].copy_from_slice(&dc_idx.to_le_bytes());

        if fast_mode {
-            // Put client's dec_key + dec_iv into nonce[8:56]
-            // This will be used by TG for encryption TO us
-            nonce[SKIP_LEN..SKIP_LEN + KEY_LEN].copy_from_slice(client_dec_key);
-            nonce[SKIP_LEN + KEY_LEN..SKIP_LEN + KEY_LEN + IV_LEN]
-                .copy_from_slice(&client_dec_iv.to_be_bytes());
+            let mut key_iv = Vec::with_capacity(KEY_LEN + IV_LEN);
+            key_iv.extend_from_slice(client_enc_key);
+            key_iv.extend_from_slice(&client_enc_iv.to_be_bytes());
+            key_iv.reverse(); // Python/C behavior: reversed enc_key+enc_iv in nonce
+            nonce[SKIP_LEN..SKIP_LEN + KEY_LEN + IV_LEN].copy_from_slice(&key_iv);
        }

        // Now compute what keys WE will use for TG connection
        // enc_key_iv = nonce[8:56] (for encrypting TO TG)
        // dec_key_iv = nonce[8:56] reversed (for decrypting FROM TG)
        let enc_key_iv = &nonce[SKIP_LEN..SKIP_LEN + KEY_LEN + IV_LEN];
        let dec_key_iv: Vec<u8> = enc_key_iv.iter().rev().copied().collect();

        let tg_enc_key: [u8; 32] = enc_key_iv[..KEY_LEN].try_into().unwrap();
        let tg_enc_iv = u128::from_be_bytes(enc_key_iv[KEY_LEN..].try_into().unwrap());

        let tg_dec_key: [u8; 32] = dec_key_iv[..KEY_LEN].try_into().unwrap();
        let tg_dec_iv = u128::from_be_bytes(dec_key_iv[KEY_LEN..].try_into().unwrap());

        debug!(
            fast_mode = fast_mode,
            tg_enc_key = %hex::encode(&tg_enc_key[..8]),
            tg_dec_key = %hex::encode(&tg_dec_key[..8]),
            "Generated TG nonce"
        );

        return (nonce, tg_enc_key, tg_enc_iv, tg_dec_key, tg_dec_iv);
    }
}
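// For orientation, the nonce layout that generate_tg_nonce fills in, assuming
// the conventional obfuscated2 offsets (SKIP_LEN = 8, KEY_LEN = 32,
// IV_LEN = 16, PROTO_TAG_POS = 56, DC_IDX_POS = 60, HANDSHAKE_LEN = 64); the
// authoritative values live in protocol::constants:
//
//   bytes  0..8   random head (must avoid reserved first bytes/patterns)
//   bytes  8..40  AES-CTR key material (encrypt-to-TG direction as written)
//   bytes 40..56  AES-CTR IV material
//   bytes 56..60  protocol tag (Abridged / Intermediate / Secure)
//   bytes 60..62  target DC index, little-endian i16
//   bytes 62..64  random tail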

/// Encrypt nonce for sending to Telegram
///
/// Only the part from PROTO_TAG_POS onwards is encrypted.
/// The encryption key is derived from enc_key_iv in the nonce itself.
-pub fn encrypt_tg_nonce(nonce: &[u8; HANDSHAKE_LEN]) -> Vec<u8> {
-    // enc_key_iv is at nonce[8:56]
+/// Encrypt nonce for sending to Telegram and return cipher objects with correct counter state
+pub fn encrypt_tg_nonce_with_ciphers(nonce: &[u8; HANDSHAKE_LEN]) -> (Vec<u8>, AesCtr, AesCtr) {
    let enc_key_iv = &nonce[SKIP_LEN..SKIP_LEN + KEY_LEN + IV_LEN];

-    // Key for encrypting is just the first 32 bytes of enc_key_iv
-    let key: [u8; 32] = enc_key_iv[..KEY_LEN].try_into().unwrap();
-    let iv = u128::from_be_bytes(enc_key_iv[KEY_LEN..].try_into().unwrap());
-
-    let mut encryptor = AesCtr::new(&key, iv);
-
-    // Encrypt the entire nonce first, then take only the encrypted tail
-    let encrypted_full = encryptor.encrypt(nonce);
-
-    // Result: unencrypted head + encrypted tail
+    let dec_key_iv: Vec<u8> = enc_key_iv.iter().rev().copied().collect();
+
+    let enc_key: [u8; 32] = enc_key_iv[..KEY_LEN].try_into().unwrap();
+    let enc_iv = u128::from_be_bytes(enc_key_iv[KEY_LEN..].try_into().unwrap());
+
+    let dec_key: [u8; 32] = dec_key_iv[..KEY_LEN].try_into().unwrap();
+    let dec_iv = u128::from_be_bytes(dec_key_iv[KEY_LEN..].try_into().unwrap());
+
+    let mut encryptor = AesCtr::new(&enc_key, enc_iv);
+    let encrypted_full = encryptor.encrypt(nonce); // counter: 0 → 4

    let mut result = nonce[..PROTO_TAG_POS].to_vec();
    result.extend_from_slice(&encrypted_full[PROTO_TAG_POS..]);

    trace!(
        original = %hex::encode(&nonce[PROTO_TAG_POS..]),
        encrypted = %hex::encode(&result[PROTO_TAG_POS..]),
        "Encrypted nonce tail"
    );

-    result
+    let decryptor = AesCtr::new(&dec_key, dec_iv);
+
+    (result, encryptor, decryptor)
}

/// Encrypt nonce for sending to Telegram (legacy function for compatibility)
pub fn encrypt_tg_nonce(nonce: &[u8; HANDSHAKE_LEN]) -> Vec<u8> {
    let (encrypted, _, _) = encrypt_tg_nonce_with_ciphers(nonce);
    encrypted
}
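// Usage note (illustrative): the cipher objects returned by
// encrypt_tg_nonce_with_ciphers have already advanced their CTR counters by
// encrypting the 64-byte nonce (four AES blocks, hence the "0 → 4" comment),
// so subsequent stream data stays aligned with what Telegram expects.
// Building fresh AesCtr instances from the same key/IV after the fact, as the
// legacy encrypt_tg_nonce flow forced callers to do, would restart the
// counter at 0 and desynchronize the stream.
//
// let (wire_nonce, encryptor, decryptor) = encrypt_tg_nonce_with_ciphers(&nonce);
// stream.write_all(&wire_nonce).await?; // then wrap the stream halves with encryptor/decryptor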

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_generate_tg_nonce() {
        let client_dec_key = [0x42u8; 32];
        let client_dec_iv = 12345u128;

-        let (nonce, tg_enc_key, tg_enc_iv, tg_dec_key, tg_dec_iv) =
-            generate_tg_nonce(ProtoTag::Secure, &client_dec_key, client_dec_iv, false);
-
-        // Check length
+        let client_enc_key = [0x24u8; 32];
+        let client_enc_iv = 54321u128;
+
+        let rng = SecureRandom::new();
+        let (nonce, _tg_enc_key, _tg_enc_iv, _tg_dec_key, _tg_dec_iv) =
+            generate_tg_nonce(
+                ProtoTag::Secure,
+                2,
+                &client_dec_key,
+                client_dec_iv,
+                &client_enc_key,
+                client_enc_iv,
+                &rng,
+                false,
+            );

        assert_eq!(nonce.len(), HANDSHAKE_LEN);

        // Check proto tag is set
        let tag_bytes: [u8; 4] = nonce[PROTO_TAG_POS..PROTO_TAG_POS + 4].try_into().unwrap();
        assert_eq!(ProtoTag::from_bytes(tag_bytes), Some(ProtoTag::Secure));
    }

    #[test]
    fn test_encrypt_tg_nonce() {
        let client_dec_key = [0x42u8; 32];
        let client_dec_iv = 12345u128;

+        let client_enc_key = [0x24u8; 32];
+        let client_enc_iv = 54321u128;
+
+        let rng = SecureRandom::new();
        let (nonce, _, _, _, _) =
-            generate_tg_nonce(ProtoTag::Secure, &client_dec_key, client_dec_iv, false);
+            generate_tg_nonce(
+                ProtoTag::Secure,
+                2,
+                &client_dec_key,
+                client_dec_iv,
+                &client_enc_key,
+                client_enc_iv,
+                &rng,
+                false,
+            );

        let encrypted = encrypt_tg_nonce(&nonce);

        assert_eq!(encrypted.len(), HANDSHAKE_LEN);

        // First PROTO_TAG_POS bytes should be unchanged
        assert_eq!(&encrypted[..PROTO_TAG_POS], &nonce[..PROTO_TAG_POS]);

        // Rest should be different (encrypted)
        assert_ne!(&encrypted[PROTO_TAG_POS..], &nonce[PROTO_TAG_POS..]);
    }
-}

    #[test]
    fn test_handshake_success_zeroize_on_drop() {
        let success = HandshakeSuccess {
            user: "test".to_string(),
            dc_idx: 2,
            proto_tag: ProtoTag::Secure,
            dec_key: [0xAA; 32],
            dec_iv: 0xBBBBBBBB,
            enc_key: [0xCC; 32],
            enc_iv: 0xDDDDDDDD,
            peer: "127.0.0.1:1234".parse().unwrap(),
            is_tls: true,
        };

        assert_eq!(success.dec_key, [0xAA; 32]);
        assert_eq!(success.enc_key, [0xCC; 32]);

        drop(success);
        // Drop impl zeroizes key material without panic
    }
}

@@ -1,72 +1,214 @@
//! Masking - forward unrecognized traffic to mask host

use std::str;
use std::net::SocketAddr;
use std::time::Duration;
use tokio::net::TcpStream;
-use tokio::io::{AsyncReadExt, AsyncWriteExt};
+#[cfg(unix)]
+use tokio::net::UnixStream;
+use tokio::io::{AsyncRead, AsyncWrite, AsyncReadExt, AsyncWriteExt};
use tokio::time::timeout;
use tracing::debug;
use crate::config::ProxyConfig;
-use crate::transport::set_linger_zero;
+use crate::network::dns_overrides::resolve_socket_addr;
+use crate::stats::beobachten::BeobachtenStore;
+use crate::transport::proxy_protocol::{ProxyProtocolV1Builder, ProxyProtocolV2Builder};

const MASK_TIMEOUT: Duration = Duration::from_secs(5);
+/// Maximum duration for the entire masking relay.
+/// Limits resource consumption from slow-loris attacks and port scanners.
+const MASK_RELAY_TIMEOUT: Duration = Duration::from_secs(60);
const MASK_BUFFER_SIZE: usize = 8192;

/// Detect client type based on initial data
fn detect_client_type(data: &[u8]) -> &'static str {
    // Check for HTTP request
    if data.len() > 4
        && (data.starts_with(b"GET ") || data.starts_with(b"POST") ||
            data.starts_with(b"HEAD") || data.starts_with(b"PUT ") ||
            data.starts_with(b"DELETE") || data.starts_with(b"OPTIONS"))
    {
        return "HTTP";
    }

    // Check for TLS ClientHello (0x16 = handshake, 0x03 0x01-0x03 = TLS version)
    if data.len() > 3 && data[0] == 0x16 && data[1] == 0x03 {
        return "TLS-scanner";
    }

    // Check for SSH
    if data.starts_with(b"SSH-") {
        return "SSH";
    }

    // Port scanner (very short data)
    if data.len() < 10 {
        return "port-scanner";
    }

    "unknown"
}
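// A few spot checks of the heuristic above (editorial sketch; these tests are
// not in the source file):
#[cfg(test)]
mod detect_tests {
    use super::detect_client_type;

    #[test]
    fn classifies_probes() {
        assert_eq!(detect_client_type(b"GET / HTTP/1.1\r\n"), "HTTP");
        assert_eq!(detect_client_type(&[0x16, 0x03, 0x01, 0x00, 0xf4]), "TLS-scanner");
        assert_eq!(detect_client_type(b"SSH-2.0-OpenSSH_9.6"), "SSH");
        assert_eq!(detect_client_type(&[0x00]), "port-scanner");
    }
}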

/// Handle a bad client by forwarding to mask host
-pub async fn handle_bad_client(
-    mut client: TcpStream,
+pub async fn handle_bad_client<R, W>(
+    reader: R,
+    writer: W,
    initial_data: &[u8],
    peer: SocketAddr,
    local_addr: SocketAddr,
    config: &ProxyConfig,
-) {
-    if !config.mask {
+    beobachten: &BeobachtenStore,
+)
+where
+    R: AsyncRead + Unpin + Send + 'static,
+    W: AsyncWrite + Unpin + Send + 'static,
+{
+    let client_type = detect_client_type(initial_data);
+    if config.general.beobachten {
+        let ttl = Duration::from_secs(config.general.beobachten_minutes.saturating_mul(60));
+        beobachten.record(client_type, peer.ip(), ttl);
+    }
+
+    if !config.censorship.mask {
        // Masking disabled, just consume data
-        consume_client_data(client).await;
+        consume_client_data(reader).await;
        return;
    }

-    let mask_host = config.mask_host.as_deref()
-        .unwrap_or(&config.tls_domain);
-    let mask_port = config.mask_port;

    // Connect via Unix socket or TCP
    #[cfg(unix)]
    if let Some(ref sock_path) = config.censorship.mask_unix_sock {
        debug!(
            client_type = client_type,
            sock = %sock_path,
            data_len = initial_data.len(),
            "Forwarding bad client to mask unix socket"
        );

        let connect_result = timeout(MASK_TIMEOUT, UnixStream::connect(sock_path)).await;
        match connect_result {
            Ok(Ok(stream)) => {
                let (mask_read, mut mask_write) = stream.into_split();
                let proxy_header: Option<Vec<u8>> = match config.censorship.mask_proxy_protocol {
                    0 => None,
                    version => {
                        let header = match version {
                            2 => ProxyProtocolV2Builder::new().with_addrs(peer, local_addr).build(),
                            _ => match (peer, local_addr) {
                                (SocketAddr::V4(src), SocketAddr::V4(dst)) =>
                                    ProxyProtocolV1Builder::new().tcp4(src.into(), dst.into()).build(),
                                (SocketAddr::V6(src), SocketAddr::V6(dst)) =>
                                    ProxyProtocolV1Builder::new().tcp6(src.into(), dst.into()).build(),
                                _ =>
                                    ProxyProtocolV1Builder::new().build(),
                            },
                        };
                        Some(header)
                    }
                };
                if let Some(header) = proxy_header {
                    if mask_write.write_all(&header).await.is_err() {
                        return;
                    }
                }
                if timeout(MASK_RELAY_TIMEOUT, relay_to_mask(reader, writer, mask_read, mask_write, initial_data)).await.is_err() {
                    debug!("Mask relay timed out (unix socket)");
                }
            }
            Ok(Err(e)) => {
                debug!(error = %e, "Failed to connect to mask unix socket");
                consume_client_data(reader).await;
            }
            Err(_) => {
                debug!("Timeout connecting to mask unix socket");
                consume_client_data(reader).await;
            }
        }
        return;
    }

+    let mask_host = config.censorship.mask_host.as_deref()
+        .unwrap_or(&config.censorship.tls_domain);
+    let mask_port = config.censorship.mask_port;

    debug!(
        client_type = client_type,
        host = %mask_host,
        port = mask_port,
        data_len = initial_data.len(),
        "Forwarding bad client to mask host"
    );

-    // Connect to mask host
-    let mask_addr = format!("{}:{}", mask_host, mask_port);
-    let connect_result = timeout(
-        MASK_TIMEOUT,
-        TcpStream::connect(&mask_addr)
-    ).await;
-
-    let mut mask_stream = match connect_result {
-        Ok(Ok(s)) => s,

+    // Apply runtime DNS override for mask target when configured.
+    let mask_addr = resolve_socket_addr(mask_host, mask_port)
+        .map(|addr| addr.to_string())
+        .unwrap_or_else(|| format!("{}:{}", mask_host, mask_port));
+    let connect_result = timeout(MASK_TIMEOUT, TcpStream::connect(&mask_addr)).await;
+    match connect_result {
+        Ok(Ok(stream)) => {
+            let proxy_header: Option<Vec<u8>> = match config.censorship.mask_proxy_protocol {
+                0 => None,
+                version => {
+                    let header = match version {
+                        2 => ProxyProtocolV2Builder::new().with_addrs(peer, local_addr).build(),
+                        _ => match (peer, local_addr) {
+                            (SocketAddr::V4(src), SocketAddr::V4(dst)) =>
+                                ProxyProtocolV1Builder::new().tcp4(src.into(), dst.into()).build(),
+                            (SocketAddr::V6(src), SocketAddr::V6(dst)) =>
+                                ProxyProtocolV1Builder::new().tcp6(src.into(), dst.into()).build(),
+                            _ =>
+                                ProxyProtocolV1Builder::new().build(),
+                        },
+                    };
+                    Some(header)
+                }
+            };
+
+            let (mask_read, mut mask_write) = stream.into_split();
+            if let Some(header) = proxy_header {
+                if mask_write.write_all(&header).await.is_err() {
+                    return;
+                }
+            }
+            if timeout(MASK_RELAY_TIMEOUT, relay_to_mask(reader, writer, mask_read, mask_write, initial_data)).await.is_err() {
+                debug!("Mask relay timed out");
+            }
+        }
        Ok(Err(e)) => {
            debug!(error = %e, "Failed to connect to mask host");
-            consume_client_data(client).await;
-            return;
+            consume_client_data(reader).await;
        }
        Err(_) => {
            debug!("Timeout connecting to mask host");
-            consume_client_data(client).await;
-            return;
+            consume_client_data(reader).await;
        }
-    };
+    }
}

/// Relay traffic between client and mask backend
async fn relay_to_mask<R, W, MR, MW>(
    mut reader: R,
    mut writer: W,
    mut mask_read: MR,
    mut mask_write: MW,
    initial_data: &[u8],
)
where
    R: AsyncRead + Unpin + Send + 'static,
    W: AsyncWrite + Unpin + Send + 'static,
    MR: AsyncRead + Unpin + Send + 'static,
    MW: AsyncWrite + Unpin + Send + 'static,
{
    // Send initial data to mask host
-    if mask_stream.write_all(initial_data).await.is_err() {
+    if mask_write.write_all(initial_data).await.is_err() {
        return;
    }

-    // Relay traffic
-    let (mut client_read, mut client_write) = client.into_split();
-    let (mut mask_read, mut mask_write) = mask_stream.into_split();

    let c2m = tokio::spawn(async move {
        let mut buf = vec![0u8; MASK_BUFFER_SIZE];
        loop {
-            match client_read.read(&mut buf).await {
+            match reader.read(&mut buf).await {
                Ok(0) | Err(_) => {
                    let _ = mask_write.shutdown().await;
                    break;
@@ -79,24 +221,24 @@ pub async fn handle_bad_client(
                }
            }
    });

    let m2c = tokio::spawn(async move {
        let mut buf = vec![0u8; MASK_BUFFER_SIZE];
        loop {
            match mask_read.read(&mut buf).await {
                Ok(0) | Err(_) => {
-                    let _ = client_write.shutdown().await;
+                    let _ = writer.shutdown().await;
                    break;
                }
                Ok(n) => {
-                    if client_write.write_all(&buf[..n]).await.is_err() {
+                    if writer.write_all(&buf[..n]).await.is_err() {
                        break;
                    }
                }
            }
        }
    });

    // Wait for either to complete
    tokio::select! {
        _ = c2m => {}
@@ -105,11 +247,11 @@ pub async fn handle_bad_client(
}

/// Just consume all data from client without responding
-async fn consume_client_data(mut client: TcpStream) {
+async fn consume_client_data<R: AsyncRead + Unpin>(mut reader: R) {
    let mut buf = vec![0u8; MASK_BUFFER_SIZE];
-    while let Ok(n) = client.read(&mut buf).await {
+    while let Ok(n) = reader.read(&mut buf).await {
        if n == 0 {
            break;
        }
    }
}
783	src/proxy/middle_relay.rs	Normal file
@@ -0,0 +1,783 @@
use std::collections::HashMap;
use std::collections::hash_map::DefaultHasher;
use std::hash::{Hash, Hasher};
use std::net::{IpAddr, SocketAddr};
use std::sync::atomic::{AtomicU64, Ordering};
use std::sync::{Arc, Mutex, OnceLock};
use std::time::{Duration, Instant};

use bytes::Bytes;
use tokio::io::{AsyncRead, AsyncReadExt, AsyncWrite, AsyncWriteExt};
use tokio::sync::{mpsc, oneshot};
use tracing::{debug, trace, warn};

use crate::config::ProxyConfig;
use crate::crypto::SecureRandom;
use crate::error::{ProxyError, Result};
use crate::protocol::constants::{*, secure_padding_len};
use crate::proxy::handshake::HandshakeSuccess;
use crate::stats::Stats;
use crate::stream::{BufferPool, CryptoReader, CryptoWriter};
use crate::transport::middle_proxy::{MePool, MeResponse, proto_flags_for_tag};

enum C2MeCommand {
    Data { payload: Bytes, flags: u32 },
    Close,
}

const DESYNC_DEDUP_WINDOW: Duration = Duration::from_secs(60);
const DESYNC_ERROR_CLASS: &str = "frame_too_large_crypto_desync";
const C2ME_CHANNEL_CAPACITY_FALLBACK: usize = 128;
const C2ME_SOFT_PRESSURE_MIN_FREE_SLOTS: usize = 64;
const C2ME_SENDER_FAIRNESS_BUDGET: usize = 32;
static DESYNC_DEDUP: OnceLock<Mutex<HashMap<u64, Instant>>> = OnceLock::new();

struct RelayForensicsState {
    trace_id: u64,
    conn_id: u64,
    user: String,
    peer: SocketAddr,
    peer_hash: u64,
    started_at: Instant,
    bytes_c2me: u64,
    bytes_me2c: Arc<AtomicU64>,
    desync_all_full: bool,
}

fn hash_value<T: Hash>(value: &T) -> u64 {
    let mut hasher = DefaultHasher::new();
    value.hash(&mut hasher);
    hasher.finish()
}

fn hash_ip(ip: IpAddr) -> u64 {
    hash_value(&ip)
}

fn should_emit_full_desync(key: u64, all_full: bool, now: Instant) -> bool {
    if all_full {
        return true;
    }

    let dedup = DESYNC_DEDUP.get_or_init(|| Mutex::new(HashMap::new()));
    let mut guard = dedup.lock().expect("desync dedup mutex poisoned");
    guard.retain(|_, seen_at| now.duration_since(*seen_at) < DESYNC_DEDUP_WINDOW);

    match guard.get_mut(&key) {
        Some(seen_at) => {
            if now.duration_since(*seen_at) >= DESYNC_DEDUP_WINDOW {
                *seen_at = now;
                true
            } else {
                false
            }
        }
        None => {
            guard.insert(key, now);
            true
        }
    }
}
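// Behavior sketch for should_emit_full_desync (illustrative): the first
// report for a given key within DESYNC_DEDUP_WINDOW is emitted in full,
// repeats inside the window are suppressed, and `all_full` (the
// desync_all_full config flag) bypasses dedup entirely.
//
// let now = Instant::now();
// assert!(should_emit_full_desync(42, false, now));  // first: full log
// assert!(!should_emit_full_desync(42, false, now)); // repeat: suppressed
// assert!(should_emit_full_desync(42, true, now));   // forced by config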

fn report_desync_frame_too_large(
    state: &RelayForensicsState,
    proto_tag: ProtoTag,
    frame_counter: u64,
    max_frame: usize,
    len: usize,
    raw_len_bytes: Option<[u8; 4]>,
    stats: &Stats,
) -> ProxyError {
    let len_buf = raw_len_bytes.unwrap_or((len as u32).to_le_bytes());
    let looks_like_tls = raw_len_bytes
        .map(|b| b[0] == 0x16 && b[1] == 0x03)
        .unwrap_or(false);
    let looks_like_http = raw_len_bytes
        .map(|b| matches!(b[0], b'G' | b'P' | b'H' | b'C' | b'D'))
        .unwrap_or(false);
    let now = Instant::now();
    let dedup_key = hash_value(&(
        state.user.as_str(),
        state.peer_hash,
        proto_tag,
        DESYNC_ERROR_CLASS,
    ));
    let emit_full = should_emit_full_desync(dedup_key, state.desync_all_full, now);
    let duration_ms = state.started_at.elapsed().as_millis() as u64;
    let bytes_me2c = state.bytes_me2c.load(Ordering::Relaxed);

    stats.increment_desync_total();
    stats.observe_desync_frames_ok(frame_counter);
    if emit_full {
        stats.increment_desync_full_logged();
        warn!(
            trace_id = format_args!("0x{:016x}", state.trace_id),
            conn_id = state.conn_id,
            user = %state.user,
            peer_hash = format_args!("0x{:016x}", state.peer_hash),
            proto = ?proto_tag,
            mode = "middle_proxy",
            is_tls = true,
            duration_ms,
            bytes_c2me = state.bytes_c2me,
            bytes_me2c,
            raw_len = len,
            raw_len_hex = format_args!("0x{:08x}", len),
            raw_bytes = format_args!(
                "{:02x} {:02x} {:02x} {:02x}",
                len_buf[0], len_buf[1], len_buf[2], len_buf[3]
            ),
            max_frame,
            tls_like = looks_like_tls,
            http_like = looks_like_http,
            frames_ok = frame_counter,
            dedup_window_secs = DESYNC_DEDUP_WINDOW.as_secs(),
            desync_all_full = state.desync_all_full,
            full_reason = if state.desync_all_full { "desync_all_full" } else { "first_in_dedup_window" },
            error_class = DESYNC_ERROR_CLASS,
            "Frame too large — crypto desync forensics"
        );
        debug!(
            trace_id = format_args!("0x{:016x}", state.trace_id),
            conn_id = state.conn_id,
            user = %state.user,
            peer = %state.peer,
            "Frame too large forensic peer detail"
        );
    } else {
        stats.increment_desync_suppressed();
        debug!(
            trace_id = format_args!("0x{:016x}", state.trace_id),
            conn_id = state.conn_id,
            user = %state.user,
            peer_hash = format_args!("0x{:016x}", state.peer_hash),
            proto = ?proto_tag,
            duration_ms,
            bytes_c2me = state.bytes_c2me,
            bytes_me2c,
            raw_len = len,
            frames_ok = frame_counter,
            dedup_window_secs = DESYNC_DEDUP_WINDOW.as_secs(),
            error_class = DESYNC_ERROR_CLASS,
            "Frame too large — crypto desync forensic suppressed"
        );
    }

    ProxyError::Proxy(format!(
        "Frame too large: {len} (max {max_frame}), frames_ok={frame_counter}, conn_id={}, trace_id=0x{:016x}",
        state.conn_id,
        state.trace_id
    ))
}

fn should_yield_c2me_sender(sent_since_yield: usize, has_backlog: bool) -> bool {
    has_backlog && sent_since_yield >= C2ME_SENDER_FAIRNESS_BUDGET
}

async fn enqueue_c2me_command(
    tx: &mpsc::Sender<C2MeCommand>,
    cmd: C2MeCommand,
) -> std::result::Result<(), mpsc::error::SendError<C2MeCommand>> {
    match tx.try_send(cmd) {
        Ok(()) => Ok(()),
        Err(mpsc::error::TrySendError::Closed(cmd)) => Err(mpsc::error::SendError(cmd)),
        Err(mpsc::error::TrySendError::Full(cmd)) => {
            // Cooperative yield reduces burst catch-up when the per-conn queue is near saturation.
            if tx.capacity() <= C2ME_SOFT_PRESSURE_MIN_FREE_SLOTS {
                tokio::task::yield_now().await;
            }
            tx.send(cmd).await
        }
    }
}
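// Design note (editorial): enqueue_c2me_command tries a non-blocking
// try_send first, so the common, uncongested path never awaits. Only when
// the bounded channel is full does it fall back to an awaited send, which
// applies backpressure to the client read loop; if the queue is also nearly
// saturated it yields once before that send. Illustrative call site:
//
// enqueue_c2me_command(&c2me_tx, C2MeCommand::Data { payload, flags })
//     .await
//     .map_err(|_| ProxyError::Proxy("ME sender channel closed".into()))?;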

pub(crate) async fn handle_via_middle_proxy<R, W>(
    mut crypto_reader: CryptoReader<R>,
    crypto_writer: CryptoWriter<W>,
    success: HandshakeSuccess,
    me_pool: Arc<MePool>,
    stats: Arc<Stats>,
    config: Arc<ProxyConfig>,
    _buffer_pool: Arc<BufferPool>,
    local_addr: SocketAddr,
    rng: Arc<SecureRandom>,
) -> Result<()>
where
    R: AsyncRead + Unpin + Send + 'static,
    W: AsyncWrite + Unpin + Send + 'static,
{
    let user = success.user.clone();
    let peer = success.peer;
    let proto_tag = success.proto_tag;
    let pool_generation = me_pool.current_generation();

    debug!(
        user = %user,
        peer = %peer,
        dc = success.dc_idx,
        proto = ?proto_tag,
        mode = "middle_proxy",
        pool_generation,
        "Routing via Middle-End"
    );

    let (conn_id, me_rx) = me_pool.registry().register().await;
    let trace_id = conn_id;
    let bytes_me2c = Arc::new(AtomicU64::new(0));
    let mut forensics = RelayForensicsState {
        trace_id,
        conn_id,
        user: user.clone(),
        peer,
        peer_hash: hash_ip(peer.ip()),
        started_at: Instant::now(),
        bytes_c2me: 0,
        bytes_me2c: bytes_me2c.clone(),
        desync_all_full: config.general.desync_all_full,
    };

    stats.increment_user_connects(&user);
    stats.increment_user_curr_connects(&user);
    stats.increment_current_connections_me();

    // Per-user ad_tag from access.user_ad_tags; fallback to general.ad_tag (hot-reloadable)
    let user_tag: Option<Vec<u8>> = config
        .access
        .user_ad_tags
        .get(&user)
        .and_then(|s| hex::decode(s).ok())
        .filter(|v| v.len() == 16);
    let global_tag: Option<Vec<u8>> = config
        .general
        .ad_tag
        .as_ref()
        .and_then(|s| hex::decode(s).ok())
        .filter(|v| v.len() == 16);
    let effective_tag = user_tag.or(global_tag);

    let proto_flags = proto_flags_for_tag(proto_tag, effective_tag.is_some());
    debug!(
        trace_id = format_args!("0x{:016x}", trace_id),
        user = %user,
        conn_id,
        peer_hash = format_args!("0x{:016x}", forensics.peer_hash),
        desync_all_full = forensics.desync_all_full,
        proto_flags = format_args!("0x{:08x}", proto_flags),
        pool_generation,
        "ME relay started"
    );

    let translated_local_addr = me_pool.translate_our_addr(local_addr);

    let frame_limit = config.general.max_client_frame;

    let c2me_channel_capacity = config
        .general
        .me_c2me_channel_capacity
        .max(C2ME_CHANNEL_CAPACITY_FALLBACK);
    let (c2me_tx, mut c2me_rx) = mpsc::channel::<C2MeCommand>(c2me_channel_capacity);
    let me_pool_c2me = me_pool.clone();
    let effective_tag = effective_tag;
    let c2me_sender = tokio::spawn(async move {
        let mut sent_since_yield = 0usize;
        while let Some(cmd) = c2me_rx.recv().await {
            match cmd {
                C2MeCommand::Data { payload, flags } => {
                    me_pool_c2me.send_proxy_req(
                        conn_id,
                        success.dc_idx,
                        peer,
                        translated_local_addr,
                        payload.as_ref(),
                        flags,
                        effective_tag.as_deref(),
                    ).await?;
                    sent_since_yield = sent_since_yield.saturating_add(1);
                    if should_yield_c2me_sender(sent_since_yield, !c2me_rx.is_empty()) {
                        sent_since_yield = 0;
                        tokio::task::yield_now().await;
                    }
                }
                C2MeCommand::Close => {
                    let _ = me_pool_c2me.send_close(conn_id).await;
                    return Ok(());
                }
            }
        }
        Ok(())
    });

    let (stop_tx, mut stop_rx) = oneshot::channel::<()>();
    let mut me_rx_task = me_rx;
    let stats_clone = stats.clone();
    let rng_clone = rng.clone();
    let user_clone = user.clone();
    let bytes_me2c_clone = bytes_me2c.clone();
    let me_writer = tokio::spawn(async move {
        let mut writer = crypto_writer;
        let mut frame_buf = Vec::with_capacity(16 * 1024);
        loop {
            tokio::select! {
                msg = me_rx_task.recv() => {
                    match msg {
                        Some(MeResponse::Data { flags, data }) => {
                            trace!(conn_id, bytes = data.len(), flags, "ME->C data");
                            bytes_me2c_clone.fetch_add(data.len() as u64, Ordering::Relaxed);
                            stats_clone.add_user_octets_to(&user_clone, data.len() as u64);
                            write_client_payload(
                                &mut writer,
                                proto_tag,
                                flags,
                                &data,
                                rng_clone.as_ref(),
                                &mut frame_buf,
                            )
                            .await?;

                            // Drain all immediately queued ME responses and flush once.
                            while let Ok(next) = me_rx_task.try_recv() {
                                match next {
                                    MeResponse::Data { flags, data } => {
                                        trace!(conn_id, bytes = data.len(), flags, "ME->C data (batched)");
                                        bytes_me2c_clone.fetch_add(data.len() as u64, Ordering::Relaxed);
                                        stats_clone.add_user_octets_to(&user_clone, data.len() as u64);
                                        write_client_payload(
                                            &mut writer,
                                            proto_tag,
                                            flags,
                                            &data,
                                            rng_clone.as_ref(),
                                            &mut frame_buf,
                                        ).await?;
                                    }
                                    MeResponse::Ack(confirm) => {
                                        trace!(conn_id, confirm, "ME->C quickack (batched)");
                                        write_client_ack(&mut writer, proto_tag, confirm).await?;
                                    }
                                    MeResponse::Close => {
                                        debug!(conn_id, "ME sent close (batched)");
                                        let _ = writer.flush().await;
                                        return Ok(());
                                    }
                                }
                            }

                            writer.flush().await.map_err(ProxyError::Io)?;
                        }
                        Some(MeResponse::Ack(confirm)) => {
                            trace!(conn_id, confirm, "ME->C quickack");
                            write_client_ack(&mut writer, proto_tag, confirm).await?;
                        }
                        Some(MeResponse::Close) => {
                            debug!(conn_id, "ME sent close");
                            let _ = writer.flush().await;
                            return Ok(());
                        }
                        None => {
                            debug!(conn_id, "ME channel closed");
                            return Err(ProxyError::Proxy("ME connection lost".into()));
                        }
                    }
                }
                _ = &mut stop_rx => {
                    debug!(conn_id, "ME writer stop signal");
                    return Ok(());
                }
            }
        }
    });

    let mut main_result: Result<()> = Ok(());
    let mut client_closed = false;
    let mut frame_counter: u64 = 0;
    loop {
        match read_client_payload(
            &mut crypto_reader,
            proto_tag,
            frame_limit,
            &forensics,
            &mut frame_counter,
            &stats,
        ).await {
            Ok(Some((payload, quickack))) => {
                trace!(conn_id, bytes = payload.len(), "C->ME frame");
                forensics.bytes_c2me = forensics
                    .bytes_c2me
                    .saturating_add(payload.len() as u64);
                stats.add_user_octets_from(&user, payload.len() as u64);
                let mut flags = proto_flags;
                if quickack {
                    flags |= RPC_FLAG_QUICKACK;
                }
                if payload.len() >= 8 && payload[..8].iter().all(|b| *b == 0) {
                    flags |= RPC_FLAG_NOT_ENCRYPTED;
                }
                // Keep client read loop lightweight: route heavy ME send path via a dedicated task.
                if enqueue_c2me_command(&c2me_tx, C2MeCommand::Data { payload, flags })
                    .await
                    .is_err()
                {
                    main_result = Err(ProxyError::Proxy("ME sender channel closed".into()));
                    break;
                }
            }
            Ok(None) => {
                debug!(conn_id, "Client EOF");
                client_closed = true;
                let _ = enqueue_c2me_command(&c2me_tx, C2MeCommand::Close).await;
                break;
            }
            Err(e) => {
                main_result = Err(e);
                break;
            }
        }
    }

    drop(c2me_tx);
    let c2me_result = c2me_sender
        .await
        .unwrap_or_else(|e| Err(ProxyError::Proxy(format!("ME sender join error: {e}"))));

    let _ = stop_tx.send(());
    let mut writer_result = me_writer
        .await
        .unwrap_or_else(|e| Err(ProxyError::Proxy(format!("ME writer join error: {e}"))));

    // If the client closed and the ME channel stopped because the connection
    // was unregistered, that is not an error.
    if client_closed
        && matches!(
            writer_result,
            Err(ProxyError::Proxy(ref msg)) if msg == "ME connection lost"
        )
    {
        writer_result = Ok(());
    }

    let result = match (main_result, c2me_result, writer_result) {
        (Ok(()), Ok(()), Ok(())) => Ok(()),
        (Err(e), _, _) => Err(e),
        (_, Err(e), _) => Err(e),
        (_, _, Err(e)) => Err(e),
    };

    debug!(
        user = %user,
        conn_id,
        trace_id = format_args!("0x{:016x}", trace_id),
        duration_ms = forensics.started_at.elapsed().as_millis() as u64,
        bytes_c2me = forensics.bytes_c2me,
        bytes_me2c = forensics.bytes_me2c.load(Ordering::Relaxed),
        frames_ok = frame_counter,
        "ME relay cleanup"
    );
    me_pool.registry().unregister(conn_id).await;
    stats.decrement_current_connections_me();
    stats.decrement_user_curr_connects(&user);
    result
}
|
||||
|
||||
async fn read_client_payload<R>(
|
||||
client_reader: &mut CryptoReader<R>,
|
||||
proto_tag: ProtoTag,
|
||||
max_frame: usize,
|
||||
forensics: &RelayForensicsState,
|
||||
frame_counter: &mut u64,
|
||||
stats: &Stats,
|
||||
) -> Result<Option<(Bytes, bool)>>
|
||||
where
|
||||
R: AsyncRead + Unpin + Send + 'static,
|
||||
{
|
||||
loop {
|
||||
let (len, quickack, raw_len_bytes) = match proto_tag {
|
||||
ProtoTag::Abridged => {
|
||||
let mut first = [0u8; 1];
|
||||
match client_reader.read_exact(&mut first).await {
|
||||
Ok(_) => {}
|
||||
Err(e) if e.kind() == std::io::ErrorKind::UnexpectedEof => return Ok(None),
|
||||
Err(e) => return Err(ProxyError::Io(e)),
|
||||
}
|
||||
|
||||
let quickack = (first[0] & 0x80) != 0;
|
||||
let len_words = if (first[0] & 0x7f) == 0x7f {
|
||||
let mut ext = [0u8; 3];
|
||||
client_reader
|
||||
.read_exact(&mut ext)
|
||||
.await
|
||||
.map_err(ProxyError::Io)?;
|
||||
u32::from_le_bytes([ext[0], ext[1], ext[2], 0]) as usize
|
||||
} else {
|
||||
(first[0] & 0x7f) as usize
|
||||
};
|
||||
|
||||
let len = len_words
|
||||
.checked_mul(4)
|
||||
.ok_or_else(|| ProxyError::Proxy("Abridged frame length overflow".into()))?;
|
||||
(len, quickack, None)
|
||||
}
|
||||
ProtoTag::Intermediate | ProtoTag::Secure => {
|
||||
let mut len_buf = [0u8; 4];
|
||||
match client_reader.read_exact(&mut len_buf).await {
|
||||
Ok(_) => {}
|
||||
Err(e) if e.kind() == std::io::ErrorKind::UnexpectedEof => return Ok(None),
|
||||
Err(e) => return Err(ProxyError::Io(e)),
|
||||
}
|
||||
let quickack = (len_buf[3] & 0x80) != 0;
|
||||
(
|
||||
(u32::from_le_bytes(len_buf) & 0x7fff_ffff) as usize,
|
||||
quickack,
|
||||
Some(len_buf),
|
||||
)
|
||||
}
|
||||
};
|
||||
|
||||
if len == 0 {
|
||||
continue;
|
||||
}
|
||||
if len < 4 && proto_tag != ProtoTag::Abridged {
|
||||
warn!(
|
||||
trace_id = format_args!("0x{:016x}", forensics.trace_id),
|
||||
conn_id = forensics.conn_id,
|
||||
user = %forensics.user,
|
||||
len,
|
||||
proto = ?proto_tag,
|
||||
"Frame too small — corrupt or probe"
|
||||
);
|
||||
return Err(ProxyError::Proxy(format!("Frame too small: {len}")));
|
||||
}
|
||||
|
||||
if len > max_frame {
|
||||
return Err(report_desync_frame_too_large(
|
||||
forensics,
|
||||
proto_tag,
|
||||
*frame_counter,
|
||||
max_frame,
|
||||
len,
|
||||
raw_len_bytes,
|
||||
stats,
|
||||
));
|
||||
}
|
||||
|
||||
let secure_payload_len = if proto_tag == ProtoTag::Secure {
|
||||
match secure_payload_len_from_wire_len(len) {
|
||||
Some(payload_len) => payload_len,
|
||||
None => {
|
||||
stats.increment_secure_padding_invalid();
|
||||
return Err(ProxyError::Proxy(format!(
|
||||
"Invalid secure frame length: {len}"
|
||||
)));
|
||||
}
|
||||
}
|
||||
} else {
|
||||
len
|
||||
};
|
||||
|
||||
let mut payload = vec![0u8; len];
|
||||
client_reader
|
||||
.read_exact(&mut payload)
|
||||
.await
|
||||
.map_err(ProxyError::Io)?;
|
||||
|
||||
// Secure Intermediate: strip validated trailing padding bytes.
|
||||
if proto_tag == ProtoTag::Secure {
|
||||
payload.truncate(secure_payload_len);
|
||||
}
|
||||
*frame_counter += 1;
|
||||
return Ok(Some((Bytes::from(payload), quickack)));
|
||||
}
|
||||
}
|
||||
|
||||
async fn write_client_payload<W>(
|
||||
client_writer: &mut CryptoWriter<W>,
|
||||
proto_tag: ProtoTag,
|
||||
flags: u32,
|
||||
data: &[u8],
|
||||
rng: &SecureRandom,
|
||||
frame_buf: &mut Vec<u8>,
|
||||
) -> Result<()>
|
||||
where
|
||||
W: AsyncWrite + Unpin + Send + 'static,
|
||||
{
|
||||
let quickack = (flags & RPC_FLAG_QUICKACK) != 0;
|
||||
|
||||
match proto_tag {
|
||||
ProtoTag::Abridged => {
|
||||
if !data.len().is_multiple_of(4) {
|
||||
return Err(ProxyError::Proxy(format!(
|
||||
"Abridged payload must be 4-byte aligned, got {}",
|
||||
data.len()
|
||||
)));
|
||||
}
|
||||
|
||||
let len_words = data.len() / 4;
|
||||
if len_words < 0x7f {
|
||||
let mut first = len_words as u8;
|
||||
if quickack {
|
||||
first |= 0x80;
|
||||
}
|
||||
frame_buf.clear();
|
||||
frame_buf.reserve(1 + data.len());
|
||||
frame_buf.push(first);
|
||||
frame_buf.extend_from_slice(data);
|
||||
client_writer
|
||||
.write_all(frame_buf)
|
||||
.await
|
||||
.map_err(ProxyError::Io)?;
|
||||
} else if len_words < (1 << 24) {
|
||||
let mut first = 0x7fu8;
|
||||
if quickack {
|
||||
first |= 0x80;
|
||||
}
|
||||
let lw = (len_words as u32).to_le_bytes();
|
||||
frame_buf.clear();
|
||||
frame_buf.reserve(4 + data.len());
|
||||
frame_buf.extend_from_slice(&[first, lw[0], lw[1], lw[2]]);
|
||||
frame_buf.extend_from_slice(data);
|
||||
client_writer
|
||||
.write_all(frame_buf)
|
||||
.await
|
||||
.map_err(ProxyError::Io)?;
|
||||
} else {
|
||||
return Err(ProxyError::Proxy(format!(
|
||||
"Abridged frame too large: {}",
|
||||
data.len()
|
||||
)));
|
||||
}
|
||||
}
|
||||
ProtoTag::Intermediate | ProtoTag::Secure => {
|
||||
let padding_len = if proto_tag == ProtoTag::Secure {
|
||||
if !is_valid_secure_payload_len(data.len()) {
|
||||
return Err(ProxyError::Proxy(format!(
|
||||
"Secure payload must be 4-byte aligned, got {}",
|
||||
data.len()
|
||||
)));
|
||||
}
|
||||
secure_padding_len(data.len(), rng)
|
||||
} else {
|
||||
0
|
||||
};
|
||||
let mut len_val = (data.len() + padding_len) as u32;
|
||||
if quickack {
|
||||
len_val |= 0x8000_0000;
|
||||
}
|
||||
let total = 4 + data.len() + padding_len;
|
||||
frame_buf.clear();
|
||||
frame_buf.reserve(total);
|
||||
frame_buf.extend_from_slice(&len_val.to_le_bytes());
|
||||
frame_buf.extend_from_slice(data);
|
||||
if padding_len > 0 {
|
||||
let start = frame_buf.len();
|
||||
frame_buf.resize(start + padding_len, 0);
|
||||
rng.fill(&mut frame_buf[start..]);
|
||||
}
|
||||
client_writer
|
||||
.write_all(frame_buf)
|
||||
.await
|
||||
.map_err(ProxyError::Io)?;
|
||||
}
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
async fn write_client_ack<W>(
|
||||
client_writer: &mut CryptoWriter<W>,
|
||||
proto_tag: ProtoTag,
|
||||
confirm: u32,
|
||||
) -> Result<()>
|
||||
where
|
||||
W: AsyncWrite + Unpin + Send + 'static,
|
||||
{
|
||||
let bytes = if proto_tag == ProtoTag::Abridged {
|
||||
confirm.to_be_bytes()
|
||||
} else {
|
||||
confirm.to_le_bytes()
|
||||
};
|
||||
client_writer
|
||||
.write_all(&bytes)
|
||||
.await
|
||||
.map_err(ProxyError::Io)?;
|
||||
// ACK should remain low-latency.
|
||||
client_writer.flush().await.map_err(ProxyError::Io)
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
use tokio::time::{Duration as TokioDuration, timeout};
|
||||
|
||||
#[test]
|
||||
fn should_yield_sender_only_on_budget_with_backlog() {
|
||||
assert!(!should_yield_c2me_sender(0, true));
|
||||
assert!(!should_yield_c2me_sender(C2ME_SENDER_FAIRNESS_BUDGET - 1, true));
|
||||
assert!(!should_yield_c2me_sender(C2ME_SENDER_FAIRNESS_BUDGET, false));
|
||||
assert!(should_yield_c2me_sender(C2ME_SENDER_FAIRNESS_BUDGET, true));
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn enqueue_c2me_command_uses_try_send_fast_path() {
|
||||
let (tx, mut rx) = mpsc::channel::<C2MeCommand>(2);
|
||||
enqueue_c2me_command(
|
||||
&tx,
|
||||
C2MeCommand::Data {
|
||||
payload: Bytes::from_static(&[1, 2, 3]),
|
||||
flags: 0,
|
||||
},
|
||||
)
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
let recv = timeout(TokioDuration::from_millis(50), rx.recv())
|
||||
.await
|
||||
.unwrap()
|
||||
.unwrap();
|
||||
match recv {
|
||||
C2MeCommand::Data { payload, flags } => {
|
||||
assert_eq!(payload.as_ref(), &[1, 2, 3]);
|
||||
assert_eq!(flags, 0);
|
||||
}
|
||||
C2MeCommand::Close => panic!("unexpected close command"),
|
||||
}
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn enqueue_c2me_command_falls_back_to_send_when_queue_is_full() {
|
||||
let (tx, mut rx) = mpsc::channel::<C2MeCommand>(1);
|
||||
tx.send(C2MeCommand::Data {
|
||||
payload: Bytes::from_static(&[9]),
|
||||
flags: 9,
|
||||
})
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
let tx2 = tx.clone();
|
||||
let producer = tokio::spawn(async move {
|
||||
enqueue_c2me_command(
|
||||
&tx2,
|
||||
C2MeCommand::Data {
|
||||
payload: Bytes::from_static(&[7, 7]),
|
||||
flags: 7,
|
||||
},
|
||||
)
|
||||
.await
|
||||
.unwrap();
|
||||
});
|
||||
|
||||
let _ = timeout(TokioDuration::from_millis(100), rx.recv())
|
||||
.await
|
||||
.unwrap();
|
||||
producer.await.unwrap();
|
||||
|
||||
let recv = timeout(TokioDuration::from_millis(100), rx.recv())
|
||||
.await
|
||||
.unwrap()
|
||||
.unwrap();
|
||||
match recv {
|
||||
C2MeCommand::Data { payload, flags } => {
|
||||
assert_eq!(payload.as_ref(), &[7, 7]);
|
||||
assert_eq!(flags, 7);
|
||||
}
|
||||
C2MeCommand::Close => panic!("unexpected close command"),
|
||||
}
|
||||
}
|
||||
}
|
||||
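`read_client_payload` and `write_client_payload` above agree on the Abridged length header: quickack rides in the top bit, and the escape byte 0x7f switches to a 24-bit little-endian word count. The following is a standalone round-trip sketch of just that header logic — not code from this diff; the helper names are hypothetical and the values mirror the functions above.

// Hypothetical standalone sketch: round-trips the MTProto Abridged length
// header as parsed/emitted by the functions above (lengths are in 4-byte words).
fn encode_abridged_len(len_words: usize, quickack: bool) -> Vec<u8> {
    let qa = if quickack { 0x80u8 } else { 0 };
    if len_words < 0x7f {
        // Short form: word count fits in the low 7 bits.
        vec![len_words as u8 | qa]
    } else {
        // Extended form (assumes len_words < 1 << 24, as write_client_payload enforces).
        let lw = (len_words as u32).to_le_bytes();
        vec![0x7f | qa, lw[0], lw[1], lw[2]]
    }
}

fn decode_abridged_len(buf: &[u8]) -> (usize, bool) {
    let quickack = (buf[0] & 0x80) != 0;
    let len_words = if (buf[0] & 0x7f) == 0x7f {
        u32::from_le_bytes([buf[1], buf[2], buf[3], 0]) as usize
    } else {
        (buf[0] & 0x7f) as usize
    };
    (len_words, quickack)
}

fn main() {
    for &(words, qa) in &[(1usize, false), (0x7e, true), (0x7f, false), (1 << 20, true)] {
        let enc = encode_abridged_len(words, qa);
        assert_eq!(decode_abridged_len(&enc), (words, qa));
    }
    println!("abridged length header round-trips");
}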
src/proxy/mod.rs
@@ -1,11 +1,16 @@
 //! Proxy Defs
 
-pub mod handshake;
 pub mod client;
-pub mod relay;
+pub mod direct_relay;
+pub mod handshake;
 pub mod masking;
+pub mod middle_relay;
+pub mod relay;
 
-pub use handshake::*;
 pub use client::ClientHandler;
+#[allow(unused_imports)]
+pub use handshake::*;
+#[allow(unused_imports)]
+pub use masking::*;
+#[allow(unused_imports)]
 pub use relay::*;
-pub use masking::*;
src/proxy/relay.rs
@@ -1,22 +1,320 @@
-//! Bidirectional Relay
+//! Bidirectional Relay — poll-based, no head-of-line blocking
+//!
+//! ## What changed and why
+//!
+//! Previous implementation used a single-task `select! { biased; ... }` loop
+//! where each branch called `write_all()`. This caused head-of-line blocking:
+//! while `write_all()` waited for a slow writer (e.g. client on 3G downloading
+//! media), the entire loop was blocked — the other direction couldn't make progress.
+//!
+//! Symptoms observed in production:
+//! - Media loading at ~8 KB/s despite fast server connection
+//! - Stop-and-go pattern with 50–500ms gaps between chunks
+//! - `biased` select starving S→C direction
+//! - Some users unable to load media at all
+//!
+//! ## New architecture
+//!
+//! Uses `tokio::io::copy_bidirectional` which polls both directions concurrently
+//! in a single task via non-blocking `poll_read` / `poll_write` calls:
+//!
+//! Old (select! + write_all — BLOCKING):
+//!
+//!     loop {
+//!         select! {
+//!             biased;
+//!             data = client.read() => { server.write_all(data).await; }  ← BLOCKS here
+//!             data = server.read() => { client.write_all(data).await; }  ← can't run
+//!         }
+//!     }
+//!
+//! New (copy_bidirectional — CONCURRENT):
+//!
+//!     poll(cx) {
+//!         // Both directions polled in the same poll cycle
+//!         C→S: poll_read(client) → poll_write(server)   // non-blocking
+//!         S→C: poll_read(server) → poll_write(client)   // non-blocking
+//!         // If one writer is Pending, the other direction still progresses
+//!     }
+//!
+//! Benefits:
+//! - No head-of-line blocking: slow client download doesn't block uploads
+//! - No biased starvation: fair polling of both directions
+//! - Proper flush: `copy_bidirectional` calls `poll_flush` when reader stalls,
+//!   so CryptoWriter's pending ciphertext is always drained (fixes "stuck at 95%")
+//! - No deadlock risk: old write_all could deadlock when both TCP buffers filled;
+//!   poll-based approach lets TCP flow control work correctly
+//!
+//! Stats tracking:
+//! - `StatsIo` wraps client side, intercepts `poll_read` / `poll_write`
+//! - `poll_read` on client = C→S (client sending) → `octets_from`, `msgs_from`
+//! - `poll_write` on client = S→C (to client) → `octets_to`, `msgs_to`
+//! - `SharedCounters` (atomics) let the watchdog read stats without locking
 
+use std::io;
+use std::pin::Pin;
 use std::sync::Arc;
-use tokio::io::{AsyncRead, AsyncWrite, AsyncReadExt, AsyncWriteExt};
+use std::sync::atomic::{AtomicU64, Ordering};
+use std::task::{Context, Poll};
+use std::time::Duration;
+use tokio::io::{AsyncRead, AsyncWrite, AsyncWriteExt, ReadBuf, copy_bidirectional};
+use tokio::time::Instant;
 use tracing::{debug, trace, warn};
 use crate::error::Result;
 use crate::stats::Stats;
-use std::sync::atomic::{AtomicU64, Ordering};
 use crate::stream::BufferPool;
 
-const BUFFER_SIZE: usize = 65536;
+// ============= Constants =============
 
-/// Relay data bidirectionally between client and server
+/// Activity timeout for iOS compatibility.
+///
+/// iOS keeps Telegram connections alive in background for up to 30 minutes.
+/// Closing earlier causes unnecessary reconnects and handshake overhead.
+const ACTIVITY_TIMEOUT: Duration = Duration::from_secs(1800);
+
+/// Watchdog check interval — also used for periodic rate logging.
+///
+/// 10 seconds gives responsive timeout detection (±10s accuracy)
+/// without measurable overhead from atomic reads.
+const WATCHDOG_INTERVAL: Duration = Duration::from_secs(10);
+
+// ============= CombinedStream =============
+
+/// Combines separate read and write halves into a single bidirectional stream.
+///
+/// `copy_bidirectional` requires `AsyncRead + AsyncWrite` on each side,
+/// but the handshake layer produces split reader/writer pairs
+/// (e.g. `CryptoReader<FakeTlsReader<OwnedReadHalf>>` + `CryptoWriter<...>`).
+///
+/// This wrapper reunifies them with zero overhead — each trait method
+/// delegates directly to the corresponding half. No buffering, no copies.
+///
+/// Safety: `poll_read` only touches `reader`, `poll_write` only touches `writer`,
+/// so there's no aliasing even though both are called on the same `&mut self`.
+struct CombinedStream<R, W> {
+    reader: R,
+    writer: W,
+}
+
+impl<R, W> CombinedStream<R, W> {
+    fn new(reader: R, writer: W) -> Self {
+        Self { reader, writer }
+    }
+}
+
+impl<R: AsyncRead + Unpin, W: Unpin> AsyncRead for CombinedStream<R, W> {
+    #[inline]
+    fn poll_read(
+        self: Pin<&mut Self>,
+        cx: &mut Context<'_>,
+        buf: &mut ReadBuf<'_>,
+    ) -> Poll<io::Result<()>> {
+        Pin::new(&mut self.get_mut().reader).poll_read(cx, buf)
+    }
+}
+
+impl<R: Unpin, W: AsyncWrite + Unpin> AsyncWrite for CombinedStream<R, W> {
+    #[inline]
+    fn poll_write(
+        self: Pin<&mut Self>,
+        cx: &mut Context<'_>,
+        buf: &[u8],
+    ) -> Poll<io::Result<usize>> {
+        Pin::new(&mut self.get_mut().writer).poll_write(cx, buf)
+    }
+
+    #[inline]
+    fn poll_flush(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<io::Result<()>> {
+        Pin::new(&mut self.get_mut().writer).poll_flush(cx)
+    }
+
+    #[inline]
+    fn poll_shutdown(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<io::Result<()>> {
+        Pin::new(&mut self.get_mut().writer).poll_shutdown(cx)
+    }
+}
+
+// ============= SharedCounters =============
+
+/// Atomic counters shared between the relay (via StatsIo) and the watchdog task.
+///
+/// Using `Relaxed` ordering is sufficient because:
+/// - Counters are monotonically increasing (no ABA problem)
+/// - Slight staleness in watchdog reads is harmless (±10s check interval anyway)
+/// - No ordering dependencies between different counters
+struct SharedCounters {
+    /// Bytes read from client (C→S direction)
+    c2s_bytes: AtomicU64,
+    /// Bytes written to client (S→C direction)
+    s2c_bytes: AtomicU64,
+    /// Number of poll_read completions (≈ C→S chunks)
+    c2s_ops: AtomicU64,
+    /// Number of poll_write completions (≈ S→C chunks)
+    s2c_ops: AtomicU64,
+    /// Milliseconds since relay epoch of last I/O activity
+    last_activity_ms: AtomicU64,
+}
+
+impl SharedCounters {
+    fn new() -> Self {
+        Self {
+            c2s_bytes: AtomicU64::new(0),
+            s2c_bytes: AtomicU64::new(0),
+            c2s_ops: AtomicU64::new(0),
+            s2c_ops: AtomicU64::new(0),
+            last_activity_ms: AtomicU64::new(0),
+        }
+    }
+
+    /// Record activity at this instant.
+    #[inline]
+    fn touch(&self, now: Instant, epoch: Instant) {
+        let ms = now.duration_since(epoch).as_millis() as u64;
+        self.last_activity_ms.store(ms, Ordering::Relaxed);
+    }
+
+    /// How long since last recorded activity.
+    fn idle_duration(&self, now: Instant, epoch: Instant) -> Duration {
+        let last_ms = self.last_activity_ms.load(Ordering::Relaxed);
+        let now_ms = now.duration_since(epoch).as_millis() as u64;
+        Duration::from_millis(now_ms.saturating_sub(last_ms))
+    }
+}
+
+// ============= StatsIo =============
+
+/// Transparent I/O wrapper that tracks per-user statistics and activity.
+///
+/// Wraps the **client** side of the relay. Direction mapping:
+///
+/// | poll method  | direction | stats updated                        |
+/// |--------------|-----------|--------------------------------------|
+/// | `poll_read`  | C→S       | `octets_from`, `msgs_from`, counters |
+/// | `poll_write` | S→C       | `octets_to`, `msgs_to`, counters     |
+///
+/// Both update the shared activity timestamp for the watchdog.
+///
+/// Note on message counts: the original code counted one `read()`/`write_all()`
+/// as one "message". Here we count `poll_read`/`poll_write` completions instead.
+/// Byte counts are identical; op counts may differ slightly due to different
+/// internal buffering in `copy_bidirectional`. This is fine for monitoring.
+struct StatsIo<S> {
+    inner: S,
+    counters: Arc<SharedCounters>,
+    stats: Arc<Stats>,
+    user: String,
+    epoch: Instant,
+}
+
+impl<S> StatsIo<S> {
+    fn new(
+        inner: S,
+        counters: Arc<SharedCounters>,
+        stats: Arc<Stats>,
+        user: String,
+        epoch: Instant,
+    ) -> Self {
+        // Mark initial activity so the watchdog doesn't fire before data flows
+        counters.touch(Instant::now(), epoch);
+        Self { inner, counters, stats, user, epoch }
+    }
+}
+
+impl<S: AsyncRead + Unpin> AsyncRead for StatsIo<S> {
+    fn poll_read(
+        self: Pin<&mut Self>,
+        cx: &mut Context<'_>,
+        buf: &mut ReadBuf<'_>,
+    ) -> Poll<io::Result<()>> {
+        let this = self.get_mut();
+        let before = buf.filled().len();
+
+        match Pin::new(&mut this.inner).poll_read(cx, buf) {
+            Poll::Ready(Ok(())) => {
+                let n = buf.filled().len() - before;
+                if n > 0 {
+                    // C→S: client sent data
+                    this.counters.c2s_bytes.fetch_add(n as u64, Ordering::Relaxed);
+                    this.counters.c2s_ops.fetch_add(1, Ordering::Relaxed);
+                    this.counters.touch(Instant::now(), this.epoch);
+
+                    this.stats.add_user_octets_from(&this.user, n as u64);
+                    this.stats.increment_user_msgs_from(&this.user);
+
+                    trace!(user = %this.user, bytes = n, "C->S");
+                }
+                Poll::Ready(Ok(()))
+            }
+            other => other,
+        }
+    }
+}
+
+impl<S: AsyncWrite + Unpin> AsyncWrite for StatsIo<S> {
+    fn poll_write(
+        self: Pin<&mut Self>,
+        cx: &mut Context<'_>,
+        buf: &[u8],
+    ) -> Poll<io::Result<usize>> {
+        let this = self.get_mut();
+
+        match Pin::new(&mut this.inner).poll_write(cx, buf) {
+            Poll::Ready(Ok(n)) => {
+                if n > 0 {
+                    // S→C: data written to client
+                    this.counters.s2c_bytes.fetch_add(n as u64, Ordering::Relaxed);
+                    this.counters.s2c_ops.fetch_add(1, Ordering::Relaxed);
+                    this.counters.touch(Instant::now(), this.epoch);
+
+                    this.stats.add_user_octets_to(&this.user, n as u64);
+                    this.stats.increment_user_msgs_to(&this.user);
+
+                    trace!(user = %this.user, bytes = n, "S->C");
+                }
+                Poll::Ready(Ok(n))
+            }
+            other => other,
+        }
+    }
+
+    #[inline]
+    fn poll_flush(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<io::Result<()>> {
+        Pin::new(&mut self.get_mut().inner).poll_flush(cx)
+    }
+
+    #[inline]
+    fn poll_shutdown(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<io::Result<()>> {
+        Pin::new(&mut self.get_mut().inner).poll_shutdown(cx)
+    }
+}
+
+// ============= Relay =============
+
+/// Relay data bidirectionally between client and server.
+///
+/// Uses `tokio::io::copy_bidirectional` for concurrent, non-blocking data transfer.
+///
+/// ## API compatibility
+///
+/// Signature is identical to the previous implementation. The `_buffer_pool`
+/// parameter is retained for call-site compatibility — `copy_bidirectional`
+/// manages its own internal buffers (8 KB per direction).
+///
+/// ## Guarantees preserved
+///
+/// - Activity timeout: 30 minutes of inactivity → clean shutdown
+/// - Per-user stats: bytes and ops counted per direction
+/// - Periodic rate logging: every 10 seconds when active
+/// - Clean shutdown: both write sides are shut down on exit
+/// - Error propagation: I/O errors are returned as `ProxyError::Io`
 pub async fn relay_bidirectional<CR, CW, SR, SW>(
-    mut client_reader: CR,
-    mut client_writer: CW,
-    mut server_reader: SR,
-    mut server_writer: SW,
+    client_reader: CR,
+    client_writer: CW,
+    server_reader: SR,
+    server_writer: SW,
     user: &str,
     stats: Arc<Stats>,
     _buffer_pool: Arc<BufferPool>,
 ) -> Result<()>
 where
     CR: AsyncRead + Unpin + Send + 'static,
@@ -24,139 +322,145 @@ where
     SR: AsyncRead + Unpin + Send + 'static,
     SW: AsyncWrite + Unpin + Send + 'static,
 {
-    let user_c2s = user.to_string();
-    let user_s2c = user.to_string();
-
-    // Use Arc::clone instead of stats.clone()
-    let stats_c2s = Arc::clone(&stats);
-    let stats_s2c = Arc::clone(&stats);
-
-    let c2s_bytes = Arc::new(AtomicU64::new(0));
-    let s2c_bytes = Arc::new(AtomicU64::new(0));
-    let c2s_bytes_clone = Arc::clone(&c2s_bytes);
-    let s2c_bytes_clone = Arc::clone(&s2c_bytes);
-
-    // Client -> Server task
-    let c2s = tokio::spawn(async move {
-        let mut buf = vec![0u8; BUFFER_SIZE];
-        let mut total_bytes = 0u64;
-        let mut msg_count = 0u64;
-
-        loop {
-            match client_reader.read(&mut buf).await {
-                Ok(0) => {
-                    debug!(
-                        user = %user_c2s,
-                        total_bytes = total_bytes,
-                        msgs = msg_count,
-                        "Client closed connection (C->S)"
-                    );
-                    let _ = server_writer.shutdown().await;
-                    break;
-                }
-                Ok(n) => {
-                    total_bytes += n as u64;
-                    msg_count += 1;
-                    c2s_bytes_clone.store(total_bytes, Ordering::Relaxed);
-
-                    stats_c2s.add_user_octets_from(&user_c2s, n as u64);
-                    stats_c2s.increment_user_msgs_from(&user_c2s);
-
-                    trace!(
-                        user = %user_c2s,
-                        bytes = n,
-                        total = total_bytes,
-                        data_preview = %hex::encode(&buf[..n.min(32)]),
-                        "C->S data"
-                    );
-
-                    if let Err(e) = server_writer.write_all(&buf[..n]).await {
-                        debug!(user = %user_c2s, error = %e, "Failed to write to server");
-                        break;
-                    }
-                    if let Err(e) = server_writer.flush().await {
-                        debug!(user = %user_c2s, error = %e, "Failed to flush to server");
-                        break;
-                    }
-                }
-                Err(e) => {
-                    debug!(user = %user_c2s, error = %e, total_bytes = total_bytes, "Client read error");
-                    break;
-                }
-            }
-        }
-    });
-
-    // Server -> Client task
-    let s2c = tokio::spawn(async move {
-        let mut buf = vec![0u8; BUFFER_SIZE];
-        let mut total_bytes = 0u64;
-        let mut msg_count = 0u64;
-
-        loop {
-            match server_reader.read(&mut buf).await {
-                Ok(0) => {
-                    debug!(
-                        user = %user_s2c,
-                        total_bytes = total_bytes,
-                        msgs = msg_count,
-                        "Server closed connection (S->C)"
-                    );
-                    let _ = client_writer.shutdown().await;
-                    break;
-                }
-                Ok(n) => {
-                    total_bytes += n as u64;
-                    msg_count += 1;
-                    s2c_bytes_clone.store(total_bytes, Ordering::Relaxed);
-
-                    stats_s2c.add_user_octets_to(&user_s2c, n as u64);
-                    stats_s2c.increment_user_msgs_to(&user_s2c);
-
-                    trace!(
-                        user = %user_s2c,
-                        bytes = n,
-                        total = total_bytes,
-                        data_preview = %hex::encode(&buf[..n.min(32)]),
-                        "S->C data"
-                    );
-
-                    if let Err(e) = client_writer.write_all(&buf[..n]).await {
-                        debug!(user = %user_s2c, error = %e, "Failed to write to client");
-                        break;
-                    }
-                    if let Err(e) = client_writer.flush().await {
-                        debug!(user = %user_s2c, error = %e, "Failed to flush to client");
-                        break;
-                    }
-                }
-                Err(e) => {
-                    debug!(user = %user_s2c, error = %e, total_bytes = total_bytes, "Server read error");
-                    break;
-                }
-            }
-        }
-    });
-
-    // Wait for either direction to complete
-    tokio::select! {
-        result = c2s => {
-            if let Err(e) = result {
-                warn!(error = %e, "C->S task panicked");
-            }
-        }
-        result = s2c => {
-            if let Err(e) = result {
-                warn!(error = %e, "S->C task panicked");
-            }
-        }
-    }
-
-    debug!(
-        c2s_bytes = c2s_bytes.load(Ordering::Relaxed),
-        s2c_bytes = s2c_bytes.load(Ordering::Relaxed),
-        "Relay finished"
-    );
-
-    Ok(())
+    let epoch = Instant::now();
+    let counters = Arc::new(SharedCounters::new());
+    let user_owned = user.to_string();
+
+    // ── Combine split halves into bidirectional streams ──────────────
+    let client_combined = CombinedStream::new(client_reader, client_writer);
+    let mut server = CombinedStream::new(server_reader, server_writer);
+
+    // Wrap client with stats/activity tracking
+    let mut client = StatsIo::new(
+        client_combined,
+        Arc::clone(&counters),
+        Arc::clone(&stats),
+        user_owned.clone(),
+        epoch,
+    );
+
+    // ── Watchdog: activity timeout + periodic rate logging ──────────
+    let wd_counters = Arc::clone(&counters);
+    let wd_user = user_owned.clone();
+
+    let watchdog = async {
+        let mut prev_c2s: u64 = 0;
+        let mut prev_s2c: u64 = 0;
+
+        loop {
+            tokio::time::sleep(WATCHDOG_INTERVAL).await;
+
+            let now = Instant::now();
+            let idle = wd_counters.idle_duration(now, epoch);
+
+            // ── Activity timeout ────────────────────────────────────
+            if idle >= ACTIVITY_TIMEOUT {
+                let c2s = wd_counters.c2s_bytes.load(Ordering::Relaxed);
+                let s2c = wd_counters.s2c_bytes.load(Ordering::Relaxed);
+                warn!(
+                    user = %wd_user,
+                    c2s_bytes = c2s,
+                    s2c_bytes = s2c,
+                    idle_secs = idle.as_secs(),
+                    "Activity timeout"
+                );
+                return; // Causes select! to cancel copy_bidirectional
+            }
+
+            // ── Periodic rate logging ───────────────────────────────
+            let c2s = wd_counters.c2s_bytes.load(Ordering::Relaxed);
+            let s2c = wd_counters.s2c_bytes.load(Ordering::Relaxed);
+            let c2s_delta = c2s - prev_c2s;
+            let s2c_delta = s2c - prev_s2c;
+
+            if c2s_delta > 0 || s2c_delta > 0 {
+                let secs = WATCHDOG_INTERVAL.as_secs_f64();
+                debug!(
+                    user = %wd_user,
+                    c2s_kbps = (c2s_delta as f64 / secs / 1024.0) as u64,
+                    s2c_kbps = (s2c_delta as f64 / secs / 1024.0) as u64,
+                    c2s_total = c2s,
+                    s2c_total = s2c,
+                    "Relay active"
+                );
+            }
+
+            prev_c2s = c2s;
+            prev_s2c = s2c;
+        }
+    };
+
+    // ── Run bidirectional copy + watchdog concurrently ───────────────
+    //
+    // copy_bidirectional polls both directions in the same poll() call:
+    //   C→S: poll_read(client/StatsIo) → poll_write(server)
+    //   S→C: poll_read(server) → poll_write(client/StatsIo)
+    //
+    // When one direction's writer returns Pending, the other direction
+    // continues — no head-of-line blocking.
+    //
+    // When the watchdog fires, select! drops the copy future,
+    // releasing the &mut borrows on client and server.
+    let copy_result = tokio::select! {
+        result = copy_bidirectional(&mut client, &mut server) => Some(result),
+        _ = watchdog => None, // Activity timeout — cancel relay
+    };
+
+    // ── Clean shutdown ──────────────────────────────────────────────
+    // After select!, the losing future is dropped, borrows released.
+    // Shut down both write sides for clean TCP FIN.
+    let _ = client.shutdown().await;
+    let _ = server.shutdown().await;
+
+    // ── Final logging ───────────────────────────────────────────────
+    let c2s_ops = counters.c2s_ops.load(Ordering::Relaxed);
+    let s2c_ops = counters.s2c_ops.load(Ordering::Relaxed);
+    let duration = epoch.elapsed();
+
+    match copy_result {
+        Some(Ok((c2s, s2c))) => {
+            // Normal completion — one side closed the connection
+            debug!(
+                user = %user_owned,
+                c2s_bytes = c2s,
+                s2c_bytes = s2c,
+                c2s_msgs = c2s_ops,
+                s2c_msgs = s2c_ops,
+                duration_secs = duration.as_secs(),
+                "Relay finished"
+            );
+            Ok(())
+        }
+        Some(Err(e)) => {
+            // I/O error in one of the directions
+            let c2s = counters.c2s_bytes.load(Ordering::Relaxed);
+            let s2c = counters.s2c_bytes.load(Ordering::Relaxed);
+            debug!(
+                user = %user_owned,
+                c2s_bytes = c2s,
+                s2c_bytes = s2c,
+                c2s_msgs = c2s_ops,
+                s2c_msgs = s2c_ops,
+                duration_secs = duration.as_secs(),
+                error = %e,
+                "Relay error"
+            );
+            Err(e.into())
+        }
+        None => {
+            // Activity timeout (watchdog fired)
+            let c2s = counters.c2s_bytes.load(Ordering::Relaxed);
+            let s2c = counters.s2c_bytes.load(Ordering::Relaxed);
+            debug!(
+                user = %user_owned,
+                c2s_bytes = c2s,
+                s2c_bytes = s2c,
+                c2s_msgs = c2s_ops,
+                s2c_msgs = s2c_ops,
+                duration_secs = duration.as_secs(),
+                "Relay finished (activity timeout)"
+            );
+            Ok(())
+        }
+    }
 }
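The new relay leans entirely on `tokio::io::copy_bidirectional`. Below is a minimal, self-contained sketch of that pattern over in-memory pipes — illustrative only, not code from this diff; it assumes tokio 1.x with the usual io/rt/macros features, and all names are made up for the demo.

// Hypothetical standalone demo of the copy_bidirectional relay pattern.
use tokio::io::{copy, copy_bidirectional, duplex, split, AsyncReadExt, AsyncWriteExt};

#[tokio::main]
async fn main() -> std::io::Result<()> {
    // Two in-memory "sockets": client<->relay and relay<->server.
    let (mut client, mut relay_client_side) = duplex(16 * 1024);
    let (mut relay_server_side, server) = duplex(16 * 1024);

    // Stand-in server: echo everything back.
    tokio::spawn(async move {
        let (mut r, mut w) = split(server);
        let _ = copy(&mut r, &mut w).await;
    });

    // The relay itself: one task, both directions polled concurrently,
    // so a stalled writer in one direction never blocks the other.
    let relay = tokio::spawn(async move {
        copy_bidirectional(&mut relay_client_side, &mut relay_server_side).await
    });

    client.write_all(b"ping").await?;
    let mut reply = [0u8; 4];
    client.read_exact(&mut reply).await?;
    assert_eq!(&reply, b"ping");

    drop(client); // EOF propagates through both directions
    if let Ok(Ok((c2s, s2c))) = relay.await {
        println!("relay moved {c2s} bytes C->S and {s2c} bytes S->C");
    }
    Ok(())
}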
src/startup.rs (new file, 373 lines)
@@ -0,0 +1,373 @@
use std::time::{Instant, SystemTime, UNIX_EPOCH};

use tokio::sync::RwLock;

pub const COMPONENT_CONFIG_LOAD: &str = "config_load";
pub const COMPONENT_TRACING_INIT: &str = "tracing_init";
pub const COMPONENT_API_BOOTSTRAP: &str = "api_bootstrap";
pub const COMPONENT_TLS_FRONT_BOOTSTRAP: &str = "tls_front_bootstrap";
pub const COMPONENT_NETWORK_PROBE: &str = "network_probe";
pub const COMPONENT_ME_SECRET_FETCH: &str = "me_secret_fetch";
pub const COMPONENT_ME_PROXY_CONFIG_V4: &str = "me_proxy_config_fetch_v4";
pub const COMPONENT_ME_PROXY_CONFIG_V6: &str = "me_proxy_config_fetch_v6";
pub const COMPONENT_ME_POOL_CONSTRUCT: &str = "me_pool_construct";
pub const COMPONENT_ME_POOL_INIT_STAGE1: &str = "me_pool_init_stage1";
pub const COMPONENT_ME_CONNECTIVITY_PING: &str = "me_connectivity_ping";
pub const COMPONENT_DC_CONNECTIVITY_PING: &str = "dc_connectivity_ping";
pub const COMPONENT_LISTENERS_BIND: &str = "listeners_bind";
pub const COMPONENT_CONFIG_WATCHER_START: &str = "config_watcher_start";
pub const COMPONENT_METRICS_START: &str = "metrics_start";
pub const COMPONENT_RUNTIME_READY: &str = "runtime_ready";

#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum StartupStatus {
    Initializing,
    Ready,
}

impl StartupStatus {
    pub fn as_str(self) -> &'static str {
        match self {
            Self::Initializing => "initializing",
            Self::Ready => "ready",
        }
    }
}

#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum StartupComponentStatus {
    Pending,
    Running,
    Ready,
    Failed,
    Skipped,
}

impl StartupComponentStatus {
    pub fn as_str(self) -> &'static str {
        match self {
            Self::Pending => "pending",
            Self::Running => "running",
            Self::Ready => "ready",
            Self::Failed => "failed",
            Self::Skipped => "skipped",
        }
    }
}

#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum StartupMeStatus {
    Pending,
    Initializing,
    Ready,
    Failed,
    Skipped,
}

impl StartupMeStatus {
    pub fn as_str(self) -> &'static str {
        match self {
            Self::Pending => "pending",
            Self::Initializing => "initializing",
            Self::Ready => "ready",
            Self::Failed => "failed",
            Self::Skipped => "skipped",
        }
    }
}

#[derive(Clone, Debug)]
pub struct StartupComponentSnapshot {
    pub id: &'static str,
    pub title: &'static str,
    pub weight: f64,
    pub status: StartupComponentStatus,
    pub started_at_epoch_ms: Option<u64>,
    pub finished_at_epoch_ms: Option<u64>,
    pub duration_ms: Option<u64>,
    pub attempts: u32,
    pub details: Option<String>,
}

#[derive(Clone, Debug)]
pub struct StartupMeSnapshot {
    pub status: StartupMeStatus,
    pub current_stage: String,
    pub init_attempt: u32,
    pub retry_limit: String,
    pub last_error: Option<String>,
}

#[derive(Clone, Debug)]
pub struct StartupSnapshot {
    pub status: StartupStatus,
    pub degraded: bool,
    pub current_stage: String,
    pub started_at_epoch_secs: u64,
    pub ready_at_epoch_secs: Option<u64>,
    pub total_elapsed_ms: u64,
    pub transport_mode: String,
    pub me: StartupMeSnapshot,
    pub components: Vec<StartupComponentSnapshot>,
}

#[derive(Clone, Debug)]
struct StartupComponent {
    id: &'static str,
    title: &'static str,
    weight: f64,
    status: StartupComponentStatus,
    started_at_epoch_ms: Option<u64>,
    finished_at_epoch_ms: Option<u64>,
    duration_ms: Option<u64>,
    attempts: u32,
    details: Option<String>,
}

#[derive(Clone, Debug)]
struct StartupState {
    status: StartupStatus,
    degraded: bool,
    current_stage: String,
    started_at_epoch_secs: u64,
    ready_at_epoch_secs: Option<u64>,
    transport_mode: String,
    me: StartupMeSnapshot,
    components: Vec<StartupComponent>,
}

pub struct StartupTracker {
    started_at_instant: Instant,
    state: RwLock<StartupState>,
}

impl StartupTracker {
    pub fn new(started_at_epoch_secs: u64) -> Self {
        Self {
            started_at_instant: Instant::now(),
            state: RwLock::new(StartupState {
                status: StartupStatus::Initializing,
                degraded: false,
                current_stage: COMPONENT_CONFIG_LOAD.to_string(),
                started_at_epoch_secs,
                ready_at_epoch_secs: None,
                transport_mode: "unknown".to_string(),
                me: StartupMeSnapshot {
                    status: StartupMeStatus::Pending,
                    current_stage: "pending".to_string(),
                    init_attempt: 0,
                    retry_limit: "unlimited".to_string(),
                    last_error: None,
                },
                components: component_blueprint(),
            }),
        }
    }

    pub async fn set_transport_mode(&self, mode: &'static str) {
        self.state.write().await.transport_mode = mode.to_string();
    }

    pub async fn set_degraded(&self, degraded: bool) {
        self.state.write().await.degraded = degraded;
    }

    pub async fn start_component(&self, id: &'static str, details: Option<String>) {
        let mut guard = self.state.write().await;
        guard.current_stage = id.to_string();
        if let Some(component) = guard.components.iter_mut().find(|component| component.id == id) {
            if component.started_at_epoch_ms.is_none() {
                component.started_at_epoch_ms = Some(now_epoch_ms());
            }
            component.attempts = component.attempts.saturating_add(1);
            component.status = StartupComponentStatus::Running;
            component.details = normalize_details(details);
        }
    }

    pub async fn complete_component(&self, id: &'static str, details: Option<String>) {
        self.finish_component(id, StartupComponentStatus::Ready, details)
            .await;
    }

    pub async fn fail_component(&self, id: &'static str, details: Option<String>) {
        self.finish_component(id, StartupComponentStatus::Failed, details)
            .await;
    }

    pub async fn skip_component(&self, id: &'static str, details: Option<String>) {
        self.finish_component(id, StartupComponentStatus::Skipped, details)
            .await;
    }

    async fn finish_component(
        &self,
        id: &'static str,
        status: StartupComponentStatus,
        details: Option<String>,
    ) {
        let mut guard = self.state.write().await;
        let finished_at = now_epoch_ms();
        if let Some(component) = guard.components.iter_mut().find(|component| component.id == id) {
            if component.started_at_epoch_ms.is_none() {
                component.started_at_epoch_ms = Some(finished_at);
                component.attempts = component.attempts.saturating_add(1);
            }
            component.finished_at_epoch_ms = Some(finished_at);
            component.duration_ms = component
                .started_at_epoch_ms
                .map(|started_at| finished_at.saturating_sub(started_at));
            component.status = status;
            component.details = normalize_details(details);
        }
    }

    pub async fn set_me_status(&self, status: StartupMeStatus, stage: &'static str) {
        let mut guard = self.state.write().await;
        guard.me.status = status;
        guard.me.current_stage = stage.to_string();
    }

    pub async fn set_me_retry_limit(&self, retry_limit: String) {
        self.state.write().await.me.retry_limit = retry_limit;
    }

    pub async fn set_me_init_attempt(&self, attempt: u32) {
        self.state.write().await.me.init_attempt = attempt;
    }

    pub async fn set_me_last_error(&self, error: Option<String>) {
        self.state.write().await.me.last_error = normalize_details(error);
    }

    pub async fn mark_ready(&self) {
        let mut guard = self.state.write().await;
        if guard.status == StartupStatus::Ready {
            return;
        }
        guard.status = StartupStatus::Ready;
        guard.current_stage = "ready".to_string();
        guard.ready_at_epoch_secs = Some(now_epoch_secs());
    }

    pub async fn snapshot(&self) -> StartupSnapshot {
        let guard = self.state.read().await;
        StartupSnapshot {
            status: guard.status,
            degraded: guard.degraded,
            current_stage: guard.current_stage.clone(),
            started_at_epoch_secs: guard.started_at_epoch_secs,
            ready_at_epoch_secs: guard.ready_at_epoch_secs,
            total_elapsed_ms: self.started_at_instant.elapsed().as_millis() as u64,
            transport_mode: guard.transport_mode.clone(),
            me: guard.me.clone(),
            components: guard
                .components
                .iter()
                .map(|component| StartupComponentSnapshot {
                    id: component.id,
                    title: component.title,
                    weight: component.weight,
                    status: component.status,
                    started_at_epoch_ms: component.started_at_epoch_ms,
                    finished_at_epoch_ms: component.finished_at_epoch_ms,
                    duration_ms: component.duration_ms,
                    attempts: component.attempts,
                    details: component.details.clone(),
                })
                .collect(),
        }
    }
}

pub fn compute_progress_pct(snapshot: &StartupSnapshot, me_stage_progress: Option<f64>) -> f64 {
    if snapshot.status == StartupStatus::Ready {
        return 100.0;
    }

    let mut total_weight = 0.0f64;
    let mut completed_weight = 0.0f64;

    for component in &snapshot.components {
        total_weight += component.weight;
        let unit_progress = match component.status {
            StartupComponentStatus::Pending => 0.0,
            StartupComponentStatus::Running => {
                if component.id == COMPONENT_ME_POOL_INIT_STAGE1 {
                    me_stage_progress.unwrap_or(0.0).clamp(0.0, 1.0)
                } else {
                    0.0
                }
            }
            StartupComponentStatus::Ready
            | StartupComponentStatus::Failed
            | StartupComponentStatus::Skipped => 1.0,
        };
        completed_weight += component.weight * unit_progress;
    }

    if total_weight <= f64::EPSILON {
        0.0
    } else {
        ((completed_weight / total_weight) * 100.0).clamp(0.0, 100.0)
    }
}

fn component_blueprint() -> Vec<StartupComponent> {
    vec![
        component(COMPONENT_CONFIG_LOAD, "Config load", 5.0),
        component(COMPONENT_TRACING_INIT, "Tracing init", 3.0),
        component(COMPONENT_API_BOOTSTRAP, "API bootstrap", 5.0),
        component(COMPONENT_TLS_FRONT_BOOTSTRAP, "TLS front bootstrap", 5.0),
        component(COMPONENT_NETWORK_PROBE, "Network probe", 10.0),
        component(COMPONENT_ME_SECRET_FETCH, "ME secret fetch", 8.0),
        component(COMPONENT_ME_PROXY_CONFIG_V4, "ME config v4 fetch", 4.0),
        component(COMPONENT_ME_PROXY_CONFIG_V6, "ME config v6 fetch", 4.0),
        component(COMPONENT_ME_POOL_CONSTRUCT, "ME pool construct", 6.0),
        component(COMPONENT_ME_POOL_INIT_STAGE1, "ME pool init stage1", 24.0),
        component(COMPONENT_ME_CONNECTIVITY_PING, "ME connectivity ping", 6.0),
        component(COMPONENT_DC_CONNECTIVITY_PING, "DC connectivity ping", 8.0),
        component(COMPONENT_LISTENERS_BIND, "Listener bind", 8.0),
        component(COMPONENT_CONFIG_WATCHER_START, "Config watcher start", 2.0),
        component(COMPONENT_METRICS_START, "Metrics start", 1.0),
        component(COMPONENT_RUNTIME_READY, "Runtime ready", 1.0),
    ]
}

fn component(id: &'static str, title: &'static str, weight: f64) -> StartupComponent {
    StartupComponent {
        id,
        title,
        weight,
        status: StartupComponentStatus::Pending,
        started_at_epoch_ms: None,
        finished_at_epoch_ms: None,
        duration_ms: None,
        attempts: 0,
        details: None,
    }
}

fn normalize_details(details: Option<String>) -> Option<String> {
    details.map(|detail| {
        if detail.len() <= 256 {
            detail
        } else {
            detail[..256].to_string()
        }
    })
}

fn now_epoch_secs() -> u64 {
    SystemTime::now()
        .duration_since(UNIX_EPOCH)
        .unwrap_or_default()
        .as_secs()
}

fn now_epoch_ms() -> u64 {
    SystemTime::now()
        .duration_since(UNIX_EPOCH)
        .unwrap_or_default()
        .as_millis() as u64
}
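`compute_progress_pct` is a weight-normalized sum: finished (ready/failed/skipped) components count fully, and the ME pool stage-1 component counts fractionally while running. The blueprint weights above sum to 100, so the arithmetic is easy to check in isolation — the sketch below is a hypothetical standalone reduction of the same rule, not code from this file.

// Hypothetical standalone check of the weighted-progress rule.
// units: (weight, unit_progress in 0..=1)
fn weighted_progress(units: &[(f64, f64)]) -> f64 {
    let total: f64 = units.iter().map(|(w, _)| w).sum();
    let done: f64 = units.iter().map(|(w, p)| w * p.clamp(0.0, 1.0)).sum();
    if total <= f64::EPSILON {
        0.0
    } else {
        (done / total * 100.0).clamp(0.0, 100.0)
    }
}

fn main() {
    // Config load (weight 5.0) ready, ME pool init stage1 (24.0) running at 50%,
    // everything else pending (collapsed into one 71.0 pending bucket).
    let pct = weighted_progress(&[(5.0, 1.0), (24.0, 0.5), (71.0, 0.0)]);
    assert!((pct - 17.0).abs() < 1e-9); // (5 + 12) / 100 = 17%
    println!("startup progress: {pct:.1}%");
}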
src/stats/beobachten.rs (new file, 117 lines)
@@ -0,0 +1,117 @@
//! Per-IP forensic buckets for scanner and handshake failure observation.

use std::collections::{BTreeMap, HashMap};
use std::net::IpAddr;
use std::time::{Duration, Instant};

use parking_lot::Mutex;

const CLEANUP_INTERVAL: Duration = Duration::from_secs(30);

#[derive(Default)]
struct BeobachtenInner {
    entries: HashMap<(String, IpAddr), BeobachtenEntry>,
    last_cleanup: Option<Instant>,
}

#[derive(Clone, Copy)]
struct BeobachtenEntry {
    tries: u64,
    last_seen: Instant,
}

/// In-memory, TTL-scoped per-IP counters keyed by source class.
pub struct BeobachtenStore {
    inner: Mutex<BeobachtenInner>,
}

impl Default for BeobachtenStore {
    fn default() -> Self {
        Self::new()
    }
}

impl BeobachtenStore {
    pub fn new() -> Self {
        Self {
            inner: Mutex::new(BeobachtenInner::default()),
        }
    }

    pub fn record(&self, class: &str, ip: IpAddr, ttl: Duration) {
        if class.is_empty() || ttl.is_zero() {
            return;
        }

        let now = Instant::now();
        let mut guard = self.inner.lock();
        Self::cleanup_if_needed(&mut guard, now, ttl);

        let key = (class.to_string(), ip);
        let entry = guard.entries.entry(key).or_insert(BeobachtenEntry {
            tries: 0,
            last_seen: now,
        });
        entry.tries = entry.tries.saturating_add(1);
        entry.last_seen = now;
    }

    pub fn snapshot_text(&self, ttl: Duration) -> String {
        if ttl.is_zero() {
            return "beobachten disabled\n".to_string();
        }

        let now = Instant::now();
        let mut guard = self.inner.lock();
        Self::cleanup(&mut guard, now, ttl);
        guard.last_cleanup = Some(now);

        let mut grouped = BTreeMap::<String, Vec<(IpAddr, u64)>>::new();
        for ((class, ip), entry) in &guard.entries {
            grouped
                .entry(class.clone())
                .or_default()
                .push((*ip, entry.tries));
        }

        if grouped.is_empty() {
            return "empty\n".to_string();
        }

        let mut out = String::with_capacity(grouped.len() * 64);
        for (class, entries) in &mut grouped {
            out.push('[');
            out.push_str(class);
            out.push_str("]\n");

            entries.sort_by(|(ip_a, tries_a), (ip_b, tries_b)| {
                tries_b
                    .cmp(tries_a)
                    .then_with(|| ip_a.to_string().cmp(&ip_b.to_string()))
            });

            for (ip, tries) in entries {
                out.push_str(&format!("{ip}-{tries}\n"));
            }
        }

        out
    }

    fn cleanup_if_needed(inner: &mut BeobachtenInner, now: Instant, ttl: Duration) {
        let should_cleanup = match inner.last_cleanup {
            Some(last) => now.saturating_duration_since(last) >= CLEANUP_INTERVAL,
            None => true,
        };
        if should_cleanup {
            Self::cleanup(inner, now, ttl);
            inner.last_cleanup = Some(now);
        }
    }

    fn cleanup(inner: &mut BeobachtenInner, now: Instant, ttl: Duration) {
        inner.entries.retain(|_, entry| {
            now.saturating_duration_since(entry.last_seen) <= ttl
        });
    }
}
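A hypothetical call site for the store above — class name, IP, and TTL are illustrative, and it assumes `BeobachtenStore` is in scope:

use std::net::{IpAddr, Ipv4Addr};
use std::time::Duration;

fn demo(store: &BeobachtenStore) {
    let ip = IpAddr::V4(Ipv4Addr::new(203, 0, 113, 7)); // TEST-NET-3 example address
    let ttl = Duration::from_secs(600);
    store.record("tls_bad_digest", ip, ttl); // first suspicious attempt
    store.record("tls_bad_digest", ip, ttl); // same (class, ip) key: tries -> 2
    // snapshot_text groups by class and sorts by tries descending, e.g.:
    // [tls_bad_digest]
    // 203.0.113.7-2
    print!("{}", store.snapshot_text(ttl));
}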
src/stats/mod.rs (1748 lines)
File diff suppressed because it is too large
src/stats/telemetry.rs (new file, 29 lines)
@@ -0,0 +1,29 @@
use crate::config::{MeTelemetryLevel, TelemetryConfig};

/// Runtime telemetry policy used by hot-path counters.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub struct TelemetryPolicy {
    pub core_enabled: bool,
    pub user_enabled: bool,
    pub me_level: MeTelemetryLevel,
}

impl Default for TelemetryPolicy {
    fn default() -> Self {
        Self {
            core_enabled: true,
            user_enabled: true,
            me_level: MeTelemetryLevel::Normal,
        }
    }
}

impl TelemetryPolicy {
    pub fn from_config(cfg: &TelemetryConfig) -> Self {
        Self {
            core_enabled: cfg.core_enabled,
            user_enabled: cfg.user_enabled,
            me_level: cfg.me_level,
        }
    }
}
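`TelemetryPolicy` is read on hot paths, so the natural call-site shape (assumed here — this diff does not show the callers) is a plain branch in front of the atomic update:

// Assumed call-site sketch, not from this diff: gate a per-user counter on the
// policy so disabled telemetry costs one predictable branch instead of an
// atomic read-modify-write. Assumes TelemetryPolicy from the module above.
use std::sync::atomic::{AtomicU64, Ordering};

fn count_user_msg(policy: &TelemetryPolicy, ctr: &AtomicU64) {
    if policy.user_enabled {
        ctr.fetch_add(1, Ordering::Relaxed);
    }
}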
458
src/stream/buffer_pool.rs
Normal file
458
src/stream/buffer_pool.rs
Normal file
@@ -0,0 +1,458 @@
|
||||
//! Reusable buffer pool to avoid allocations in hot paths
|
||||
//!
|
||||
//! This module provides a thread-safe pool of BytesMut buffers
|
||||
//! that can be reused across connections to reduce allocation pressure.
|
||||
|
||||
#![allow(dead_code)]
|
||||
|
||||
use bytes::BytesMut;
|
||||
use crossbeam_queue::ArrayQueue;
|
||||
use std::ops::{Deref, DerefMut};
|
||||
use std::sync::atomic::{AtomicUsize, Ordering};
|
||||
use std::sync::Arc;
|
||||
|
||||
// ============= Configuration =============
|
||||
|
||||
/// Default buffer size
|
||||
/// CHANGED: Reduced from 64KB to 16KB to match TLS record size and prevent bufferbloat.
|
||||
pub const DEFAULT_BUFFER_SIZE: usize = 16 * 1024;
|
||||
|
||||
/// Default maximum number of pooled buffers
|
||||
pub const DEFAULT_MAX_BUFFERS: usize = 1024;
|
||||
|
||||
// ============= Buffer Pool =============
|
||||
|
||||
/// Thread-safe pool of reusable buffers
|
||||
pub struct BufferPool {
|
||||
/// Queue of available buffers
|
||||
buffers: ArrayQueue<BytesMut>,
|
||||
/// Size of each buffer
|
||||
buffer_size: usize,
|
||||
/// Maximum number of buffers to pool
|
||||
max_buffers: usize,
|
||||
/// Total allocated buffers (including in-use)
|
||||
allocated: AtomicUsize,
|
||||
/// Number of times we had to create a new buffer
|
||||
misses: AtomicUsize,
|
||||
/// Number of successful reuses
|
||||
hits: AtomicUsize,
|
||||
}
|
||||
|
||||
impl BufferPool {
|
||||
/// Create a new buffer pool with default settings
|
||||
pub fn new() -> Self {
|
||||
Self::with_config(DEFAULT_BUFFER_SIZE, DEFAULT_MAX_BUFFERS)
|
||||
}
|
||||
|
||||
/// Create a buffer pool with custom configuration
|
||||
pub fn with_config(buffer_size: usize, max_buffers: usize) -> Self {
|
||||
Self {
|
||||
buffers: ArrayQueue::new(max_buffers),
|
||||
buffer_size,
|
||||
max_buffers,
|
||||
allocated: AtomicUsize::new(0),
|
||||
misses: AtomicUsize::new(0),
|
||||
hits: AtomicUsize::new(0),
|
||||
}
|
||||
}
|
||||
|
||||
/// Get a buffer from the pool, or create a new one if empty
|
||||
pub fn get(self: &Arc<Self>) -> PooledBuffer {
|
||||
match self.buffers.pop() {
|
||||
Some(mut buffer) => {
|
||||
self.hits.fetch_add(1, Ordering::Relaxed);
|
||||
buffer.clear();
|
||||
PooledBuffer {
|
||||
buffer: Some(buffer),
|
||||
pool: Arc::clone(self),
|
||||
}
|
||||
}
|
||||
None => {
|
||||
self.misses.fetch_add(1, Ordering::Relaxed);
|
||||
self.allocated.fetch_add(1, Ordering::Relaxed);
|
||||
PooledBuffer {
|
||||
buffer: Some(BytesMut::with_capacity(self.buffer_size)),
|
||||
pool: Arc::clone(self),
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Try to get a buffer, returns None if pool is empty
|
||||
pub fn try_get(self: &Arc<Self>) -> Option<PooledBuffer> {
|
||||
self.buffers.pop().map(|mut buffer| {
|
||||
self.hits.fetch_add(1, Ordering::Relaxed);
|
||||
buffer.clear();
|
||||
PooledBuffer {
|
||||
buffer: Some(buffer),
|
||||
pool: Arc::clone(self),
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
/// Return a buffer to the pool
|
||||
fn return_buffer(&self, mut buffer: BytesMut) {
|
||||
// Clear the buffer but keep capacity
|
||||
buffer.clear();
|
||||
|
||||
// Only return if we haven't exceeded max and buffer is right size
|
||||
if buffer.capacity() >= self.buffer_size {
|
||||
// Try to push to pool, if full just drop
|
||||
let _ = self.buffers.push(buffer);
|
||||
}
|
||||
// If buffer was dropped (pool full), decrement allocated
|
||||
// Actually we don't decrement here because the buffer might have been
|
||||
// grown beyond our size - we just let it go
|
||||
}
|
||||
|
||||
/// Get pool statistics
|
||||
pub fn stats(&self) -> PoolStats {
|
||||
PoolStats {
|
||||
pooled: self.buffers.len(),
|
||||
allocated: self.allocated.load(Ordering::Relaxed),
|
||||
max_buffers: self.max_buffers,
|
||||
buffer_size: self.buffer_size,
|
||||
hits: self.hits.load(Ordering::Relaxed),
|
||||
misses: self.misses.load(Ordering::Relaxed),
|
||||
}
|
||||
}
|
||||
|
||||
/// Get buffer size
|
||||
pub fn buffer_size(&self) -> usize {
|
||||
self.buffer_size
|
||||
}
|
||||
|
||||
/// Preallocate buffers to fill the pool
|
||||
pub fn preallocate(&self, count: usize) {
|
||||
let to_alloc = count.min(self.max_buffers);
|
||||
for _ in 0..to_alloc {
|
||||
if self.buffers.push(BytesMut::with_capacity(self.buffer_size)).is_err() {
|
||||
break;
|
||||
}
|
||||
self.allocated.fetch_add(1, Ordering::Relaxed);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl Default for BufferPool {
|
||||
fn default() -> Self {
|
||||
Self::new()
|
||||
}
|
||||
}
|
||||
|
||||
// ============= Pool Statistics =============
|
||||
|
||||
/// Statistics about buffer pool usage
|
||||
#[derive(Debug, Clone)]
|
||||
pub struct PoolStats {
|
||||
/// Current number of buffers in pool
|
||||
pub pooled: usize,
|
||||
/// Total buffers allocated (in-use + pooled)
|
||||
pub allocated: usize,
|
||||
/// Maximum buffers allowed
|
||||
pub max_buffers: usize,
|
||||
/// Size of each buffer
|
||||
pub buffer_size: usize,
|
||||
/// Number of cache hits (reused buffer)
|
||||
pub hits: usize,
|
||||
/// Number of cache misses (new allocation)
|
||||
pub misses: usize,
|
||||
}
|
||||
|
||||
impl PoolStats {
|
||||
/// Get hit rate as percentage
|
||||
pub fn hit_rate(&self) -> f64 {
|
||||
let total = self.hits + self.misses;
|
||||
if total == 0 {
|
||||
0.0
|
||||
} else {
|
||||
(self.hits as f64 / total as f64) * 100.0
|
||||
}
|
||||
}
|
||||
}
|
||||

// ============= Pooled Buffer =============

/// A buffer that automatically returns to the pool when dropped
pub struct PooledBuffer {
    buffer: Option<BytesMut>,
    pool: Arc<BufferPool>,
}

impl PooledBuffer {
    /// Take the inner buffer, preventing return to pool
    pub fn take(mut self) -> BytesMut {
        self.buffer.take().unwrap()
    }

    /// Get the capacity of the buffer
    pub fn capacity(&self) -> usize {
        self.buffer.as_ref().map(|b| b.capacity()).unwrap_or(0)
    }

    /// Check if buffer is empty
    pub fn is_empty(&self) -> bool {
        self.buffer.as_ref().map(|b| b.is_empty()).unwrap_or(true)
    }

    /// Get the length of data in buffer
    pub fn len(&self) -> usize {
        self.buffer.as_ref().map(|b| b.len()).unwrap_or(0)
    }

    /// Clear the buffer
    pub fn clear(&mut self) {
        if let Some(ref mut b) = self.buffer {
            b.clear();
        }
    }
}

impl Deref for PooledBuffer {
    type Target = BytesMut;

    fn deref(&self) -> &Self::Target {
        self.buffer.as_ref().expect("buffer taken")
    }
}

impl DerefMut for PooledBuffer {
    fn deref_mut(&mut self) -> &mut Self::Target {
        self.buffer.as_mut().expect("buffer taken")
    }
}

impl Drop for PooledBuffer {
    fn drop(&mut self) {
        if let Some(buffer) = self.buffer.take() {
            self.pool.return_buffer(buffer);
        }
    }
}

impl AsRef<[u8]> for PooledBuffer {
    fn as_ref(&self) -> &[u8] {
        self.buffer.as_ref().map(|b| b.as_ref()).unwrap_or(&[])
    }
}

impl AsMut<[u8]> for PooledBuffer {
    fn as_mut(&mut self) -> &mut [u8] {
        self.buffer.as_mut().map(|b| b.as_mut()).unwrap_or(&mut [])
    }
}

// ============= Scoped Buffer =============

/// A buffer that can be used for a scoped operation.
/// Useful for ensuring the buffer is cleared even on early return.
pub struct ScopedBuffer<'a> {
    buffer: &'a mut PooledBuffer,
}

impl<'a> ScopedBuffer<'a> {
    /// Create a new scoped buffer
    pub fn new(buffer: &'a mut PooledBuffer) -> Self {
        buffer.clear();
        Self { buffer }
    }
}

impl<'a> Deref for ScopedBuffer<'a> {
    type Target = BytesMut;

    fn deref(&self) -> &Self::Target {
        self.buffer.deref()
    }
}

impl<'a> DerefMut for ScopedBuffer<'a> {
    fn deref_mut(&mut self) -> &mut Self::Target {
        self.buffer.deref_mut()
    }
}

impl<'a> Drop for ScopedBuffer<'a> {
    fn drop(&mut self) {
        self.buffer.clear();
    }
}
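
// Editorial sketch, not part of the diff: the early-return guarantee the
// doc comment above describes. `fallible_step` is hypothetical.
fn fallible_step() -> std::io::Result<()> {
    Ok(())
}

fn handle(buf: &mut PooledBuffer) -> std::io::Result<()> {
    let mut scoped = ScopedBuffer::new(buf);
    scoped.extend_from_slice(b"partial");
    fallible_step()?; // on Err, `scoped` drops here and clears `buf`
    Ok(())
}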

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_pool_basic() {
        let pool = Arc::new(BufferPool::with_config(1024, 10));

        // Get a buffer
        let mut buf1 = pool.get();
        buf1.extend_from_slice(b"hello");
        assert_eq!(&buf1[..], b"hello");

        // Drop returns to pool
        drop(buf1);

        let stats = pool.stats();
        assert_eq!(stats.pooled, 1);
        assert_eq!(stats.hits, 0);
        assert_eq!(stats.misses, 1);

        // Get again - should reuse
        let buf2 = pool.get();
        assert!(buf2.is_empty()); // Buffer was cleared

        let stats = pool.stats();
        assert_eq!(stats.pooled, 0);
        assert_eq!(stats.hits, 1);
    }

    #[test]
    fn test_pool_multiple_buffers() {
        let pool = Arc::new(BufferPool::with_config(1024, 10));

        // Get multiple buffers
        let buf1 = pool.get();
        let buf2 = pool.get();
        let buf3 = pool.get();

        let stats = pool.stats();
        assert_eq!(stats.allocated, 3);
        assert_eq!(stats.pooled, 0);

        // Return all
        drop(buf1);
        drop(buf2);
        drop(buf3);

        let stats = pool.stats();
        assert_eq!(stats.pooled, 3);
    }

    #[test]
    fn test_pool_overflow() {
        let pool = Arc::new(BufferPool::with_config(1024, 2));

        // Get 3 buffers (more than max)
        let buf1 = pool.get();
        let buf2 = pool.get();
        let buf3 = pool.get();

        // Return all - only 2 should be pooled
        drop(buf1);
        drop(buf2);
        drop(buf3);

        let stats = pool.stats();
        assert_eq!(stats.pooled, 2);
    }

    #[test]
    fn test_pool_take() {
        let pool = Arc::new(BufferPool::with_config(1024, 10));

        let mut buf = pool.get();
        buf.extend_from_slice(b"data");

        // Take ownership, buffer should not return to pool
        let taken = buf.take();
        assert_eq!(&taken[..], b"data");

        let stats = pool.stats();
        assert_eq!(stats.pooled, 0);
    }

    #[test]
    fn test_pool_preallocate() {
        let pool = Arc::new(BufferPool::with_config(1024, 10));
        pool.preallocate(5);

        let stats = pool.stats();
        assert_eq!(stats.pooled, 5);
        assert_eq!(stats.allocated, 5);
    }

    #[test]
    fn test_pool_try_get() {
        let pool = Arc::new(BufferPool::with_config(1024, 10));

        // Pool is empty, try_get returns None
        assert!(pool.try_get().is_none());

        // Add a buffer to pool
        pool.preallocate(1);

        // Now try_get should succeed once while the buffer is held
        let buf = pool.try_get();
        assert!(buf.is_some());
        // While buffer is held, pool is empty
        assert!(pool.try_get().is_none());
        // Drop buffer -> returns to pool, should be obtainable again
        drop(buf);
        assert!(pool.try_get().is_some());
    }

    #[test]
    fn test_hit_rate() {
        let pool = Arc::new(BufferPool::with_config(1024, 10));

        // First get is a miss
        let buf1 = pool.get();
        drop(buf1);

        // Second get is a hit
        let buf2 = pool.get();
        drop(buf2);

        // Third get is a hit
        let _buf3 = pool.get();

        let stats = pool.stats();
        assert_eq!(stats.hits, 2);
        assert_eq!(stats.misses, 1);
        assert!((stats.hit_rate() - 66.67).abs() < 1.0);
    }

    #[test]
    fn test_scoped_buffer() {
        let pool = Arc::new(BufferPool::with_config(1024, 10));
        let mut buf = pool.get();

        {
            let mut scoped = ScopedBuffer::new(&mut buf);
            scoped.extend_from_slice(b"scoped data");
            assert_eq!(&scoped[..], b"scoped data");
        }

        // After scoped is dropped, buffer is cleared
        assert!(buf.is_empty());
    }

    #[test]
    fn test_concurrent_access() {
        use std::thread;

        let pool = Arc::new(BufferPool::with_config(1024, 100));
        let mut handles = vec![];

        for _ in 0..10 {
            let pool_clone = Arc::clone(&pool);
            handles.push(thread::spawn(move || {
                for _ in 0..100 {
                    let mut buf = pool_clone.get();
                    buf.extend_from_slice(b"test");
                    // buf auto-returned on drop
                }
            }));
        }

        for handle in handles {
            handle.join().unwrap();
        }

        let stats = pool.stats();
        // All buffers should be returned
        assert!(stats.pooled > 0);
    }
}
File diff suppressed because it is too large

191  src/stream/frame.rs  Normal file
@@ -0,0 +1,191 @@
//! MTProto frame types and traits
//!
//! This module defines the common types and traits used by all
//! frame encoding/decoding implementations.

#![allow(dead_code)]

use bytes::{Bytes, BytesMut};
use std::io::Result;
use std::sync::Arc;

use crate::protocol::constants::ProtoTag;
use crate::crypto::SecureRandom;

// ============= Frame Types =============

/// A decoded MTProto frame
#[derive(Debug, Clone)]
pub struct Frame {
    /// Frame payload data
    pub data: Bytes,
    /// Frame metadata
    pub meta: FrameMeta,
}

impl Frame {
    /// Create a new frame with data and default metadata
    pub fn new(data: Bytes) -> Self {
        Self {
            data,
            meta: FrameMeta::default(),
        }
    }

    /// Create a new frame with data and metadata
    pub fn with_meta(data: Bytes, meta: FrameMeta) -> Self {
        Self { data, meta }
    }

    /// Create an empty frame
    pub fn empty() -> Self {
        Self::new(Bytes::new())
    }

    /// Check if frame is empty
    pub fn is_empty(&self) -> bool {
        self.data.is_empty()
    }

    /// Get frame length
    pub fn len(&self) -> usize {
        self.data.len()
    }

    /// Create a QuickAck request frame
    pub fn quickack(data: Bytes) -> Self {
        Self {
            data,
            meta: FrameMeta {
                quickack: true,
                ..Default::default()
            },
        }
    }

    /// Create a simple ACK frame
    pub fn simple_ack(data: Bytes) -> Self {
        Self {
            data,
            meta: FrameMeta {
                simple_ack: true,
                ..Default::default()
            },
        }
    }
}

/// Frame metadata
#[derive(Debug, Clone, Default)]
pub struct FrameMeta {
    /// Quick ACK requested - client wants immediate acknowledgment
    pub quickack: bool,
    /// This is a simple ACK message (reversed data)
    pub simple_ack: bool,
    /// Original padding length (for secure mode)
    pub padding_len: u8,
}

impl FrameMeta {
    /// Create new empty metadata
    pub fn new() -> Self {
        Self::default()
    }

    /// Create with quickack flag
    pub fn with_quickack(mut self) -> Self {
        self.quickack = true;
        self
    }

    /// Create with simple_ack flag
    pub fn with_simple_ack(mut self) -> Self {
        self.simple_ack = true;
        self
    }

    /// Create with padding length
    pub fn with_padding(mut self, len: u8) -> Self {
        self.padding_len = len;
        self
    }

    /// Check if any special flags are set
    pub fn has_flags(&self) -> bool {
        self.quickack || self.simple_ack
    }
}

// ============= Codec Trait =============

/// Trait for frame codecs that can encode and decode frames
pub trait FrameCodec: Send + Sync {
    /// Get the protocol tag for this codec
    fn proto_tag(&self) -> ProtoTag;

    /// Encode a frame into the destination buffer
    ///
    /// Returns the number of bytes written.
    fn encode(&self, frame: &Frame, dst: &mut BytesMut) -> Result<usize>;

    /// Try to decode a frame from the source buffer
    ///
    /// Returns:
    /// - `Ok(Some(frame))` if a complete frame was decoded
    /// - `Ok(None)` if more data is needed
    /// - `Err(e)` if an error occurred
    ///
    /// On success, the consumed bytes are removed from `src`.
    fn decode(&self, src: &mut BytesMut) -> Result<Option<Frame>>;

    /// Get the minimum bytes needed to determine frame length
    fn min_header_size(&self) -> usize;

    /// Get the maximum allowed frame size
    fn max_frame_size(&self) -> usize {
        // Default: 16MB
        16 * 1024 * 1024
    }
}

// ============= Codec Factory =============

/// Create a frame codec for the given protocol tag
pub fn create_codec(proto_tag: ProtoTag, rng: Arc<SecureRandom>) -> Box<dyn FrameCodec> {
    match proto_tag {
        ProtoTag::Abridged => Box::new(crate::stream::frame_codec::AbridgedCodec::new()),
        ProtoTag::Intermediate => Box::new(crate::stream::frame_codec::IntermediateCodec::new()),
        ProtoTag::Secure => Box::new(crate::stream::frame_codec::SecureCodec::new(rng)),
    }
}
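
// Editorial sketch, not part of the diff: driving the boxed codec returned
// by create_codec(). Function and buffer names are illustrative.
fn pump(codec: &dyn FrameCodec, src: &mut BytesMut, dst: &mut BytesMut) -> Result<()> {
    // Decode every complete frame buffered in `src` and re-encode it into
    // `dst`, e.g. when forwarding between transports.
    while let Some(frame) = codec.decode(src)? {
        codec.encode(&frame, dst)?;
    }
    Ok(())
}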

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_frame_creation() {
        let frame = Frame::new(Bytes::from_static(b"test"));
        assert_eq!(frame.len(), 4);
        assert!(!frame.is_empty());
        assert!(!frame.meta.quickack);

        let frame = Frame::empty();
        assert!(frame.is_empty());

        let frame = Frame::quickack(Bytes::from_static(b"ack"));
        assert!(frame.meta.quickack);
    }

    #[test]
    fn test_frame_meta() {
        let meta = FrameMeta::new()
            .with_quickack()
            .with_padding(3);

        assert!(meta.quickack);
        assert!(!meta.simple_ack);
        assert_eq!(meta.padding_len, 3);
        assert!(meta.has_flags());
    }
}
633  src/stream/frame_codec.rs  Normal file
@@ -0,0 +1,633 @@
//! tokio-util codec integration for MTProto frames
//!
//! This module provides Encoder/Decoder implementations compatible
//! with tokio-util's Framed wrapper for easy async frame I/O.

#![allow(dead_code)]

use bytes::{Bytes, BytesMut, BufMut};
use std::io::{self, Error, ErrorKind};
use std::sync::Arc;
use tokio_util::codec::{Decoder, Encoder};

use crate::protocol::constants::{
    ProtoTag, is_valid_secure_payload_len, secure_padding_len, secure_payload_len_from_wire_len,
};
use crate::crypto::SecureRandom;
use super::frame::{Frame, FrameMeta, FrameCodec as FrameCodecTrait};

// ============= Unified Codec =============

/// Unified frame codec that wraps all protocol variants
///
/// This codec implements tokio-util's Encoder and Decoder traits,
/// allowing it to be used with `Framed` for async frame I/O.
pub struct FrameCodec {
    /// Protocol variant
    proto_tag: ProtoTag,
    /// Maximum allowed frame size
    max_frame_size: usize,
    /// RNG for secure padding
    rng: Arc<SecureRandom>,
}

impl FrameCodec {
    /// Create a new codec for the given protocol
    pub fn new(proto_tag: ProtoTag, rng: Arc<SecureRandom>) -> Self {
        Self {
            proto_tag,
            max_frame_size: 16 * 1024 * 1024, // 16MB default
            rng,
        }
    }

    /// Set maximum frame size
    pub fn with_max_frame_size(mut self, size: usize) -> Self {
        self.max_frame_size = size;
        self
    }

    /// Get protocol tag
    pub fn proto_tag(&self) -> ProtoTag {
        self.proto_tag
    }
}

impl Decoder for FrameCodec {
    type Item = Frame;
    type Error = io::Error;

    fn decode(&mut self, src: &mut BytesMut) -> Result<Option<Self::Item>, Self::Error> {
        match self.proto_tag {
            ProtoTag::Abridged => decode_abridged(src, self.max_frame_size),
            ProtoTag::Intermediate => decode_intermediate(src, self.max_frame_size),
            ProtoTag::Secure => decode_secure(src, self.max_frame_size),
        }
    }
}

impl Encoder<Frame> for FrameCodec {
    type Error = io::Error;

    fn encode(&mut self, frame: Frame, dst: &mut BytesMut) -> Result<(), Self::Error> {
        match self.proto_tag {
            ProtoTag::Abridged => encode_abridged(&frame, dst),
            ProtoTag::Intermediate => encode_intermediate(&frame, dst),
            ProtoTag::Secure => encode_secure(&frame, dst, &self.rng),
        }
    }
}

// ============= Abridged Protocol =============

fn decode_abridged(src: &mut BytesMut, max_size: usize) -> io::Result<Option<Frame>> {
    if src.is_empty() {
        return Ok(None);
    }

    let mut meta = FrameMeta::new();
    let first_byte = src[0];

    // Extract length and quickack flag
    let mut len_words = (first_byte & 0x7f) as usize;
    if first_byte >= 0x80 {
        meta.quickack = true;
    }

    let header_len;

    if len_words == 0x7f {
        // Extended length (3 more bytes needed)
        if src.len() < 4 {
            return Ok(None);
        }
        len_words = u32::from_le_bytes([src[1], src[2], src[3], 0]) as usize;
        header_len = 4;
    } else {
        header_len = 1;
    }

    // Length is in 4-byte words
    let byte_len = len_words.checked_mul(4).ok_or_else(|| {
        Error::new(ErrorKind::InvalidData, "frame length overflow")
    })?;

    // Validate size
    if byte_len > max_size {
        return Err(Error::new(
            ErrorKind::InvalidData,
            format!("frame too large: {} bytes (max {})", byte_len, max_size)
        ));
    }

    let total_len = header_len + byte_len;

    if src.len() < total_len {
        // Reserve space for the rest of the frame
        src.reserve(total_len - src.len());
        return Ok(None);
    }

    // Extract data
    let _ = src.split_to(header_len);
    let data = src.split_to(byte_len).freeze();

    Ok(Some(Frame::with_meta(data, meta)))
}

fn encode_abridged(frame: &Frame, dst: &mut BytesMut) -> io::Result<()> {
    let data = &frame.data;

    // Validate alignment
    if !data.len().is_multiple_of(4) {
        return Err(Error::new(
            ErrorKind::InvalidInput,
            format!("abridged frame must be 4-byte aligned, got {} bytes", data.len())
        ));
    }

    // Simple ACK: send reversed data without header
    if frame.meta.simple_ack {
        dst.reserve(data.len());
        for byte in data.iter().rev() {
            dst.put_u8(*byte);
        }
        return Ok(());
    }

    let len_words = data.len() / 4;

    if len_words < 0x7f {
        // Short header
        dst.reserve(1 + data.len());
        let mut len_byte = len_words as u8;
        if frame.meta.quickack {
            len_byte |= 0x80;
        }
        dst.put_u8(len_byte);
    } else if len_words < (1 << 24) {
        // Extended header
        dst.reserve(4 + data.len());
        let mut first = 0x7fu8;
        if frame.meta.quickack {
            first |= 0x80;
        }
        dst.put_u8(first);
        let len_bytes = (len_words as u32).to_le_bytes();
        dst.extend_from_slice(&len_bytes[..3]);
    } else {
        return Err(Error::new(
            ErrorKind::InvalidInput,
            format!("frame too large: {} bytes", data.len())
        ));
    }

    dst.extend_from_slice(data);
    Ok(())
}
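
// Editorial worked example, not part of the diff: the abridged header
// produced by encode_abridged() above. Lengths are counted in 4-byte
// words, so a 64-byte payload (16 words) encodes as the single header
// byte 0x10, or 0x90 with the quickack bit set (0x10 | 0x80). A 4096-byte
// payload (1024 words) exceeds 0x7e words and takes the extended form:
// 0x7f followed by 1024 as a little-endian 24-bit value (0x00 0x04 0x00),
// i.e. a 4-byte header in total.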

// ============= Intermediate Protocol =============

fn decode_intermediate(src: &mut BytesMut, max_size: usize) -> io::Result<Option<Frame>> {
    if src.len() < 4 {
        return Ok(None);
    }

    let mut meta = FrameMeta::new();
    let mut len = u32::from_le_bytes([src[0], src[1], src[2], src[3]]) as usize;

    // Check QuickACK flag
    if len >= 0x80000000 {
        meta.quickack = true;
        len -= 0x80000000;
    }

    // Validate size
    if len > max_size {
        return Err(Error::new(
            ErrorKind::InvalidData,
            format!("frame too large: {} bytes (max {})", len, max_size)
        ));
    }

    let total_len = 4 + len;

    if src.len() < total_len {
        src.reserve(total_len - src.len());
        return Ok(None);
    }

    // Extract data
    let _ = src.split_to(4);
    let data = src.split_to(len).freeze();

    Ok(Some(Frame::with_meta(data, meta)))
}

fn encode_intermediate(frame: &Frame, dst: &mut BytesMut) -> io::Result<()> {
    let data = &frame.data;

    // Simple ACK: just send data
    if frame.meta.simple_ack {
        dst.reserve(data.len());
        dst.extend_from_slice(data);
        return Ok(());
    }

    dst.reserve(4 + data.len());

    let mut len = data.len() as u32;
    if frame.meta.quickack {
        len |= 0x80000000;
    }

    dst.extend_from_slice(&len.to_le_bytes());
    dst.extend_from_slice(data);

    Ok(())
}

// ============= Secure Intermediate Protocol =============

fn decode_secure(src: &mut BytesMut, max_size: usize) -> io::Result<Option<Frame>> {
    if src.len() < 4 {
        return Ok(None);
    }

    let mut meta = FrameMeta::new();
    let mut len = u32::from_le_bytes([src[0], src[1], src[2], src[3]]) as usize;

    // Check QuickACK flag
    if len >= 0x80000000 {
        meta.quickack = true;
        len -= 0x80000000;
    }

    // Validate size
    if len > max_size {
        return Err(Error::new(
            ErrorKind::InvalidData,
            format!("frame too large: {} bytes (max {})", len, max_size)
        ));
    }

    let total_len = 4 + len;

    if src.len() < total_len {
        src.reserve(total_len - src.len());
        return Ok(None);
    }

    let data_len = secure_payload_len_from_wire_len(len).ok_or_else(|| {
        Error::new(
            ErrorKind::InvalidData,
            format!("invalid secure frame length: {len}"),
        )
    })?;
    let padding_len = len - data_len;

    meta.padding_len = padding_len as u8;

    // Extract data (excluding padding)
    let _ = src.split_to(4);
    let all_data = src.split_to(len);
    // Copy only the data portion, excluding padding
    let data = Bytes::copy_from_slice(&all_data[..data_len]);

    Ok(Some(Frame::with_meta(data, meta)))
}

fn encode_secure(frame: &Frame, dst: &mut BytesMut, rng: &SecureRandom) -> io::Result<()> {
    let data = &frame.data;

    // Simple ACK: just send data
    if frame.meta.simple_ack {
        dst.reserve(data.len());
        dst.extend_from_slice(data);
        return Ok(());
    }

    if !is_valid_secure_payload_len(data.len()) {
        return Err(Error::new(
            ErrorKind::InvalidData,
            format!("secure payload must be 4-byte aligned, got {}", data.len()),
        ));
    }

    // Generate padding that keeps total length non-divisible by 4.
    let padding_len = secure_padding_len(data.len(), rng);

    let total_len = data.len() + padding_len;
    dst.reserve(4 + total_len);

    let mut len = total_len as u32;
    if frame.meta.quickack {
        len |= 0x80000000;
    }

    dst.extend_from_slice(&len.to_le_bytes());
    dst.extend_from_slice(data);

    if padding_len > 0 {
        let padding = rng.bytes(padding_len);
        dst.extend_from_slice(&padding);
    }

    Ok(())
}
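
// Editorial note, not part of the diff: the invariant behind the padding
// above. The payload is 4-byte aligned, so padding that leaves the wire
// length non-divisible by 4 lets decode_secure() separate payload from
// padding again via secure_payload_len_from_wire_len(), recording the
// remainder in meta.padding_len. For example, assuming that helper rounds
// the wire length down to a multiple of 4 (as the frame_stream.rs change
// below suggests), an 8-byte payload sent with 3 padding bytes travels as
// len = 11, and the decoder keeps the first 8 bytes and strips the rest.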

// ============= Typed Codecs =============

/// Abridged protocol codec
pub struct AbridgedCodec {
    max_frame_size: usize,
}

impl AbridgedCodec {
    pub fn new() -> Self {
        Self {
            max_frame_size: 16 * 1024 * 1024,
        }
    }
}

impl Default for AbridgedCodec {
    fn default() -> Self {
        Self::new()
    }
}

impl Decoder for AbridgedCodec {
    type Item = Frame;
    type Error = io::Error;

    fn decode(&mut self, src: &mut BytesMut) -> Result<Option<Self::Item>, Self::Error> {
        decode_abridged(src, self.max_frame_size)
    }
}

impl Encoder<Frame> for AbridgedCodec {
    type Error = io::Error;

    fn encode(&mut self, frame: Frame, dst: &mut BytesMut) -> Result<(), Self::Error> {
        encode_abridged(&frame, dst)
    }
}

impl FrameCodecTrait for AbridgedCodec {
    fn proto_tag(&self) -> ProtoTag {
        ProtoTag::Abridged
    }

    fn encode(&self, frame: &Frame, dst: &mut BytesMut) -> io::Result<usize> {
        let before = dst.len();
        encode_abridged(frame, dst)?;
        Ok(dst.len() - before)
    }

    fn decode(&self, src: &mut BytesMut) -> io::Result<Option<Frame>> {
        decode_abridged(src, self.max_frame_size)
    }

    fn min_header_size(&self) -> usize {
        1
    }
}

/// Intermediate protocol codec
pub struct IntermediateCodec {
    max_frame_size: usize,
}

impl IntermediateCodec {
    pub fn new() -> Self {
        Self {
            max_frame_size: 16 * 1024 * 1024,
        }
    }
}

impl Default for IntermediateCodec {
    fn default() -> Self {
        Self::new()
    }
}

impl Decoder for IntermediateCodec {
    type Item = Frame;
    type Error = io::Error;

    fn decode(&mut self, src: &mut BytesMut) -> Result<Option<Self::Item>, Self::Error> {
        decode_intermediate(src, self.max_frame_size)
    }
}

impl Encoder<Frame> for IntermediateCodec {
    type Error = io::Error;

    fn encode(&mut self, frame: Frame, dst: &mut BytesMut) -> Result<(), Self::Error> {
        encode_intermediate(&frame, dst)
    }
}

impl FrameCodecTrait for IntermediateCodec {
    fn proto_tag(&self) -> ProtoTag {
        ProtoTag::Intermediate
    }

    fn encode(&self, frame: &Frame, dst: &mut BytesMut) -> io::Result<usize> {
        let before = dst.len();
        encode_intermediate(frame, dst)?;
        Ok(dst.len() - before)
    }

    fn decode(&self, src: &mut BytesMut) -> io::Result<Option<Frame>> {
        decode_intermediate(src, self.max_frame_size)
    }

    fn min_header_size(&self) -> usize {
        4
    }
}

/// Secure Intermediate protocol codec
pub struct SecureCodec {
    max_frame_size: usize,
    rng: Arc<SecureRandom>,
}

impl SecureCodec {
    pub fn new(rng: Arc<SecureRandom>) -> Self {
        Self {
            max_frame_size: 16 * 1024 * 1024,
            rng,
        }
    }
}

impl Default for SecureCodec {
    fn default() -> Self {
        Self::new(Arc::new(SecureRandom::new()))
    }
}

impl Decoder for SecureCodec {
    type Item = Frame;
    type Error = io::Error;

    fn decode(&mut self, src: &mut BytesMut) -> Result<Option<Self::Item>, Self::Error> {
        decode_secure(src, self.max_frame_size)
    }
}

impl Encoder<Frame> for SecureCodec {
    type Error = io::Error;

    fn encode(&mut self, frame: Frame, dst: &mut BytesMut) -> Result<(), Self::Error> {
        encode_secure(&frame, dst, &self.rng)
    }
}

impl FrameCodecTrait for SecureCodec {
    fn proto_tag(&self) -> ProtoTag {
        ProtoTag::Secure
    }

    fn encode(&self, frame: &Frame, dst: &mut BytesMut) -> io::Result<usize> {
        let before = dst.len();
        encode_secure(frame, dst, &self.rng)?;
        Ok(dst.len() - before)
    }

    fn decode(&self, src: &mut BytesMut) -> io::Result<Option<Frame>> {
        decode_secure(src, self.max_frame_size)
    }

    fn min_header_size(&self) -> usize {
        4
    }
}

// ============= Tests =============

#[cfg(test)]
mod tests {
    use super::*;
    use tokio_util::codec::{FramedRead, FramedWrite};
    use tokio::io::duplex;
    use futures::{SinkExt, StreamExt};
    use crate::crypto::SecureRandom;
    use std::sync::Arc;

    #[tokio::test]
    async fn test_framed_abridged() {
        let (client, server) = duplex(4096);

        let mut writer = FramedWrite::new(client, AbridgedCodec::new());
        let mut reader = FramedRead::new(server, AbridgedCodec::new());

        // Write a frame
        let frame = Frame::new(Bytes::from_static(&[1, 2, 3, 4, 5, 6, 7, 8]));
        writer.send(frame).await.unwrap();

        // Read it back
        let received = reader.next().await.unwrap().unwrap();
        assert_eq!(&received.data[..], &[1, 2, 3, 4, 5, 6, 7, 8]);
    }

    #[tokio::test]
    async fn test_framed_intermediate() {
        let (client, server) = duplex(4096);

        let mut writer = FramedWrite::new(client, IntermediateCodec::new());
        let mut reader = FramedRead::new(server, IntermediateCodec::new());

        let frame = Frame::new(Bytes::from_static(b"hello world"));
        writer.send(frame).await.unwrap();

        let received = reader.next().await.unwrap().unwrap();
        assert_eq!(&received.data[..], b"hello world");
    }

    #[tokio::test]
    async fn test_framed_secure() {
        let (client, server) = duplex(4096);

        let mut writer = FramedWrite::new(client, SecureCodec::new(Arc::new(SecureRandom::new())));
        let mut reader = FramedRead::new(server, SecureCodec::new(Arc::new(SecureRandom::new())));

        let original = Bytes::from_static(&[1, 2, 3, 4, 5, 6, 7, 8]);
        let frame = Frame::new(original.clone());
        writer.send(frame).await.unwrap();

        let received = reader.next().await.unwrap().unwrap();
        assert_eq!(&received.data[..], &original[..]);
    }

    #[tokio::test]
    async fn test_unified_codec() {
        for proto_tag in [ProtoTag::Abridged, ProtoTag::Intermediate, ProtoTag::Secure] {
            let (client, server) = duplex(4096);

            let mut writer = FramedWrite::new(client, FrameCodec::new(proto_tag, Arc::new(SecureRandom::new())));
            let mut reader = FramedRead::new(server, FrameCodec::new(proto_tag, Arc::new(SecureRandom::new())));

            // Use 4-byte aligned data for abridged compatibility
            let original = Bytes::from_static(&[1, 2, 3, 4, 5, 6, 7, 8]);
            let frame = Frame::new(original.clone());
            writer.send(frame).await.unwrap();

            let received = reader.next().await.unwrap().unwrap();
            assert_eq!(received.data.len(), 8);
        }
    }

    #[tokio::test]
    async fn test_multiple_frames() {
        let (client, server) = duplex(4096);

        let mut writer = FramedWrite::new(client, IntermediateCodec::new());
        let mut reader = FramedRead::new(server, IntermediateCodec::new());

        // Send multiple frames
        for i in 0..10 {
            let data: Vec<u8> = (0..((i + 1) * 10)).map(|j| (j % 256) as u8).collect();
            let frame = Frame::new(Bytes::from(data));
            writer.send(frame).await.unwrap();
        }

        // Receive them
        for i in 0..10 {
            let received = reader.next().await.unwrap().unwrap();
            assert_eq!(received.data.len(), (i + 1) * 10);
        }
    }

    #[tokio::test]
    async fn test_quickack_flag() {
        let (client, server) = duplex(4096);

        let mut writer = FramedWrite::new(client, IntermediateCodec::new());
        let mut reader = FramedRead::new(server, IntermediateCodec::new());

        let frame = Frame::quickack(Bytes::from_static(b"urgent"));
        writer.send(frame).await.unwrap();

        let received = reader.next().await.unwrap().unwrap();
        assert!(received.meta.quickack);
    }

    #[test]
    fn test_frame_too_large() {
        let mut codec = FrameCodec::new(ProtoTag::Intermediate, Arc::new(SecureRandom::new()))
            .with_max_frame_size(100);

        // Create a "frame" that claims to be very large
        let mut buf = BytesMut::new();
        buf.extend_from_slice(&1000u32.to_le_bytes()); // length = 1000
        buf.extend_from_slice(&[0u8; 10]); // partial data

        let result = codec.decode(&mut buf);
        assert!(result.is_err());
    }
}
@@ -1,11 +1,13 @@
 //! MTProto frame stream wrappers

-use bytes::{Bytes, BytesMut};
+#![allow(dead_code)]
+
+use bytes::Bytes;
 use std::io::{Error, ErrorKind, Result};
 use tokio::io::{AsyncRead, AsyncWrite, AsyncReadExt, AsyncWriteExt};
 use crate::protocol::constants::*;
-use crate::crypto::crc32;
-use crate::crypto::random::SECURE_RANDOM;
+use crate::crypto::{crc32, SecureRandom};
+use std::sync::Arc;
 use super::traits::{FrameMeta, LayeredStream};

 // ============= Abridged (Compact) Frame =============
@@ -76,7 +78,7 @@ impl<W> AbridgedFrameWriter<W> {
 impl<W: AsyncWrite + Unpin> AbridgedFrameWriter<W> {
     /// Write a frame
     pub async fn write_frame(&mut self, data: &[u8], meta: &FrameMeta) -> Result<()> {
-        if data.len() % 4 != 0 {
+        if !data.len().is_multiple_of(4) {
             return Err(Error::new(
                 ErrorKind::InvalidInput,
                 format!("Abridged frame must be aligned to 4 bytes, got {}", data.len()),
@@ -232,11 +234,13 @@ impl<R: AsyncRead + Unpin> SecureIntermediateFrameReader<R> {
         let mut data = vec![0u8; len];
         self.upstream.read_exact(&mut data).await?;

-        // Strip padding (not aligned to 4)
-        if len % 4 != 0 {
-            let actual_len = len - (len % 4);
-            data.truncate(actual_len);
-        }
+        let payload_len = secure_payload_len_from_wire_len(len).ok_or_else(|| {
+            Error::new(
+                ErrorKind::InvalidData,
+                format!("Invalid secure frame length: {len}"),
+            )
+        })?;
+        data.truncate(payload_len);

         Ok((Bytes::from(data), meta))
     }
@@ -251,11 +255,12 @@ impl<R> LayeredStream<R> for SecureIntermediateFrameReader<R> {
 /// Writer for secure intermediate MTProto framing
 pub struct SecureIntermediateFrameWriter<W> {
     upstream: W,
+    rng: Arc<SecureRandom>,
 }

 impl<W> SecureIntermediateFrameWriter<W> {
-    pub fn new(upstream: W) -> Self {
-        Self { upstream }
+    pub fn new(upstream: W, rng: Arc<SecureRandom>) -> Self {
+        Self { upstream, rng }
     }
 }

@@ -266,9 +271,16 @@ impl<W: AsyncWrite + Unpin> SecureIntermediateFrameWriter<W> {
             return Ok(());
         }

-        // Add random padding (0-3 bytes)
-        let padding_len = SECURE_RANDOM.range(4);
-        let padding = SECURE_RANDOM.bytes(padding_len);
+        if !is_valid_secure_payload_len(data.len()) {
+            return Err(Error::new(
+                ErrorKind::InvalidData,
+                format!("Secure payload must be 4-byte aligned, got {}", data.len()),
+            ));
+        }
+
+        // Add padding so total length is never divisible by 4 (MTProto Secure)
+        let padding_len = secure_padding_len(data.len(), &self.rng);
+        let padding = self.rng.bytes(padding_len);

         let total_len = data.len() + padding_len;
         let len_bytes = (total_len as u32).to_le_bytes();
@@ -319,7 +331,7 @@ impl<R: AsyncRead + Unpin> MtprotoFrameReader<R> {
         }

         // Validate length
-        if len < MIN_MSG_LEN || len > MAX_MSG_LEN || len % PADDING_FILLER.len() != 0 {
+        if !(MIN_MSG_LEN..=MAX_MSG_LEN).contains(&len) || !len.is_multiple_of(PADDING_FILLER.len()) {
             return Err(Error::new(
                 ErrorKind::InvalidData,
                 format!("Invalid message length: {}", len),
@@ -454,11 +466,11 @@ pub enum FrameWriterKind<W> {
 }

 impl<W: AsyncWrite + Unpin> FrameWriterKind<W> {
-    pub fn new(upstream: W, proto_tag: ProtoTag) -> Self {
+    pub fn new(upstream: W, proto_tag: ProtoTag, rng: Arc<SecureRandom>) -> Self {
         match proto_tag {
             ProtoTag::Abridged => FrameWriterKind::Abridged(AbridgedFrameWriter::new(upstream)),
             ProtoTag::Intermediate => FrameWriterKind::Intermediate(IntermediateFrameWriter::new(upstream)),
-            ProtoTag::Secure => FrameWriterKind::SecureIntermediate(SecureIntermediateFrameWriter::new(upstream)),
+            ProtoTag::Secure => FrameWriterKind::SecureIntermediate(SecureIntermediateFrameWriter::new(upstream, rng)),
         }
     }

@@ -483,6 +495,8 @@ impl<W: AsyncWrite + Unpin> FrameWriterKind<W> {
 mod tests {
     use super::*;
     use tokio::io::duplex;
+    use std::sync::Arc;
+    use crate::crypto::SecureRandom;

     #[tokio::test]
     async fn test_abridged_roundtrip() {
@@ -539,7 +553,7 @@ mod tests {
     async fn test_secure_intermediate_padding() {
         let (client, server) = duplex(1024);

-        let mut writer = SecureIntermediateFrameWriter::new(client);
+        let mut writer = SecureIntermediateFrameWriter::new(client, Arc::new(SecureRandom::new()));
         let mut reader = SecureIntermediateFrameReader::new(server);

         let data = vec![1u8, 2, 3, 4, 5, 6, 7, 8];
@@ -547,9 +561,7 @@ mod tests {
         writer.flush().await.unwrap();

         let (received, _meta) = reader.read_frame().await.unwrap();
-        // Received should have padding stripped to align to 4
-        let expected_len = (data.len() / 4) * 4;
-        assert_eq!(received.len(), expected_len);
+        assert_eq!(received.len(), data.len());
     }

     #[tokio::test]
@@ -572,7 +584,7 @@ mod tests {
     async fn test_frame_reader_kind() {
         let (client, server) = duplex(1024);

-        let mut writer = FrameWriterKind::new(client, ProtoTag::Intermediate);
+        let mut writer = FrameWriterKind::new(client, ProtoTag::Intermediate, Arc::new(SecureRandom::new()));
         let mut reader = FrameReaderKind::new(server, ProtoTag::Intermediate);

         let data = vec![1u8, 2, 3, 4];
@@ -582,4 +594,4 @@ mod tests {
         let (received, _) = reader.read_frame().await.unwrap();
         assert_eq!(&received[..], &data[..]);
     }
-}
+}
@@ -1,10 +1,49 @@
 //! Stream wrappers for MTProto protocol layers

+pub mod state;
+pub mod buffer_pool;
 pub mod traits;
 pub mod crypto_stream;
 pub mod tls_stream;
+pub mod frame;
+pub mod frame_codec;
+
+// Legacy compatibility - will be removed later
 pub mod frame_stream;

+// Re-export state machine types
+#[allow(unused_imports)]
+pub use state::{
+    StreamState, Transition, PollResult,
+    ReadBuffer, WriteBuffer, HeaderBuffer, YieldBuffer,
+};
+
+// Re-export buffer pool
+#[allow(unused_imports)]
+pub use buffer_pool::{BufferPool, PooledBuffer, PoolStats};
+
+// Re-export stream implementations
+#[allow(unused_imports)]
 pub use crypto_stream::{CryptoReader, CryptoWriter, PassthroughStream};
 pub use tls_stream::{FakeTlsReader, FakeTlsWriter};
-pub use frame_stream::*;
+
+// Re-export frame types
+#[allow(unused_imports)]
+pub use frame::{Frame, FrameMeta, FrameCodec as FrameCodecTrait, create_codec};
+
+// Re-export tokio-util compatible codecs
+#[allow(unused_imports)]
+pub use frame_codec::{
+    FrameCodec,
+    AbridgedCodec, IntermediateCodec, SecureCodec,
+};
+
+// Legacy re-exports for compatibility
+#[allow(unused_imports)]
+pub use frame_stream::{
+    AbridgedFrameReader, AbridgedFrameWriter,
+    IntermediateFrameReader, IntermediateFrameWriter,
+    SecureIntermediateFrameReader, SecureIntermediateFrameWriter,
+    MtprotoFrameReader, MtprotoFrameWriter,
+    FrameReaderKind, FrameWriterKind,
+};
573  src/stream/state.rs  Normal file
@@ -0,0 +1,573 @@
//! State machine foundation types for async streams
//!
//! This module provides core types and traits for implementing
//! stateful async streams with proper partial read/write handling.

#![allow(dead_code)]

use bytes::{Bytes, BytesMut};
use std::io;

// ============= Core Traits =============

/// Trait for stream states
pub trait StreamState: Sized {
    /// Check if this is a terminal state (no more transitions possible)
    fn is_terminal(&self) -> bool;

    /// Check if stream is in poisoned/error state
    fn is_poisoned(&self) -> bool;

    /// Get human-readable state name for debugging
    fn state_name(&self) -> &'static str;
}

// ============= Transition Types =============

/// Result of a state transition
#[derive(Debug)]
pub enum Transition<S, O> {
    /// Stay in the same state, no output
    Same,
    /// Transition to a new state, no output
    Next(S),
    /// Complete with output, typically transitions to Idle
    Complete(O),
    /// Yield output and transition to new state
    Yield(O, S),
    /// Error occurred, transition to error state
    Error(io::Error),
}

impl<S, O> Transition<S, O> {
    /// Check if transition produces output
    pub fn has_output(&self) -> bool {
        matches!(self, Transition::Complete(_) | Transition::Yield(_, _))
    }

    /// Map the output value
    pub fn map_output<U, F: FnOnce(O) -> U>(self, f: F) -> Transition<S, U> {
        match self {
            Transition::Same => Transition::Same,
            Transition::Next(s) => Transition::Next(s),
            Transition::Complete(o) => Transition::Complete(f(o)),
            Transition::Yield(o, s) => Transition::Yield(f(o), s),
            Transition::Error(e) => Transition::Error(e),
        }
    }

    /// Map the state value
    pub fn map_state<T, F: FnOnce(S) -> T>(self, f: F) -> Transition<T, O> {
        match self {
            Transition::Same => Transition::Same,
            Transition::Next(s) => Transition::Next(f(s)),
            Transition::Complete(o) => Transition::Complete(o),
            Transition::Yield(o, s) => Transition::Yield(o, f(s)),
            Transition::Error(e) => Transition::Error(e),
        }
    }
}

// ============= Poll Result Types =============

/// Result of polling for more data
#[derive(Debug)]
pub enum PollResult<T> {
    /// Data is ready
    Ready(T),
    /// Operation would block, need to poll again
    Pending,
    /// Need more input data (minimum bytes required)
    NeedInput(usize),
    /// End of stream reached
    Eof,
    /// Error occurred
    Error(io::Error),
}

impl<T> PollResult<T> {
    /// Check if result is ready
    pub fn is_ready(&self) -> bool {
        matches!(self, PollResult::Ready(_))
    }

    /// Check if result indicates EOF
    pub fn is_eof(&self) -> bool {
        matches!(self, PollResult::Eof)
    }

    /// Convert to Option, discarding non-ready states
    pub fn ok(self) -> Option<T> {
        match self {
            PollResult::Ready(t) => Some(t),
            _ => None,
        }
    }

    /// Map the value
    pub fn map<U, F: FnOnce(T) -> U>(self, f: F) -> PollResult<U> {
        match self {
            PollResult::Ready(t) => PollResult::Ready(f(t)),
            PollResult::Pending => PollResult::Pending,
            PollResult::NeedInput(n) => PollResult::NeedInput(n),
            PollResult::Eof => PollResult::Eof,
            PollResult::Error(e) => PollResult::Error(e),
        }
    }
}

impl<T> From<io::Result<T>> for PollResult<T> {
    fn from(result: io::Result<T>) -> Self {
        match result {
            Ok(t) => PollResult::Ready(t),
            Err(e) if e.kind() == io::ErrorKind::WouldBlock => PollResult::Pending,
            Err(e) if e.kind() == io::ErrorKind::UnexpectedEof => PollResult::Eof,
            Err(e) => PollResult::Error(e),
        }
    }
}

// ============= Buffer State =============

/// State for buffered reading operations
#[derive(Debug)]
pub struct ReadBuffer {
    /// The buffer holding data
    buffer: BytesMut,
    /// Target number of bytes to read (if known)
    target: Option<usize>,
}

impl ReadBuffer {
    /// Create new empty read buffer
    pub fn new() -> Self {
        Self {
            buffer: BytesMut::with_capacity(8192),
            target: None,
        }
    }

    /// Create with specific capacity
    pub fn with_capacity(capacity: usize) -> Self {
        Self {
            buffer: BytesMut::with_capacity(capacity),
            target: None,
        }
    }

    /// Create with target size
    pub fn with_target(target: usize) -> Self {
        Self {
            buffer: BytesMut::with_capacity(target),
            target: Some(target),
        }
    }

    /// Get current buffer length
    pub fn len(&self) -> usize {
        self.buffer.len()
    }

    /// Check if buffer is empty
    pub fn is_empty(&self) -> bool {
        self.buffer.is_empty()
    }

    /// Check if target is reached
    pub fn is_complete(&self) -> bool {
        match self.target {
            Some(t) => self.buffer.len() >= t,
            None => false,
        }
    }

    /// Get remaining bytes needed
    pub fn remaining(&self) -> usize {
        match self.target {
            Some(t) => t.saturating_sub(self.buffer.len()),
            None => 0,
        }
    }

    /// Append data to buffer
    pub fn extend(&mut self, data: &[u8]) {
        self.buffer.extend_from_slice(data);
    }

    /// Take all data from buffer
    pub fn take(&mut self) -> Bytes {
        self.target = None;
        self.buffer.split().freeze()
    }

    /// Take exactly n bytes
    pub fn take_exact(&mut self, n: usize) -> Option<Bytes> {
        if self.buffer.len() >= n {
            Some(self.buffer.split_to(n).freeze())
        } else {
            None
        }
    }

    /// Get a slice of the buffer
    pub fn as_slice(&self) -> &[u8] {
        &self.buffer
    }

    /// Get mutable access to underlying BytesMut
    pub fn as_bytes_mut(&mut self) -> &mut BytesMut {
        &mut self.buffer
    }

    /// Clear the buffer
    pub fn clear(&mut self) {
        self.buffer.clear();
        self.target = None;
    }

    /// Set new target
    pub fn set_target(&mut self, target: usize) {
        self.target = Some(target);
    }
}

impl Default for ReadBuffer {
    fn default() -> Self {
        Self::new()
    }
}

/// State for buffered writing operations
#[derive(Debug)]
pub struct WriteBuffer {
    /// The buffer holding data to write
    buffer: BytesMut,
    /// Position of next byte to write
    position: usize,
    /// Maximum buffer size
    max_size: usize,
}

impl WriteBuffer {
    /// Create new write buffer with default max size (256KB)
    pub fn new() -> Self {
        Self::with_max_size(256 * 1024)
    }

    /// Create with specific max size
    pub fn with_max_size(max_size: usize) -> Self {
        Self {
            buffer: BytesMut::with_capacity(8192),
            position: 0,
            max_size,
        }
    }

    /// Get pending bytes count
    pub fn len(&self) -> usize {
        self.buffer.len() - self.position
    }

    /// Check if buffer is empty (all written)
    pub fn is_empty(&self) -> bool {
        self.position >= self.buffer.len()
    }

    /// Check if buffer is full
    pub fn is_full(&self) -> bool {
        self.buffer.len() >= self.max_size
    }

    /// Get remaining capacity
    pub fn remaining_capacity(&self) -> usize {
        self.max_size.saturating_sub(self.buffer.len())
    }

    /// Append data to buffer
    pub fn extend(&mut self, data: &[u8]) -> Result<(), ()> {
        if self.buffer.len() + data.len() > self.max_size {
            return Err(());
        }
        self.buffer.extend_from_slice(data);
        Ok(())
    }

    /// Get slice of data to write
    pub fn pending(&self) -> &[u8] {
        &self.buffer[self.position..]
    }

    /// Advance position by n bytes (after successful write)
    pub fn advance(&mut self, n: usize) {
        self.position += n;

        // If all data written, reset buffer
        if self.position >= self.buffer.len() {
            self.buffer.clear();
            self.position = 0;
        }
    }

    /// Clear the buffer
    pub fn clear(&mut self) {
        self.buffer.clear();
        self.position = 0;
    }
}

impl Default for WriteBuffer {
    fn default() -> Self {
        Self::new()
    }
}
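
// Editorial sketch, not part of the diff: the partial-write loop that
// pending()/advance() are designed for. The writer closure is illustrative.
fn flush_to(
    buf: &mut WriteBuffer,
    mut write: impl FnMut(&[u8]) -> io::Result<usize>,
) -> io::Result<()> {
    while !buf.is_empty() {
        let n = write(buf.pending())?; // may accept fewer bytes than offered
        buf.advance(n);                // resets the buffer once fully drained
    }
    Ok(())
}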

// ============= Fixed-Size Buffer States =============

/// State for reading a fixed-size header
#[derive(Debug, Clone)]
pub struct HeaderBuffer<const N: usize> {
    /// The buffer
    data: [u8; N],
    /// Bytes filled so far
    filled: usize,
}

impl<const N: usize> HeaderBuffer<N> {
    /// Create new empty header buffer
    pub fn new() -> Self {
        Self {
            data: [0u8; N],
            filled: 0,
        }
    }

    /// Get slice for reading into
    pub fn unfilled_mut(&mut self) -> &mut [u8] {
        &mut self.data[self.filled..]
    }

    /// Advance filled count
    pub fn advance(&mut self, n: usize) {
        self.filled = (self.filled + n).min(N);
    }

    /// Check if completely filled
    pub fn is_complete(&self) -> bool {
        self.filled >= N
    }

    /// Get remaining bytes needed
    pub fn remaining(&self) -> usize {
        N - self.filled
    }

    /// Get filled bytes as slice
    pub fn as_slice(&self) -> &[u8] {
        &self.data[..self.filled]
    }

    /// Get complete buffer (panics if not complete)
    pub fn as_array(&self) -> &[u8; N] {
        assert!(self.is_complete());
        &self.data
    }

    /// Take the buffer, resetting state
    pub fn take(&mut self) -> [u8; N] {
        let data = self.data;
        self.data = [0u8; N];
        self.filled = 0;
        data
    }

    /// Reset to empty state
    pub fn reset(&mut self) {
        self.filled = 0;
    }
}

impl<const N: usize> Default for HeaderBuffer<N> {
    fn default() -> Self {
        Self::new()
    }
}
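
// Editorial sketch, not part of the diff: filling a fixed-size header from
// a blocking reader with the unfilled_mut()/advance() pair (reader type
// illustrative).
fn read_header<R: std::io::Read>(r: &mut R) -> io::Result<[u8; 4]> {
    let mut hdr = HeaderBuffer::<4>::new();
    while !hdr.is_complete() {
        let n = r.read(hdr.unfilled_mut())?;
        if n == 0 {
            return Err(io::Error::new(io::ErrorKind::UnexpectedEof, "eof in header"));
        }
        hdr.advance(n);
    }
    Ok(hdr.take())
}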

// ============= Yield Buffer =============

/// Buffer for yielding data to caller in chunks
#[derive(Debug)]
pub struct YieldBuffer {
    data: Bytes,
    position: usize,
}

impl YieldBuffer {
    /// Create new yield buffer
    pub fn new(data: Bytes) -> Self {
        Self { data, position: 0 }
    }

    /// Check if all data has been yielded
    pub fn is_empty(&self) -> bool {
        self.position >= self.data.len()
    }

    /// Get remaining bytes
    pub fn remaining(&self) -> usize {
        self.data.len() - self.position
    }

    /// Copy data to output slice, return bytes copied
    pub fn copy_to(&mut self, dst: &mut [u8]) -> usize {
        let available = &self.data[self.position..];
        let to_copy = available.len().min(dst.len());
        dst[..to_copy].copy_from_slice(&available[..to_copy]);
        self.position += to_copy;
        to_copy
    }

    /// Get remaining data as slice
    pub fn as_slice(&self) -> &[u8] {
        &self.data[self.position..]
    }
}

// ============= Macros =============

/// Macro to simplify state transitions in poll methods
#[macro_export]
macro_rules! transition {
    (same) => {
        $crate::stream::state::Transition::Same
    };
    (next $state:expr) => {
        $crate::stream::state::Transition::Next($state)
    };
    (complete $output:expr) => {
        $crate::stream::state::Transition::Complete($output)
    };
    (yield $output:expr, $state:expr) => {
        $crate::stream::state::Transition::Yield($output, $state)
    };
    (error $err:expr) => {
        $crate::stream::state::Transition::Error($err)
    };
}
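
// Editorial sketch, not part of the diff: how the transition! macro might
// read at a call site (state and output types are illustrative).
fn step(bytes_buffered: usize) -> Transition<&'static str, usize> {
    if bytes_buffered == 0 {
        transition!(error io::Error::new(io::ErrorKind::InvalidInput, "empty"))
    } else if bytes_buffered < 4 {
        transition!(next "need-more")
    } else {
        transition!(complete bytes_buffered)
    }
}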

/// Macro to match poll ready or return pending
#[macro_export]
macro_rules! ready_or_pending {
    ($poll:expr) => {
        match $poll {
            std::task::Poll::Ready(t) => t,
            std::task::Poll::Pending => return std::task::Poll::Pending,
        }
    };
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_read_buffer_basic() {
        let mut buf = ReadBuffer::with_target(10);
        assert_eq!(buf.remaining(), 10);
        assert!(!buf.is_complete());

        buf.extend(b"hello");
        assert_eq!(buf.len(), 5);
        assert_eq!(buf.remaining(), 5);
        assert!(!buf.is_complete());

        buf.extend(b"world");
        assert_eq!(buf.len(), 10);
        assert!(buf.is_complete());
    }

    #[test]
    fn test_read_buffer_take() {
        let mut buf = ReadBuffer::new();
        buf.extend(b"test data");

        let data = buf.take();
        assert_eq!(&data[..], b"test data");
        assert!(buf.is_empty());
    }

    #[test]
    fn test_write_buffer_basic() {
        let mut buf = WriteBuffer::with_max_size(100);
        assert!(buf.is_empty());

        buf.extend(b"hello").unwrap();
        assert_eq!(buf.len(), 5);
        assert!(!buf.is_empty());

        buf.advance(3);
        assert_eq!(buf.len(), 2);
        assert_eq!(buf.pending(), b"lo");
    }

    #[test]
    fn test_write_buffer_overflow() {
        let mut buf = WriteBuffer::with_max_size(10);
        assert!(buf.extend(b"short").is_ok());
        assert!(buf.extend(b"toolong").is_err());
    }

    #[test]
    fn test_header_buffer() {
        let mut buf = HeaderBuffer::<5>::new();
        assert!(!buf.is_complete());
        assert_eq!(buf.remaining(), 5);

        buf.unfilled_mut()[..3].copy_from_slice(b"hel");
        buf.advance(3);
        assert_eq!(buf.remaining(), 2);

        buf.unfilled_mut()[..2].copy_from_slice(b"lo");
        buf.advance(2);
        assert!(buf.is_complete());
        assert_eq!(buf.as_array(), b"hello");
    }

    #[test]
    fn test_yield_buffer() {
        let mut buf = YieldBuffer::new(Bytes::from_static(b"hello world"));

        let mut dst = [0u8; 5];
        assert_eq!(buf.copy_to(&mut dst), 5);
        assert_eq!(&dst, b"hello");

        assert_eq!(buf.remaining(), 6);

        let mut dst = [0u8; 10];
        assert_eq!(buf.copy_to(&mut dst), 6);
        assert_eq!(&dst[..6], b" world");

        assert!(buf.is_empty());
    }

    #[test]
    fn test_transition_map() {
        let t: Transition<i32, String> = Transition::Complete("hello".to_string());
        let t = t.map_output(|s| s.len());

        match t {
            Transition::Complete(5) => {}
            _ => panic!("Expected Complete(5)"),
        }
    }

    #[test]
    fn test_poll_result() {
        let r: PollResult<i32> = PollResult::Ready(42);
        assert!(r.is_ready());
        assert_eq!(r.ok(), Some(42));

        let r: PollResult<i32> = PollResult::Eof;
        assert!(r.is_eof());
        assert_eq!(r.ok(), None);
    }
}
File diff suppressed because it is too large

@@ -1,5 +1,7 @@
 //! Stream traits and common types

+#![allow(dead_code)]
+
 use bytes::Bytes;
 use std::io::Result;
 use std::pin::Pin;

255  src/tls_front/cache.rs  Normal file
@@ -0,0 +1,255 @@
use std::collections::HashMap;
use std::net::IpAddr;
use std::path::{Path, PathBuf};
use std::sync::Arc;
use std::time::{Duration, Instant, SystemTime};

use tokio::sync::RwLock;
use tokio::time::sleep;
use tracing::{debug, warn, info};

use crate::tls_front::types::{CachedTlsData, ParsedServerHello, TlsFetchResult};

/// Lightweight in-memory + optional on-disk cache for TLS fronting data.
#[derive(Debug)]
pub struct TlsFrontCache {
    memory: RwLock<HashMap<String, Arc<CachedTlsData>>>,
    default: Arc<CachedTlsData>,
    full_cert_sent: RwLock<HashMap<IpAddr, Instant>>,
    disk_path: PathBuf,
}

#[allow(dead_code)]
impl TlsFrontCache {
    pub fn new(domains: &[String], default_len: usize, disk_path: impl AsRef<Path>) -> Self {
        let default_template = ParsedServerHello {
            version: [0x03, 0x03],
            random: [0u8; 32],
            session_id: Vec::new(),
            cipher_suite: [0x13, 0x01],
            compression: 0,
            extensions: Vec::new(),
        };

        let default = Arc::new(CachedTlsData {
            server_hello_template: default_template,
            cert_info: None,
            cert_payload: None,
            app_data_records_sizes: vec![default_len],
            total_app_data_len: default_len,
            fetched_at: SystemTime::now(),
            domain: "default".to_string(),
        });

        let mut map = HashMap::new();
        for d in domains {
            map.insert(d.clone(), default.clone());
        }

        Self {
            memory: RwLock::new(map),
            default,
            full_cert_sent: RwLock::new(HashMap::new()),
            disk_path: disk_path.as_ref().to_path_buf(),
        }
    }

    pub async fn get(&self, sni: &str) -> Arc<CachedTlsData> {
        let guard = self.memory.read().await;
        guard.get(sni).cloned().unwrap_or_else(|| self.default.clone())
    }

    pub async fn contains_domain(&self, domain: &str) -> bool {
        self.memory.read().await.contains_key(domain)
    }

    /// Returns true when the full cert payload should be sent for client_ip
    /// according to the TTL policy.
    pub async fn take_full_cert_budget_for_ip(
        &self,
        client_ip: IpAddr,
        ttl: Duration,
    ) -> bool {
        if ttl.is_zero() {
            self.full_cert_sent
                .write()
                .await
                .insert(client_ip, Instant::now());
            return true;
        }

        let now = Instant::now();
        let mut guard = self.full_cert_sent.write().await;
        guard.retain(|_, seen_at| now.duration_since(*seen_at) < ttl);

        match guard.get_mut(&client_ip) {
            Some(seen_at) => {
                if now.duration_since(*seen_at) >= ttl {
                    *seen_at = now;
                    true
                } else {
                    false
                }
            }
            None => {
                guard.insert(client_ip, now);
                true
            }
        }
    }
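
    // Editorial sketch, not part of the diff: how a connection handler might
    // consult the TTL budget above to decide between sending the full
    // certificate payload and a shortened one. Names and the one-hour policy
    // are illustrative.
    async fn choose_full_cert(cache: &TlsFrontCache, client_ip: IpAddr) -> bool {
        let ttl = Duration::from_secs(3600); // at most one full payload per IP per hour
        cache.take_full_cert_budget_for_ip(client_ip, ttl).await
    }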
|
||||
pub async fn set(&self, domain: &str, data: CachedTlsData) {
|
||||
let mut guard = self.memory.write().await;
|
||||
guard.insert(domain.to_string(), Arc::new(data));
|
||||
}
|
||||
|
||||
pub async fn load_from_disk(&self) {
|
||||
let path = self.disk_path.clone();
|
||||
if tokio::fs::create_dir_all(&path).await.is_err() {
|
||||
return;
|
||||
}
|
||||
let mut loaded = 0usize;
|
||||
if let Ok(mut dir) = tokio::fs::read_dir(&path).await {
|
||||
while let Ok(Some(entry)) = dir.next_entry().await {
|
||||
if let Ok(name) = entry.file_name().into_string() {
|
||||
if !name.ends_with(".json") {
|
||||
continue;
|
||||
}
|
||||
if let Ok(data) = tokio::fs::read(entry.path()).await
|
||||
&& let Ok(mut cached) = serde_json::from_slice::<CachedTlsData>(&data)
|
||||
{
|
||||
if cached.domain.is_empty()
|
||||
|| cached.domain.len() > 255
|
||||
|| !cached.domain.chars().all(|c| c.is_ascii_alphanumeric() || c == '.' || c == '-')
|
||||
{
|
||||
warn!(file = %name, "Skipping TLS cache entry with invalid domain");
|
||||
continue;
|
||||
}
|
||||
// fetched_at is skipped during deserialization; approximate with file mtime if available.
|
||||
if let Ok(meta) = entry.metadata().await
|
||||
&& let Ok(modified) = meta.modified()
|
||||
{
|
||||
cached.fetched_at = modified;
|
||||
}
|
||||
// Drop entries older than 72h
|
||||
if let Ok(age) = cached.fetched_at.elapsed()
|
||||
&& age > Duration::from_secs(72 * 3600)
|
||||
{
|
||||
warn!(domain = %cached.domain, "Skipping stale TLS cache entry (>72h)");
|
||||
continue;
|
||||
}
|
||||
let domain = cached.domain.clone();
|
||||
self.set(&domain, cached).await;
|
||||
loaded += 1;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
if loaded > 0 {
|
||||
info!(count = loaded, "Loaded TLS cache entries from disk");
|
||||
}
|
||||
}
|
||||
|
||||
async fn persist(&self, domain: &str, data: &CachedTlsData) {
|
||||
if tokio::fs::create_dir_all(&self.disk_path).await.is_err() {
|
||||
return;
|
||||
}
|
||||
let fname = format!("{}.json", domain.replace(['/', '\\'], "_"));
|
||||
let path = self.disk_path.join(fname);
|
||||
if let Ok(json) = serde_json::to_vec_pretty(data) {
|
||||
// best-effort write
|
||||
let _ = tokio::fs::write(path, json).await;
|
||||
}
|
||||
}
|
||||
|
||||
/// Spawn background updater that periodically refreshes cached domains using provided fetcher.
|
||||
pub fn spawn_updater<F>(
|
||||
self: Arc<Self>,
|
||||
domains: Vec<String>,
|
||||
interval: Duration,
|
||||
fetcher: F,
|
||||
) where
|
||||
F: Fn(String) -> tokio::task::JoinHandle<()> + Send + Sync + 'static,
|
||||
{
|
||||
tokio::spawn(async move {
|
||||
loop {
|
||||
for domain in &domains {
|
||||
let _ = fetcher(domain.clone()).await;
|
||||
}
|
||||
sleep(interval).await;
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
/// Replace cached entry from a fetch result.
|
||||
pub async fn update_from_fetch(&self, domain: &str, fetched: TlsFetchResult) {
|
||||
let data = CachedTlsData {
|
||||
server_hello_template: fetched.server_hello_parsed,
|
||||
cert_info: fetched.cert_info,
|
||||
cert_payload: fetched.cert_payload,
|
||||
app_data_records_sizes: fetched.app_data_records_sizes.clone(),
|
||||
total_app_data_len: fetched.total_app_data_len,
|
||||
fetched_at: SystemTime::now(),
|
||||
domain: domain.to_string(),
|
||||
};
|
||||
|
||||
self.set(domain, data.clone()).await;
|
||||
self.persist(domain, &data).await;
|
||||
debug!(domain = %domain, len = fetched.total_app_data_len, "TLS cache updated");
|
||||
}
|
||||
|
||||
pub fn default_entry(&self) -> Arc<CachedTlsData> {
|
||||
self.default.clone()
|
||||
}
|
||||
|
||||
pub fn disk_path(&self) -> &Path {
|
||||
&self.disk_path
|
||||
}
|
||||
}
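
// Illustrative usage sketch (added for this edit, not part of the original
// diff): how a caller might wire the cache at startup. Only `new`,
// `load_from_disk`, and `get` above are real; the scenario is hypothetical.
#[cfg(test)]
mod usage_sketch {
    use super::*;

    #[tokio::test]
    async fn make_cache_and_query() {
        let cache = Arc::new(TlsFrontCache::new(
            &["example.com".to_string()],
            2048,
            "tlsfront-cache-dir",
        ));
        // Best-effort: a missing cache directory is created, errors are ignored.
        cache.load_from_disk().await;
        // An unknown SNI falls back to the default entry.
        let entry = cache.get("unknown.example").await;
        assert_eq!(entry.domain, "default");
    }
}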

#[cfg(test)]
mod tests {
    use super::*;

    #[tokio::test]
    async fn test_take_full_cert_budget_for_ip_uses_ttl() {
        let cache = TlsFrontCache::new(
            &["example.com".to_string()],
            1024,
            "tlsfront-test-cache",
        );
        let ip: IpAddr = "127.0.0.1".parse().expect("ip");
        let ttl = Duration::from_millis(80);

        assert!(cache.take_full_cert_budget_for_ip(ip, ttl).await);
        assert!(!cache.take_full_cert_budget_for_ip(ip, ttl).await);

        tokio::time::sleep(Duration::from_millis(90)).await;

        assert!(cache.take_full_cert_budget_for_ip(ip, ttl).await);
    }

    #[tokio::test]
    async fn test_take_full_cert_budget_for_ip_zero_ttl_always_allows_full_payload() {
        let cache = TlsFrontCache::new(
            &["example.com".to_string()],
            1024,
            "tlsfront-test-cache",
        );
        let ip: IpAddr = "127.0.0.1".parse().expect("ip");
        let ttl = Duration::ZERO;

        assert!(cache.take_full_cert_budget_for_ip(ip, ttl).await);
        assert!(cache.take_full_cert_budget_for_ip(ip, ttl).await);
    }
}
388  src/tls_front/emulator.rs  Normal file
@@ -0,0 +1,388 @@
use crate::crypto::{sha256_hmac, SecureRandom};
use crate::protocol::constants::{
    TLS_RECORD_APPLICATION, TLS_RECORD_CHANGE_CIPHER, TLS_RECORD_HANDSHAKE, TLS_VERSION,
};
use crate::protocol::tls::{TLS_DIGEST_LEN, TLS_DIGEST_POS, gen_fake_x25519_key};
use crate::tls_front::types::{CachedTlsData, ParsedCertificateInfo};

const MIN_APP_DATA: usize = 64;
const MAX_APP_DATA: usize = 16640; // RFC 8446 §5.2 allows up to 2^14 + 256

fn jitter_and_clamp_sizes(sizes: &[usize], rng: &SecureRandom) -> Vec<usize> {
    sizes
        .iter()
        .map(|&size| {
            let base = size.clamp(MIN_APP_DATA, MAX_APP_DATA);
            // Jitter each record size by up to ±3% so replayed lengths are not exact.
            let jitter_range = ((base as f64) * 0.03).round() as i64;
            if jitter_range == 0 {
                return base;
            }
            let mut rand_bytes = [0u8; 2];
            rand_bytes.copy_from_slice(&rng.bytes(2));
            let span = 2 * jitter_range + 1;
            let delta = (u16::from_le_bytes(rand_bytes) as i64 % span) - jitter_range;
            let adjusted = (base as i64 + delta).clamp(MIN_APP_DATA as i64, MAX_APP_DATA as i64);
            adjusted as usize
        })
        .collect()
}

// Each fake record reserves 17 bytes of overhead: 1 inner content-type byte
// plus a 16-byte AEAD-like tag (see the record builder below).
fn app_data_body_capacity(sizes: &[usize]) -> usize {
    sizes.iter().map(|&size| size.saturating_sub(17)).sum()
}

fn ensure_payload_capacity(mut sizes: Vec<usize>, payload_len: usize) -> Vec<usize> {
    if payload_len == 0 {
        return sizes;
    }

    let mut body_total = app_data_body_capacity(&sizes);
    if body_total >= payload_len {
        return sizes;
    }

    // Grow the last record up to the maximum, then append extra records as needed.
    if let Some(last) = sizes.last_mut() {
        let free = MAX_APP_DATA.saturating_sub(*last);
        let grow = free.min(payload_len - body_total);
        *last += grow;
        body_total += grow;
    }

    while body_total < payload_len {
        let remaining = payload_len - body_total;
        let chunk = (remaining + 17).clamp(MIN_APP_DATA, MAX_APP_DATA);
        sizes.push(chunk);
        body_total += chunk.saturating_sub(17);
    }

    sizes
}

fn build_compact_cert_info_payload(cert_info: &ParsedCertificateInfo) -> Option<Vec<u8>> {
    let mut fields = Vec::new();

    if let Some(subject) = cert_info.subject_cn.as_deref() {
        fields.push(format!("CN={subject}"));
    }
    if let Some(issuer) = cert_info.issuer_cn.as_deref() {
        fields.push(format!("ISSUER={issuer}"));
    }
    if let Some(not_before) = cert_info.not_before_unix {
        fields.push(format!("NB={not_before}"));
    }
    if let Some(not_after) = cert_info.not_after_unix {
        fields.push(format!("NA={not_after}"));
    }
    if !cert_info.san_names.is_empty() {
        let san = cert_info
            .san_names
            .iter()
            .take(8)
            .map(String::as_str)
            .collect::<Vec<_>>()
            .join(",");
        fields.push(format!("SAN={san}"));
    }

    if fields.is_empty() {
        return None;
    }

    let mut payload = fields.join(";").into_bytes();
    if payload.len() > 512 {
        payload.truncate(512);
    }
    Some(payload)
}
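
// Worked example (added for this edit, not in the original diff): the compact
// payload is just the available fields joined with ';', so a cert with
// subject/issuer CNs and two SANs serializes as asserted here.
#[cfg(test)]
mod compact_payload_sketch {
    use super::build_compact_cert_info_payload;
    use crate::tls_front::types::ParsedCertificateInfo;

    #[test]
    fn compact_payload_layout() {
        let info = ParsedCertificateInfo {
            not_after_unix: None,
            not_before_unix: None,
            issuer_cn: Some("Issuer".to_string()),
            subject_cn: Some("example.com".to_string()),
            san_names: vec!["example.com".to_string(), "www.example.com".to_string()],
        };
        let payload = build_compact_cert_info_payload(&info).expect("payload");
        assert_eq!(
            payload,
            b"CN=example.com;ISSUER=Issuer;SAN=example.com,www.example.com".to_vec()
        );
    }
}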

/// Build a ServerHello + CCS + ApplicationData sequence using cached TLS metadata.
///
/// The HMAC digest computed at the end is written into the response at
/// `TLS_DIGEST_POS`, which lands in the ServerHello random placeholder.
pub fn build_emulated_server_hello(
    secret: &[u8],
    client_digest: &[u8; TLS_DIGEST_LEN],
    session_id: &[u8],
    cached: &CachedTlsData,
    use_full_cert_payload: bool,
    rng: &SecureRandom,
    alpn: Option<Vec<u8>>,
    new_session_tickets: u8,
) -> Vec<u8> {
    // --- ServerHello ---
    let mut extensions = Vec::new();
    // KeyShare (x25519)
    let key = gen_fake_x25519_key(rng);
    extensions.extend_from_slice(&0x0033u16.to_be_bytes()); // key_share
    extensions.extend_from_slice(&(2 + 2 + 32u16).to_be_bytes()); // group + key-len + key
    extensions.extend_from_slice(&0x001du16.to_be_bytes()); // X25519
    extensions.extend_from_slice(&(32u16).to_be_bytes());
    extensions.extend_from_slice(&key);
    // supported_versions (TLS 1.3)
    extensions.extend_from_slice(&0x002bu16.to_be_bytes());
    extensions.extend_from_slice(&(2u16).to_be_bytes());
    extensions.extend_from_slice(&0x0304u16.to_be_bytes());
    if let Some(alpn_proto) = &alpn {
        extensions.extend_from_slice(&0x0010u16.to_be_bytes());
        let list_len: u16 = 1 + alpn_proto.len() as u16;
        let ext_len: u16 = 2 + list_len;
        extensions.extend_from_slice(&ext_len.to_be_bytes());
        extensions.extend_from_slice(&list_len.to_be_bytes());
        extensions.push(alpn_proto.len() as u8);
        extensions.extend_from_slice(alpn_proto);
    }

    let extensions_len = extensions.len() as u16;

    let body_len = 2 + // version
        32 + // random
        1 + session_id.len() + // session id
        2 + // cipher
        1 + // compression
        2 + extensions.len(); // extensions

    let mut message = Vec::with_capacity(4 + body_len);
    message.push(0x02); // ServerHello
    let len_bytes = (body_len as u32).to_be_bytes();
    message.extend_from_slice(&len_bytes[1..4]);
    message.extend_from_slice(&cached.server_hello_template.version); // 0x0303
    message.extend_from_slice(&[0u8; 32]); // random placeholder, overwritten by the HMAC below
    message.push(session_id.len() as u8);
    message.extend_from_slice(session_id);
    let cipher = if cached.server_hello_template.cipher_suite == [0, 0] {
        [0x13, 0x01]
    } else {
        cached.server_hello_template.cipher_suite
    };
    message.extend_from_slice(&cipher);
    message.push(cached.server_hello_template.compression);
    message.extend_from_slice(&extensions_len.to_be_bytes());
    message.extend_from_slice(&extensions);

    let mut server_hello = Vec::with_capacity(5 + message.len());
    server_hello.push(TLS_RECORD_HANDSHAKE);
    server_hello.extend_from_slice(&TLS_VERSION);
    server_hello.extend_from_slice(&(message.len() as u16).to_be_bytes());
    server_hello.extend_from_slice(&message);

    // --- ChangeCipherSpec ---
    let change_cipher_spec = [
        TLS_RECORD_CHANGE_CIPHER,
        TLS_VERSION[0],
        TLS_VERSION[1],
        0x00,
        0x01,
        0x01,
    ];

    // --- ApplicationData (fake encrypted records) ---
    // Use the same number and sizes of ApplicationData records as the cached server.
    let mut sizes = cached.app_data_records_sizes.clone();
    if sizes.is_empty() {
        sizes.push(cached.total_app_data_len.max(1024));
    }
    let mut sizes = jitter_and_clamp_sizes(&sizes, rng);
    let compact_payload = cached
        .cert_info
        .as_ref()
        .and_then(build_compact_cert_info_payload);
    let selected_payload: Option<&[u8]> = if use_full_cert_payload {
        cached
            .cert_payload
            .as_ref()
            .map(|payload| payload.certificate_message.as_slice())
            .filter(|payload| !payload.is_empty())
            .or(compact_payload.as_deref())
    } else {
        compact_payload.as_deref()
    };

    if let Some(payload) = selected_payload {
        sizes = ensure_payload_capacity(sizes, payload.len());
    }

    let mut app_data = Vec::new();
    let mut payload_offset = 0usize;
    for size in sizes {
        let mut rec = Vec::with_capacity(5 + size);
        rec.push(TLS_RECORD_APPLICATION);
        rec.extend_from_slice(&TLS_VERSION);
        rec.extend_from_slice(&(size as u16).to_be_bytes());

        if let Some(payload) = selected_payload {
            if size > 17 {
                let body_len = size - 17;
                let remaining = payload.len().saturating_sub(payload_offset);
                let copy_len = remaining.min(body_len);
                if copy_len > 0 {
                    rec.extend_from_slice(&payload[payload_offset..payload_offset + copy_len]);
                    payload_offset += copy_len;
                }
                if body_len > copy_len {
                    rec.extend_from_slice(&rng.bytes(body_len - copy_len));
                }
                rec.push(0x16); // inner content type marker (handshake)
                rec.extend_from_slice(&rng.bytes(16)); // AEAD-like tag
            } else {
                rec.extend_from_slice(&rng.bytes(size));
            }
        } else if size > 17 {
            let body_len = size - 17;
            rec.extend_from_slice(&rng.bytes(body_len));
            rec.push(0x16); // inner content type marker (handshake)
            rec.extend_from_slice(&rng.bytes(16)); // AEAD-like tag
        } else {
            rec.extend_from_slice(&rng.bytes(size));
        }
        app_data.extend_from_slice(&rec);
    }

    // --- Combine ---
    // Optional NewSessionTicket mimic records (opaque ApplicationData for fingerprint).
    let mut tickets = Vec::new();
    if new_session_tickets > 0 {
        for _ in 0..new_session_tickets {
            let ticket_len: usize = rng.range(48) + 48; // short opaque ticket-sized record
            let mut rec = Vec::with_capacity(5 + ticket_len);
            rec.push(TLS_RECORD_APPLICATION);
            rec.extend_from_slice(&TLS_VERSION);
            rec.extend_from_slice(&(ticket_len as u16).to_be_bytes());
            rec.extend_from_slice(&rng.bytes(ticket_len));
            tickets.extend_from_slice(&rec);
        }
    }

    let mut response = Vec::with_capacity(
        server_hello.len() + change_cipher_spec.len() + app_data.len() + tickets.len(),
    );
    response.extend_from_slice(&server_hello);
    response.extend_from_slice(&change_cipher_spec);
    response.extend_from_slice(&app_data);
    response.extend_from_slice(&tickets);

    // --- HMAC ---
    let mut hmac_input = Vec::with_capacity(TLS_DIGEST_LEN + response.len());
    hmac_input.extend_from_slice(client_digest);
    hmac_input.extend_from_slice(&response);
    let digest = sha256_hmac(secret, &hmac_input);
    response[TLS_DIGEST_POS..TLS_DIGEST_POS + TLS_DIGEST_LEN].copy_from_slice(&digest);

    response
}
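
// Illustrative sketch (added for this edit, not in the original diff): the
// response above is a plain concatenation of TLS records, so it can be split
// back into (type, body) pairs with a 5-byte header walk. The bytes here are
// synthetic.
#[cfg(test)]
mod record_walk_sketch {
    #[test]
    fn walk_records() {
        // Two fake records: handshake (1-byte body), application data (2-byte body).
        let stream: Vec<u8> = vec![
            0x16, 0x03, 0x03, 0x00, 0x01, 0xaa, // handshake record
            0x17, 0x03, 0x03, 0x00, 0x02, 0xbb, 0xcc, // app-data record
        ];
        let mut pos = 0;
        let mut records = Vec::new();
        while pos + 5 <= stream.len() {
            let len = u16::from_be_bytes([stream[pos + 3], stream[pos + 4]]) as usize;
            records.push((stream[pos], &stream[pos + 5..pos + 5 + len]));
            pos += 5 + len;
        }
        assert_eq!(records.len(), 2);
        assert_eq!(records[0].0, 0x16);
        assert_eq!(records[1].1, &[0xbb, 0xcc][..]);
    }
}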

#[cfg(test)]
mod tests {
    use std::time::SystemTime;

    use crate::tls_front::types::{CachedTlsData, ParsedServerHello, TlsCertPayload};

    use super::build_emulated_server_hello;
    use crate::crypto::SecureRandom;
    use crate::protocol::constants::{
        TLS_RECORD_APPLICATION, TLS_RECORD_CHANGE_CIPHER, TLS_RECORD_HANDSHAKE,
    };

    fn first_app_data_payload(response: &[u8]) -> &[u8] {
        let hello_len = u16::from_be_bytes([response[3], response[4]]) as usize;
        let ccs_start = 5 + hello_len;
        let ccs_len = u16::from_be_bytes([response[ccs_start + 3], response[ccs_start + 4]]) as usize;
        let app_start = ccs_start + 5 + ccs_len;
        let app_len = u16::from_be_bytes([response[app_start + 3], response[app_start + 4]]) as usize;
        &response[app_start + 5..app_start + 5 + app_len]
    }

    fn make_cached(cert_payload: Option<TlsCertPayload>) -> CachedTlsData {
        CachedTlsData {
            server_hello_template: ParsedServerHello {
                version: [0x03, 0x03],
                random: [0u8; 32],
                session_id: Vec::new(),
                cipher_suite: [0x13, 0x01],
                compression: 0,
                extensions: Vec::new(),
            },
            cert_info: None,
            cert_payload,
            app_data_records_sizes: vec![64],
            total_app_data_len: 64,
            fetched_at: SystemTime::now(),
            domain: "example.com".to_string(),
        }
    }

    #[test]
    fn test_build_emulated_server_hello_uses_cached_cert_payload() {
        let cert_msg = vec![0x0b, 0x00, 0x00, 0x05, 0x00, 0xaa, 0xbb, 0xcc, 0xdd];
        let cached = make_cached(Some(TlsCertPayload {
            cert_chain_der: vec![vec![0x30, 0x01, 0x00]],
            certificate_message: cert_msg.clone(),
        }));
        let rng = SecureRandom::new();
        let response = build_emulated_server_hello(
            b"secret",
            &[0x11; 32],
            &[0x22; 16],
            &cached,
            true,
            &rng,
            None,
            0,
        );

        assert_eq!(response[0], TLS_RECORD_HANDSHAKE);
        let hello_len = u16::from_be_bytes([response[3], response[4]]) as usize;
        let ccs_start = 5 + hello_len;
        assert_eq!(response[ccs_start], TLS_RECORD_CHANGE_CIPHER);
        let app_start = ccs_start + 6;
        assert_eq!(response[app_start], TLS_RECORD_APPLICATION);

        let payload = first_app_data_payload(&response);
        assert!(payload.starts_with(&cert_msg));
    }

    #[test]
    fn test_build_emulated_server_hello_random_fallback_when_no_cert_payload() {
        let cached = make_cached(None);
        let rng = SecureRandom::new();
        let response = build_emulated_server_hello(
            b"secret",
            &[0x22; 32],
            &[0x33; 16],
            &cached,
            true,
            &rng,
            None,
            0,
        );

        let payload = first_app_data_payload(&response);
        assert!(payload.len() >= 64);
        assert_eq!(payload[payload.len() - 17], 0x16);
    }

    #[test]
    fn test_build_emulated_server_hello_uses_compact_payload_after_first() {
        let cert_msg = vec![0x0b, 0x00, 0x00, 0x05, 0x00, 0xaa, 0xbb, 0xcc, 0xdd];
        let mut cached = make_cached(Some(TlsCertPayload {
            cert_chain_der: vec![vec![0x30, 0x01, 0x00]],
            certificate_message: cert_msg,
        }));
        cached.cert_info = Some(crate::tls_front::types::ParsedCertificateInfo {
            not_after_unix: Some(1_900_000_000),
            not_before_unix: Some(1_700_000_000),
            issuer_cn: Some("Issuer".to_string()),
            subject_cn: Some("example.com".to_string()),
            san_names: vec!["example.com".to_string(), "www.example.com".to_string()],
        });

        let rng = SecureRandom::new();
        let response = build_emulated_server_hello(
            b"secret",
            &[0x44; 32],
            &[0x55; 16],
            &cached,
            false,
            &rng,
            None,
            0,
        );

        let payload = first_app_data_payload(&response);
        assert!(payload.starts_with(b"CN=example.com"));
    }
}
756  src/tls_front/fetcher.rs  Normal file
@@ -0,0 +1,756 @@
use std::sync::Arc;
use std::time::Duration;

use anyhow::{Result, anyhow};
use tokio::io::{AsyncRead, AsyncReadExt, AsyncWrite, AsyncWriteExt};
use tokio::net::TcpStream;
#[cfg(unix)]
use tokio::net::UnixStream;
use tokio::time::timeout;
use tokio_rustls::client::TlsStream;
use tokio_rustls::TlsConnector;
use tracing::{debug, warn};

use rustls::client::danger::{HandshakeSignatureValid, ServerCertVerified, ServerCertVerifier};
use rustls::client::ClientConfig;
use rustls::pki_types::{CertificateDer, ServerName, UnixTime};
use rustls::{DigitallySignedStruct, Error as RustlsError};

use x509_parser::prelude::FromDer;
use x509_parser::certificate::X509Certificate;

use crate::crypto::SecureRandom;
use crate::network::dns_overrides::resolve_socket_addr;
use crate::protocol::constants::{TLS_RECORD_APPLICATION, TLS_RECORD_HANDSHAKE};
use crate::transport::proxy_protocol::{ProxyProtocolV1Builder, ProxyProtocolV2Builder};
use crate::tls_front::types::{
    ParsedCertificateInfo,
    ParsedServerHello,
    TlsCertPayload,
    TlsExtension,
    TlsFetchResult,
};

/// No-op verifier: accept any certificate (we only need lengths and metadata).
#[derive(Debug)]
struct NoVerify;

impl ServerCertVerifier for NoVerify {
    fn verify_server_cert(
        &self,
        _end_entity: &CertificateDer<'_>,
        _intermediates: &[CertificateDer<'_>],
        _server_name: &ServerName<'_>,
        _ocsp: &[u8],
        _now: UnixTime,
    ) -> Result<ServerCertVerified, RustlsError> {
        Ok(ServerCertVerified::assertion())
    }

    fn verify_tls12_signature(
        &self,
        _message: &[u8],
        _cert: &CertificateDer<'_>,
        _dss: &DigitallySignedStruct,
    ) -> Result<HandshakeSignatureValid, RustlsError> {
        Ok(HandshakeSignatureValid::assertion())
    }

    fn verify_tls13_signature(
        &self,
        _message: &[u8],
        _cert: &CertificateDer<'_>,
        _dss: &DigitallySignedStruct,
    ) -> Result<HandshakeSignatureValid, RustlsError> {
        Ok(HandshakeSignatureValid::assertion())
    }

    fn supported_verify_schemes(&self) -> Vec<rustls::SignatureScheme> {
        use rustls::SignatureScheme::*;
        vec![
            RSA_PKCS1_SHA256,
            RSA_PSS_SHA256,
            ECDSA_NISTP256_SHA256,
            ECDSA_NISTP384_SHA384,
        ]
    }
}

fn build_client_config() -> Arc<ClientConfig> {
    let root = rustls::RootCertStore::empty();

    let provider = rustls::crypto::ring::default_provider();
    let mut config = ClientConfig::builder_with_provider(Arc::new(provider))
        .with_protocol_versions(&[&rustls::version::TLS13, &rustls::version::TLS12])
        .expect("protocol versions")
        .with_root_certificates(root)
        .with_no_client_auth();

    config
        .dangerous()
        .set_certificate_verifier(Arc::new(NoVerify));

    Arc::new(config)
}

fn build_client_hello(sni: &str, rng: &SecureRandom) -> Vec<u8> {
    // === ClientHello body ===
    let mut body = Vec::new();

    // legacy_version: 0x0303 (TLS 1.2), as in real TLS 1.3 ClientHello bodies
    body.extend_from_slice(&[0x03, 0x03]);

    // Random
    body.extend_from_slice(&rng.bytes(32));

    // Session ID: empty
    body.push(0);

    // Cipher suites: the TLS 1.3 suites, one legacy 1.2 fallback, and the renegotiation SCSV
    let cipher_suites: [u8; 10] = [
        0x13, 0x01, // TLS_AES_128_GCM_SHA256
        0x13, 0x02, // TLS_AES_256_GCM_SHA384
        0x13, 0x03, // TLS_CHACHA20_POLY1305_SHA256
        0x00, 0x2f, // TLS_RSA_WITH_AES_128_CBC_SHA (legacy)
        0x00, 0xff, // RENEGOTIATION_INFO_SCSV
    ];
    body.extend_from_slice(&(cipher_suites.len() as u16).to_be_bytes());
    body.extend_from_slice(&cipher_suites);

    // Compression methods: null only
    body.push(1);
    body.push(0);

    // === Extensions ===
    let mut exts = Vec::new();

    // server_name (SNI)
    let sni_bytes = sni.as_bytes();
    let mut sni_ext = Vec::with_capacity(5 + sni_bytes.len());
    sni_ext.extend_from_slice(&(sni_bytes.len() as u16 + 3).to_be_bytes());
    sni_ext.push(0); // host_name
    sni_ext.extend_from_slice(&(sni_bytes.len() as u16).to_be_bytes());
    sni_ext.extend_from_slice(sni_bytes);
    exts.extend_from_slice(&0x0000u16.to_be_bytes());
    exts.extend_from_slice(&(sni_ext.len() as u16).to_be_bytes());
    exts.extend_from_slice(&sni_ext);

    // supported_groups
    let groups: [u16; 2] = [0x001d, 0x0017]; // x25519, secp256r1
    exts.extend_from_slice(&0x000au16.to_be_bytes());
    exts.extend_from_slice(&((2 + groups.len() * 2) as u16).to_be_bytes());
    exts.extend_from_slice(&(groups.len() as u16 * 2).to_be_bytes());
    for g in groups {
        exts.extend_from_slice(&g.to_be_bytes());
    }

    // signature_algorithms
    let sig_algs: [u16; 4] = [0x0804, 0x0805, 0x0403, 0x0503]; // rsa_pss_rsae_sha256/384, ecdsa_secp256r1_sha256, ecdsa_secp384r1_sha384
    exts.extend_from_slice(&0x000du16.to_be_bytes());
    exts.extend_from_slice(&((2 + sig_algs.len() * 2) as u16).to_be_bytes());
    exts.extend_from_slice(&(sig_algs.len() as u16 * 2).to_be_bytes());
    for a in sig_algs {
        exts.extend_from_slice(&a.to_be_bytes());
    }

    // supported_versions (TLS 1.3 + TLS 1.2)
    let versions: [u16; 2] = [0x0304, 0x0303];
    exts.extend_from_slice(&0x002bu16.to_be_bytes());
    exts.extend_from_slice(&((1 + versions.len() * 2) as u16).to_be_bytes());
    exts.push((versions.len() * 2) as u8);
    for v in versions {
        exts.extend_from_slice(&v.to_be_bytes());
    }

    // key_share (x25519)
    let key = gen_key_share(rng);
    let mut keyshare = Vec::with_capacity(4 + key.len());
    keyshare.extend_from_slice(&0x001du16.to_be_bytes()); // group
    keyshare.extend_from_slice(&(key.len() as u16).to_be_bytes());
    keyshare.extend_from_slice(&key);
    exts.extend_from_slice(&0x0033u16.to_be_bytes());
    exts.extend_from_slice(&((2 + keyshare.len()) as u16).to_be_bytes());
    exts.extend_from_slice(&(keyshare.len() as u16).to_be_bytes());
    exts.extend_from_slice(&keyshare);

    // ALPN (http/1.1)
    let alpn_proto = b"http/1.1";
    exts.extend_from_slice(&0x0010u16.to_be_bytes());
    exts.extend_from_slice(&((2 + 1 + alpn_proto.len()) as u16).to_be_bytes());
    exts.extend_from_slice(&((1 + alpn_proto.len()) as u16).to_be_bytes());
    exts.push(alpn_proto.len() as u8);
    exts.extend_from_slice(alpn_proto);

    // Pad the extensions block to a stable ~180-byte length to reduce recognizability.
    const TARGET_EXT_LEN: usize = 180;
    if exts.len() < TARGET_EXT_LEN {
        let remaining = TARGET_EXT_LEN - exts.len();
        if remaining > 4 {
            let pad_len = remaining - 4; // minus type + len
            exts.extend_from_slice(&0x0015u16.to_be_bytes()); // padding extension
            exts.extend_from_slice(&(pad_len as u16).to_be_bytes());
            exts.resize(exts.len() + pad_len, 0);
        }
    }

    // Extensions length prefix
    body.extend_from_slice(&(exts.len() as u16).to_be_bytes());
    body.extend_from_slice(&exts);

    // === Handshake wrapper ===
    let mut handshake = Vec::new();
    handshake.push(0x01); // ClientHello
    let len_bytes = (body.len() as u32).to_be_bytes();
    handshake.extend_from_slice(&len_bytes[1..4]);
    handshake.extend_from_slice(&body);

    // === Record ===
    let mut record = Vec::new();
    record.push(TLS_RECORD_HANDSHAKE);
    record.extend_from_slice(&[0x03, 0x01]); // legacy record version (TLS 1.0)
    record.extend_from_slice(&(handshake.len() as u16).to_be_bytes());
    record.extend_from_slice(&handshake);

    record
}

fn gen_key_share(rng: &SecureRandom) -> [u8; 32] {
    let mut key = [0u8; 32];
    key.copy_from_slice(&rng.bytes(32));
    key
}

async fn read_tls_record<S>(stream: &mut S) -> Result<(u8, Vec<u8>)>
where
    S: AsyncRead + Unpin,
{
    let mut header = [0u8; 5];
    stream.read_exact(&mut header).await?;
    let len = u16::from_be_bytes([header[3], header[4]]) as usize;
    let mut body = vec![0u8; len];
    stream.read_exact(&mut body).await?;
    Ok((header[0], body))
}

fn parse_server_hello(body: &[u8]) -> Option<ParsedServerHello> {
    if body.len() < 4 || body[0] != 0x02 {
        return None;
    }

    let msg_len = u32::from_be_bytes([0, body[1], body[2], body[3]]) as usize;
    if msg_len + 4 > body.len() {
        return None;
    }

    let mut pos = 4;
    let version = [*body.get(pos)?, *body.get(pos + 1)?];
    pos += 2;

    let mut random = [0u8; 32];
    random.copy_from_slice(body.get(pos..pos + 32)?);
    pos += 32;

    let session_len = *body.get(pos)? as usize;
    pos += 1;
    let session_id = body.get(pos..pos + session_len)?.to_vec();
    pos += session_len;

    let cipher_suite = [*body.get(pos)?, *body.get(pos + 1)?];
    pos += 2;

    let compression = *body.get(pos)?;
    pos += 1;

    let ext_len = u16::from_be_bytes([*body.get(pos)?, *body.get(pos + 1)?]) as usize;
    pos += 2;
    let ext_end = pos.checked_add(ext_len)?;
    if ext_end > body.len() {
        return None;
    }

    let mut extensions = Vec::new();
    while pos + 4 <= ext_end {
        let etype = u16::from_be_bytes([body[pos], body[pos + 1]]);
        let elen = u16::from_be_bytes([body[pos + 2], body[pos + 3]]) as usize;
        pos += 4;
        let data = body.get(pos..pos + elen)?.to_vec();
        pos += elen;
        extensions.push(TlsExtension { ext_type: etype, data });
    }

    Some(ParsedServerHello {
        version,
        random,
        session_id,
        cipher_suite,
        compression,
        extensions,
    })
}
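
// Illustrative sketch (added for this edit, not in the original diff): a
// hand-assembled minimal ServerHello body parses into the expected fields.
// All bytes here are synthetic; the layout follows the parser above.
#[cfg(test)]
mod parse_server_hello_sketch {
    use super::parse_server_hello;

    #[test]
    fn parses_minimal_server_hello() {
        let mut body = vec![0x02, 0x00, 0x00, 0x28]; // ServerHello, length 40
        body.extend_from_slice(&[0x03, 0x03]); // version
        body.extend_from_slice(&[0u8; 32]); // random
        body.push(0x00); // empty session id
        body.extend_from_slice(&[0x13, 0x01]); // cipher suite
        body.push(0x00); // compression
        body.extend_from_slice(&[0x00, 0x00]); // no extensions

        let parsed = parse_server_hello(&body).expect("parse");
        assert_eq!(parsed.version, [0x03, 0x03]);
        assert_eq!(parsed.cipher_suite, [0x13, 0x01]);
        assert!(parsed.extensions.is_empty());
    }
}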

fn parse_cert_info(certs: &[CertificateDer<'static>]) -> Option<ParsedCertificateInfo> {
    let first = certs.first()?;
    let (_rem, cert) = X509Certificate::from_der(first.as_ref()).ok()?;

    let not_before = Some(cert.validity().not_before.to_datetime().unix_timestamp());
    let not_after = Some(cert.validity().not_after.to_datetime().unix_timestamp());

    let issuer_cn = cert
        .issuer()
        .iter_common_name()
        .next()
        .and_then(|cn| cn.as_str().ok())
        .map(|s| s.to_string());

    let subject_cn = cert
        .subject()
        .iter_common_name()
        .next()
        .and_then(|cn| cn.as_str().ok())
        .map(|s| s.to_string());

    let san_names = cert
        .subject_alternative_name()
        .ok()
        .flatten()
        .map(|san| {
            san.value
                .general_names
                .iter()
                .filter_map(|gn| match gn {
                    x509_parser::extensions::GeneralName::DNSName(n) => Some(n.to_string()),
                    _ => None,
                })
                .collect::<Vec<_>>()
        })
        .unwrap_or_default();

    Some(ParsedCertificateInfo {
        not_after_unix: not_after,
        not_before_unix: not_before,
        issuer_cn,
        subject_cn,
        san_names,
    })
}

fn u24_bytes(value: usize) -> Option<[u8; 3]> {
    if value > 0x00ff_ffff {
        return None;
    }
    Some([
        ((value >> 16) & 0xff) as u8,
        ((value >> 8) & 0xff) as u8,
        (value & 0xff) as u8,
    ])
}

async fn connect_with_dns_override(
    host: &str,
    port: u16,
    connect_timeout: Duration,
) -> Result<TcpStream> {
    if let Some(addr) = resolve_socket_addr(host, port) {
        return Ok(timeout(connect_timeout, TcpStream::connect(addr)).await??);
    }
    Ok(timeout(connect_timeout, TcpStream::connect((host, port))).await??)
}

async fn connect_tcp_with_upstream(
    host: &str,
    port: u16,
    connect_timeout: Duration,
    upstream: Option<std::sync::Arc<crate::transport::UpstreamManager>>,
) -> Result<TcpStream> {
    if let Some(manager) = upstream {
        if let Some(addr) = resolve_socket_addr(host, port) {
            match manager.connect(addr, None, None).await {
                Ok(stream) => return Ok(stream),
                Err(e) => {
                    warn!(
                        host = %host,
                        port = port,
                        error = %e,
                        "Upstream connect failed, using direct connect"
                    );
                }
            }
        } else if let Ok(mut addrs) = tokio::net::lookup_host((host, port)).await {
            if let Some(addr) = addrs.find(|a| a.is_ipv4()) {
                match manager.connect(addr, None, None).await {
                    Ok(stream) => return Ok(stream),
                    Err(e) => {
                        warn!(
                            host = %host,
                            port = port,
                            error = %e,
                            "Upstream connect failed, using direct connect"
                        );
                    }
                }
            }
        }
    }
    connect_with_dns_override(host, port, connect_timeout).await
}

fn encode_tls13_certificate_message(cert_chain_der: &[Vec<u8>]) -> Option<Vec<u8>> {
    if cert_chain_der.is_empty() {
        return None;
    }

    let mut certificate_list = Vec::new();
    for cert in cert_chain_der {
        if cert.is_empty() {
            return None;
        }
        certificate_list.extend_from_slice(&u24_bytes(cert.len())?);
        certificate_list.extend_from_slice(cert);
        certificate_list.extend_from_slice(&0u16.to_be_bytes()); // cert_entry extensions
    }

    // Certificate = context_len(1) + certificate_list_len(3) + entries
    let body_len = 1usize
        .checked_add(3)?
        .checked_add(certificate_list.len())?;

    let mut message = Vec::with_capacity(4 + body_len);
    message.push(0x0b); // HandshakeType::certificate
    message.extend_from_slice(&u24_bytes(body_len)?);
    message.push(0x00); // certificate_request_context length
    message.extend_from_slice(&u24_bytes(certificate_list.len())?);
    message.extend_from_slice(&certificate_list);
    Some(message)
}

async fn fetch_via_raw_tls_stream<S>(
    mut stream: S,
    sni: &str,
    connect_timeout: Duration,
    proxy_protocol: u8,
) -> Result<TlsFetchResult>
where
    S: AsyncRead + AsyncWrite + Unpin,
{
    let rng = SecureRandom::new();
    let client_hello = build_client_hello(sni, &rng);
    timeout(connect_timeout, async {
        if proxy_protocol > 0 {
            let header = match proxy_protocol {
                2 => ProxyProtocolV2Builder::new().build(),
                _ => ProxyProtocolV1Builder::new().build(),
            };
            stream.write_all(&header).await?;
        }
        stream.write_all(&client_hello).await?;
        stream.flush().await?;
        Ok::<(), std::io::Error>(())
    })
    .await??;

    let mut records = Vec::new();
    // Read up to 4 records: ServerHello, CCS, and up to two ApplicationData.
    for _ in 0..4 {
        match timeout(connect_timeout, read_tls_record(&mut stream)).await {
            Ok(Ok(rec)) => records.push(rec),
            Ok(Err(e)) => return Err(e),
            Err(_) => break,
        }
        if records.len() >= 3 && records.iter().any(|(t, _)| *t == TLS_RECORD_APPLICATION) {
            break;
        }
    }

    let mut app_sizes = Vec::new();
    let mut server_hello = None;
    for (t, body) in &records {
        if *t == TLS_RECORD_HANDSHAKE && server_hello.is_none() {
            server_hello = parse_server_hello(body);
        } else if *t == TLS_RECORD_APPLICATION {
            app_sizes.push(body.len());
        }
    }

    let parsed = server_hello.ok_or_else(|| anyhow!("ServerHello not received"))?;
    let total_app_data_len = app_sizes.iter().sum::<usize>().max(1024);

    Ok(TlsFetchResult {
        server_hello_parsed: parsed,
        app_data_records_sizes: if app_sizes.is_empty() {
            vec![total_app_data_len]
        } else {
            app_sizes
        },
        total_app_data_len,
        cert_info: None,
        cert_payload: None,
    })
}

async fn fetch_via_raw_tls(
    host: &str,
    port: u16,
    sni: &str,
    connect_timeout: Duration,
    upstream: Option<std::sync::Arc<crate::transport::UpstreamManager>>,
    proxy_protocol: u8,
    unix_sock: Option<&str>,
) -> Result<TlsFetchResult> {
    #[cfg(unix)]
    if let Some(sock_path) = unix_sock {
        match timeout(connect_timeout, UnixStream::connect(sock_path)).await {
            Ok(Ok(stream)) => {
                debug!(
                    sni = %sni,
                    sock = %sock_path,
                    "Raw TLS fetch using mask unix socket"
                );
                return fetch_via_raw_tls_stream(stream, sni, connect_timeout, proxy_protocol).await;
            }
            Ok(Err(e)) => {
                warn!(
                    sni = %sni,
                    sock = %sock_path,
                    error = %e,
                    "Raw TLS unix socket connect failed, falling back to TCP"
                );
            }
            Err(_) => {
                warn!(
                    sni = %sni,
                    sock = %sock_path,
                    "Raw TLS unix socket connect timed out, falling back to TCP"
                );
            }
        }
    }

    #[cfg(not(unix))]
    let _ = unix_sock;

    let stream = connect_tcp_with_upstream(host, port, connect_timeout, upstream).await?;
    fetch_via_raw_tls_stream(stream, sni, connect_timeout, proxy_protocol).await
}

async fn fetch_via_rustls_stream<S>(
    mut stream: S,
    host: &str,
    sni: &str,
    proxy_protocol: u8,
) -> Result<TlsFetchResult>
where
    S: AsyncRead + AsyncWrite + Unpin,
{
    // rustls handshake path for certificate and basic negotiated metadata.
    if proxy_protocol > 0 {
        let header = match proxy_protocol {
            2 => ProxyProtocolV2Builder::new().build(),
            _ => ProxyProtocolV1Builder::new().build(),
        };
        stream.write_all(&header).await?;
        stream.flush().await?;
    }

    let config = build_client_config();
    let connector = TlsConnector::from(config);

    let server_name = ServerName::try_from(sni.to_owned())
        .or_else(|_| ServerName::try_from(host.to_owned()))
        .map_err(|_| RustlsError::General("invalid SNI".into()))?;

    let tls_stream: TlsStream<S> = connector.connect(server_name, stream).await?;

    // Extract negotiated parameters and certificates
    let (_io, session) = tls_stream.get_ref();
    let cipher_suite = session
        .negotiated_cipher_suite()
        .map(|s| u16::from(s.suite()).to_be_bytes())
        .unwrap_or([0x13, 0x01]);

    let certs: Vec<CertificateDer<'static>> = session
        .peer_certificates()
        .map(|slice| slice.to_vec())
        .unwrap_or_default();
    let cert_chain_der: Vec<Vec<u8>> = certs.iter().map(|c| c.as_ref().to_vec()).collect();
    let cert_payload = encode_tls13_certificate_message(&cert_chain_der).map(|certificate_message| {
        TlsCertPayload {
            cert_chain_der: cert_chain_der.clone(),
            certificate_message,
        }
    });

    let total_cert_len = cert_payload
        .as_ref()
        .map(|payload| payload.certificate_message.len())
        .unwrap_or_else(|| cert_chain_der.iter().map(Vec::len).sum::<usize>())
        .max(1024);
    let cert_info = parse_cert_info(&certs);

    // Heuristic: split large payloads across two records to better mimic real servers.
    let app_data_records_sizes = if total_cert_len > 3000 {
        vec![total_cert_len / 2, total_cert_len - total_cert_len / 2]
    } else {
        vec![total_cert_len]
    };

    let parsed = ParsedServerHello {
        version: [0x03, 0x03],
        random: [0u8; 32],
        session_id: Vec::new(),
        cipher_suite,
        compression: 0,
        extensions: Vec::new(),
    };

    debug!(
        sni = %sni,
        len = total_cert_len,
        cipher = format!("0x{:04x}", u16::from_be_bytes(cipher_suite)),
        has_cert_payload = cert_payload.is_some(),
        "Fetched TLS metadata via rustls"
    );

    Ok(TlsFetchResult {
        server_hello_parsed: parsed,
        app_data_records_sizes: app_data_records_sizes.clone(),
        total_app_data_len: app_data_records_sizes.iter().sum(),
        cert_info,
        cert_payload,
    })
}

async fn fetch_via_rustls(
    host: &str,
    port: u16,
    sni: &str,
    connect_timeout: Duration,
    upstream: Option<std::sync::Arc<crate::transport::UpstreamManager>>,
    proxy_protocol: u8,
    unix_sock: Option<&str>,
) -> Result<TlsFetchResult> {
    #[cfg(unix)]
    if let Some(sock_path) = unix_sock {
        match timeout(connect_timeout, UnixStream::connect(sock_path)).await {
            Ok(Ok(stream)) => {
                debug!(
                    sni = %sni,
                    sock = %sock_path,
                    "Rustls fetch using mask unix socket"
                );
                return fetch_via_rustls_stream(stream, host, sni, proxy_protocol).await;
            }
            Ok(Err(e)) => {
                warn!(
                    sni = %sni,
                    sock = %sock_path,
                    error = %e,
                    "Rustls unix socket connect failed, falling back to TCP"
                );
            }
            Err(_) => {
                warn!(
                    sni = %sni,
                    sock = %sock_path,
                    "Rustls unix socket connect timed out, falling back to TCP"
                );
            }
        }
    }

    #[cfg(not(unix))]
    let _ = unix_sock;

    let stream = connect_tcp_with_upstream(host, port, connect_timeout, upstream).await?;
    fetch_via_rustls_stream(stream, host, sni, proxy_protocol).await
}

/// Fetch real TLS metadata for the given SNI.
///
/// Strategy:
/// 1) Probe raw TLS for realistic ServerHello and ApplicationData record sizes.
/// 2) Fetch the certificate chain via rustls to build the cert payload.
/// 3) Merge both when possible; otherwise fall back to whichever succeeded.
pub async fn fetch_real_tls(
    host: &str,
    port: u16,
    sni: &str,
    connect_timeout: Duration,
    upstream: Option<std::sync::Arc<crate::transport::UpstreamManager>>,
    proxy_protocol: u8,
    unix_sock: Option<&str>,
) -> Result<TlsFetchResult> {
    let raw_result = match fetch_via_raw_tls(
        host,
        port,
        sni,
        connect_timeout,
        upstream.clone(),
        proxy_protocol,
        unix_sock,
    )
    .await
    {
        Ok(res) => Some(res),
        Err(e) => {
            warn!(sni = %sni, error = %e, "Raw TLS fetch failed");
            None
        }
    };

    match fetch_via_rustls(
        host,
        port,
        sni,
        connect_timeout,
        upstream,
        proxy_protocol,
        unix_sock,
    )
    .await
    {
        Ok(rustls_result) => {
            if let Some(mut raw) = raw_result {
                raw.cert_info = rustls_result.cert_info;
                raw.cert_payload = rustls_result.cert_payload;
                debug!(sni = %sni, "Fetched TLS metadata via raw probe + rustls cert chain");
                Ok(raw)
            } else {
                Ok(rustls_result)
            }
        }
        Err(e) => {
            if let Some(raw) = raw_result {
                warn!(sni = %sni, error = %e, "Rustls cert fetch failed, using raw TLS metadata only");
                Ok(raw)
            } else {
                Err(e)
            }
        }
    }
}
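
// Illustrative wiring sketch (added for this edit, not in the original diff):
// feeding a fetch result into `TlsFrontCache::update_from_fetch`. The host and
// parameters are hypothetical; this compiles but is deliberately never called,
// since running it would hit the network.
#[allow(dead_code)]
async fn refresh_example(cache: &crate::tls_front::TlsFrontCache) -> Result<()> {
    let fetched = fetch_real_tls(
        "www.example.com",
        443,
        "www.example.com",
        Duration::from_secs(5),
        None, // no upstream manager
        0,    // no PROXY protocol header
        None, // no unix socket
    )
    .await?;
    cache.update_from_fetch("www.example.com", fetched).await;
    Ok(())
}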

#[cfg(test)]
mod tests {
    use super::encode_tls13_certificate_message;

    fn read_u24(bytes: &[u8]) -> usize {
        ((bytes[0] as usize) << 16) | ((bytes[1] as usize) << 8) | (bytes[2] as usize)
    }

    #[test]
    fn test_encode_tls13_certificate_message_single_cert() {
        let cert = vec![0x30, 0x03, 0x02, 0x01, 0x01];
        let message = encode_tls13_certificate_message(&[cert.clone()]).expect("message");

        assert_eq!(message[0], 0x0b);
        assert_eq!(read_u24(&message[1..4]), message.len() - 4);
        assert_eq!(message[4], 0x00);

        let cert_list_len = read_u24(&message[5..8]);
        assert_eq!(cert_list_len, cert.len() + 5);

        let cert_len = read_u24(&message[8..11]);
        assert_eq!(cert_len, cert.len());
        assert_eq!(&message[11..11 + cert.len()], cert.as_slice());
        assert_eq!(&message[11 + cert.len()..13 + cert.len()], &[0x00, 0x00]);
    }

    #[test]
    fn test_encode_tls13_certificate_message_empty_chain() {
        assert!(encode_tls13_certificate_message(&[]).is_none());
    }
}
8  src/tls_front/mod.rs  Normal file
@@ -0,0 +1,8 @@
pub mod types;
pub mod cache;
pub mod fetcher;
pub mod emulator;

pub use cache::TlsFrontCache;
#[allow(unused_imports)]
pub use types::{CachedTlsData, TlsFetchResult};
68  src/tls_front/types.rs  Normal file
@@ -0,0 +1,68 @@
use std::time::SystemTime;
use serde::{Deserialize, Serialize};

/// Parsed representation of an unencrypted TLS ServerHello.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ParsedServerHello {
    pub version: [u8; 2],
    pub random: [u8; 32],
    pub session_id: Vec<u8>,
    pub cipher_suite: [u8; 2],
    pub compression: u8,
    pub extensions: Vec<TlsExtension>,
}

/// Generic TLS extension container.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct TlsExtension {
    pub ext_type: u16,
    pub data: Vec<u8>,
}

/// Basic certificate metadata (optional, informative).
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ParsedCertificateInfo {
    pub not_after_unix: Option<i64>,
    pub not_before_unix: Option<i64>,
    pub issuer_cn: Option<String>,
    pub subject_cn: Option<String>,
    pub san_names: Vec<String>,
}

/// TLS certificate payload captured from a profiled upstream.
///
/// `certificate_message` stores an encoded TLS 1.3 Certificate handshake
/// message body that can be replayed as opaque ApplicationData bytes in FakeTLS.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct TlsCertPayload {
    pub cert_chain_der: Vec<Vec<u8>>,
    pub certificate_message: Vec<u8>,
}

/// Cached data per SNI used by the emulator.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct CachedTlsData {
    pub server_hello_template: ParsedServerHello,
    pub cert_info: Option<ParsedCertificateInfo>,
    #[serde(default)]
    pub cert_payload: Option<TlsCertPayload>,
    pub app_data_records_sizes: Vec<usize>,
    pub total_app_data_len: usize,
    #[serde(default = "now_system_time", skip_serializing, skip_deserializing)]
    pub fetched_at: SystemTime,
    pub domain: String,
}

fn now_system_time() -> SystemTime {
    SystemTime::now()
}
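
// Illustrative sketch (added for this edit, not in the original diff):
// `fetched_at` is excluded from (de)serialization, so a JSON round-trip
// regenerates it via `now_system_time`; the disk loader later overwrites it
// from the file mtime.
#[cfg(test)]
mod serde_sketch {
    use super::*;

    #[test]
    fn fetched_at_not_serialized() {
        let data = CachedTlsData {
            server_hello_template: ParsedServerHello {
                version: [0x03, 0x03],
                random: [0u8; 32],
                session_id: Vec::new(),
                cipher_suite: [0x13, 0x01],
                compression: 0,
                extensions: Vec::new(),
            },
            cert_info: None,
            cert_payload: None,
            app_data_records_sizes: vec![64],
            total_app_data_len: 64,
            fetched_at: SystemTime::UNIX_EPOCH,
            domain: "example.com".to_string(),
        };
        let json = serde_json::to_string(&data).expect("serialize");
        assert!(!json.contains("fetched_at"));
        let back: CachedTlsData = serde_json::from_str(&json).expect("deserialize");
        assert!(back.fetched_at > SystemTime::UNIX_EPOCH);
    }
}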

/// Result of attempting to fetch real TLS artifacts.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct TlsFetchResult {
    pub server_hello_parsed: ParsedServerHello,
    pub app_data_records_sizes: Vec<usize>,
    pub total_app_data_len: usize,
    pub cert_info: Option<ParsedCertificateInfo>,
    pub cert_payload: Option<TlsCertPayload>,
}
250  src/transport/middle_proxy/codec.rs  Normal file
@@ -0,0 +1,250 @@
use tokio::io::{AsyncReadExt, AsyncWriteExt};
use bytes::Bytes;

use crate::crypto::{AesCbc, crc32, crc32c};
use crate::error::{ProxyError, Result};
use crate::protocol::constants::*;

/// Commands sent to dedicated writer tasks to avoid mutex contention on TCP writes.
pub(crate) enum WriterCommand {
    Data(Bytes),
    DataAndFlush(Bytes),
    Close,
}

#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub(crate) enum RpcChecksumMode {
    Crc32,
    Crc32c,
}

impl RpcChecksumMode {
    pub(crate) fn from_handshake_flags(flags: u32) -> Self {
        if (flags & rpc_crypto_flags::USE_CRC32C) != 0 {
            Self::Crc32c
        } else {
            Self::Crc32
        }
    }

    pub(crate) fn advertised_flags(self) -> u32 {
        match self {
            Self::Crc32 => 0,
            Self::Crc32c => rpc_crypto_flags::USE_CRC32C,
        }
    }
}

pub(crate) fn rpc_crc(mode: RpcChecksumMode, data: &[u8]) -> u32 {
    match mode {
        RpcChecksumMode::Crc32 => crc32(data),
        RpcChecksumMode::Crc32c => crc32c(data),
    }
}

/// Frame layout (all little-endian): total_len(4) | seq_no(4) | payload | crc(4),
/// where the CRC covers everything before it.
pub(crate) fn build_rpc_frame(seq_no: i32, payload: &[u8], crc_mode: RpcChecksumMode) -> Vec<u8> {
    let total_len = (4 + 4 + payload.len() + 4) as u32;
    let mut frame = Vec::with_capacity(total_len as usize);
    frame.extend_from_slice(&total_len.to_le_bytes());
    frame.extend_from_slice(&seq_no.to_le_bytes());
    frame.extend_from_slice(payload);
    let c = rpc_crc(crc_mode, &frame);
    frame.extend_from_slice(&c.to_le_bytes());
    frame
}
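
// Illustrative round-trip sketch (added for this edit, not in the original
// diff): a frame built in CRC32 mode reads back through the plaintext reader
// defined below, which verifies the checksum and strips the framing.
#[cfg(test)]
mod frame_roundtrip_sketch {
    use super::{build_rpc_frame, read_rpc_frame_plaintext, RpcChecksumMode};

    #[tokio::test]
    async fn frame_roundtrip() {
        let frame = build_rpc_frame(7, b"hello world!", RpcChecksumMode::Crc32);
        let mut rd: &[u8] = &frame;
        let (seq_no, payload) = read_rpc_frame_plaintext(&mut rd).await.expect("frame");
        assert_eq!(seq_no, 7);
        assert_eq!(payload, b"hello world!");
    }
}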

pub(crate) async fn read_rpc_frame_plaintext(
    rd: &mut (impl AsyncReadExt + Unpin),
) -> Result<(i32, Vec<u8>)> {
    let mut len_buf = [0u8; 4];
    rd.read_exact(&mut len_buf).await.map_err(ProxyError::Io)?;
    let total_len = u32::from_le_bytes(len_buf) as usize;

    if !(12..=(1 << 24)).contains(&total_len) {
        return Err(ProxyError::InvalidHandshake(format!(
            "Bad RPC frame length: {total_len}"
        )));
    }

    let mut rest = vec![0u8; total_len - 4];
    rd.read_exact(&mut rest).await.map_err(ProxyError::Io)?;

    let mut full = Vec::with_capacity(total_len);
    full.extend_from_slice(&len_buf);
    full.extend_from_slice(&rest);

    let crc_offset = total_len - 4;
    let expected_crc = u32::from_le_bytes(full[crc_offset..crc_offset + 4].try_into().unwrap());
    let actual_crc = rpc_crc(RpcChecksumMode::Crc32, &full[..crc_offset]);
    if expected_crc != actual_crc {
        return Err(ProxyError::InvalidHandshake(format!(
            "CRC mismatch: 0x{expected_crc:08x} vs 0x{actual_crc:08x}"
        )));
    }

    let seq_no = i32::from_le_bytes(full[4..8].try_into().unwrap());
    let payload = full[8..crc_offset].to_vec();
    Ok((seq_no, payload))
}

pub(crate) fn build_nonce_payload(key_selector: u32, crypto_ts: u32, nonce: &[u8; 16]) -> [u8; 32] {
    let mut p = [0u8; 32];
    p[0..4].copy_from_slice(&RPC_NONCE_U32.to_le_bytes());
    p[4..8].copy_from_slice(&key_selector.to_le_bytes());
    p[8..12].copy_from_slice(&RPC_CRYPTO_AES_U32.to_le_bytes());
    p[12..16].copy_from_slice(&crypto_ts.to_le_bytes());
    p[16..32].copy_from_slice(nonce);
    p
}

pub(crate) fn parse_nonce_payload(d: &[u8]) -> Result<(u32, u32, u32, [u8; 16])> {
    if d.len() < 32 {
        return Err(ProxyError::InvalidHandshake(format!(
            "Nonce payload too short: {} bytes",
            d.len()
        )));
    }

    let t = u32::from_le_bytes(d[0..4].try_into().unwrap());
    if t != RPC_NONCE_U32 {
        return Err(ProxyError::InvalidHandshake(format!(
            "Expected RPC_NONCE 0x{RPC_NONCE_U32:08x}, got 0x{t:08x}"
        )));
    }

    let key_select = u32::from_le_bytes(d[4..8].try_into().unwrap());
    let schema = u32::from_le_bytes(d[8..12].try_into().unwrap());
    let ts = u32::from_le_bytes(d[12..16].try_into().unwrap());
    let mut nonce = [0u8; 16];
    nonce.copy_from_slice(&d[16..32]);
    Ok((key_select, schema, ts, nonce))
}
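
// Illustrative round-trip sketch (added for this edit, not in the original
// diff): the nonce payload builder and parser are mirror images, with the
// schema field fixed to RPC_CRYPTO_AES_U32 by the builder.
#[cfg(test)]
mod nonce_roundtrip_sketch {
    use super::{build_nonce_payload, parse_nonce_payload};
    use crate::protocol::constants::RPC_CRYPTO_AES_U32;

    #[test]
    fn nonce_roundtrip() {
        let nonce = [0xabu8; 16];
        let payload = build_nonce_payload(3, 1_700_000_000, &nonce);
        let (key_select, schema, ts, parsed_nonce) =
            parse_nonce_payload(&payload).expect("nonce payload");
        assert_eq!(key_select, 3);
        assert_eq!(schema, RPC_CRYPTO_AES_U32);
        assert_eq!(ts, 1_700_000_000);
        assert_eq!(parsed_nonce, nonce);
    }
}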

pub(crate) fn build_handshake_payload(
    our_ip: [u8; 4],
    our_port: u16,
    peer_ip: [u8; 4],
    peer_port: u16,
    flags: u32,
) -> [u8; 32] {
    let mut p = [0u8; 32];
    p[0..4].copy_from_slice(&RPC_HANDSHAKE_U32.to_le_bytes());
    p[4..8].copy_from_slice(&flags.to_le_bytes());

    // sender process_id: ip, port, pid, start time
    p[8..12].copy_from_slice(&our_ip);
    p[12..14].copy_from_slice(&our_port.to_le_bytes());
    p[14..16].copy_from_slice(&process_pid16().to_le_bytes());
    p[16..20].copy_from_slice(&process_utime().to_le_bytes());

    // peer process_id: ip and port only; pid and start time zeroed
    p[20..24].copy_from_slice(&peer_ip);
    p[24..26].copy_from_slice(&peer_port.to_le_bytes());
    p[26..28].copy_from_slice(&0u16.to_le_bytes());
    p[28..32].copy_from_slice(&0u32.to_le_bytes());
    p
}

pub(crate) fn parse_handshake_flags(payload: &[u8]) -> Result<u32> {
    if payload.len() != 32 {
        return Err(ProxyError::InvalidHandshake(format!(
            "Bad handshake payload len: {}",
            payload.len()
        )));
    }
    let hs_type = u32::from_le_bytes(payload[0..4].try_into().unwrap());
    if hs_type != RPC_HANDSHAKE_U32 {
        return Err(ProxyError::InvalidHandshake(format!(
            "Expected HANDSHAKE 0x{RPC_HANDSHAKE_U32:08x}, got 0x{hs_type:08x}"
        )));
    }
    Ok(u32::from_le_bytes(payload[4..8].try_into().unwrap()))
}

fn process_pid16() -> u16 {
    (std::process::id() & 0xffff) as u16
}

fn process_utime() -> u32 {
    std::time::SystemTime::now()
        .duration_since(std::time::UNIX_EPOCH)
        .unwrap_or_default()
        .as_secs() as u32
}

pub(crate) fn cbc_encrypt_padded(
    key: &[u8; 32],
    iv: &[u8; 16],
    plaintext: &[u8],
) -> Result<(Vec<u8>, [u8; 16])> {
    // Pad to a 16-byte block boundary with the repeating 0x04 0x00 0x00 0x00 pattern.
    let pad = (16 - (plaintext.len() % 16)) % 16;
    let mut buf = plaintext.to_vec();
    let pad_pattern: [u8; 4] = [0x04, 0x00, 0x00, 0x00];
    for i in 0..pad {
        buf.push(pad_pattern[i % 4]);
    }

    let cipher = AesCbc::new(*key, *iv);
    cipher
        .encrypt_in_place(&mut buf)
        .map_err(|e| ProxyError::Crypto(format!("CBC encrypt: {e}")))?;

    // The last ciphertext block chains into the next frame as its IV.
    let mut new_iv = [0u8; 16];
    if buf.len() >= 16 {
        new_iv.copy_from_slice(&buf[buf.len() - 16..]);
    }
    Ok((buf, new_iv))
}

pub(crate) fn cbc_decrypt_inplace(
    key: &[u8; 32],
    iv: &[u8; 16],
    data: &mut [u8],
) -> Result<[u8; 16]> {
    // Capture the last ciphertext block before decrypting; it chains into the next frame.
    let mut new_iv = [0u8; 16];
    if data.len() >= 16 {
        new_iv.copy_from_slice(&data[data.len() - 16..]);
    }

    AesCbc::new(*key, *iv)
        .decrypt_in_place(data)
        .map_err(|e| ProxyError::Crypto(format!("CBC decrypt: {e}")))?;
    Ok(new_iv)
}

pub(crate) struct RpcWriter {
    pub(crate) writer: tokio::io::WriteHalf<tokio::net::TcpStream>,
    pub(crate) key: [u8; 32],
    pub(crate) iv: [u8; 16],
    pub(crate) seq_no: i32,
    pub(crate) crc_mode: RpcChecksumMode,
}

impl RpcWriter {
    pub(crate) async fn send(&mut self, payload: &[u8]) -> Result<()> {
        let frame = build_rpc_frame(self.seq_no, payload, self.crc_mode);
        self.seq_no = self.seq_no.wrapping_add(1);

        let pad = (16 - (frame.len() % 16)) % 16;
        let mut buf = frame;
        let pad_pattern: [u8; 4] = [0x04, 0x00, 0x00, 0x00];
        for i in 0..pad {
            buf.push(pad_pattern[i % 4]);
        }

        let cipher = AesCbc::new(self.key, self.iv);
        cipher
            .encrypt_in_place(&mut buf)
            .map_err(|e| ProxyError::Crypto(format!("{e}")))?;

        if buf.len() >= 16 {
            self.iv.copy_from_slice(&buf[buf.len() - 16..]);
        }
        self.writer.write_all(&buf).await.map_err(ProxyError::Io)
    }

    pub(crate) async fn send_and_flush(&mut self, payload: &[u8]) -> Result<()> {
        self.send(payload).await?;
        self.writer.flush().await.map_err(ProxyError::Io)
    }
}
620  src/transport/middle_proxy/config_updater.rs  Normal file
@@ -0,0 +1,620 @@
use std::collections::HashMap;
use std::hash::{DefaultHasher, Hash, Hasher};
use std::net::IpAddr;
use std::path::Path;
use std::sync::Arc;
use std::time::Duration;

use httpdate;
use tokio::sync::{mpsc, watch};
use tracing::{debug, info, warn};

use crate::config::ProxyConfig;
use crate::error::Result;

use super::MePool;
use super::rotation::{MeReinitTrigger, enqueue_reinit_trigger};
use super::secret::download_proxy_secret_with_max_len;
use std::time::SystemTime;

async fn retry_fetch(url: &str) -> Option<ProxyConfigData> {
    // Back off 1s, then 5s, then 15s between attempts; warn only on the final failure.
    let delays = [1u64, 5, 15];
    for (i, d) in delays.iter().enumerate() {
        match fetch_proxy_config(url).await {
            Ok(cfg) => return Some(cfg),
            Err(e) => {
                if i == delays.len() - 1 {
                    warn!(error = %e, url, "fetch_proxy_config failed");
                } else {
                    debug!(error = %e, url, "fetch_proxy_config retrying");
                    tokio::time::sleep(Duration::from_secs(*d)).await;
                }
            }
        }
    }
    None
}

/// Parsed proxy-config: a DC-number -> middle-proxy address map plus metadata
/// about the fetch itself.
#[derive(Debug, Clone, Default)]
pub struct ProxyConfigData {
    pub map: HashMap<i32, Vec<(IpAddr, u16)>>,
    pub default_dc: Option<i32>,
    pub http_status: u16,
    pub proxy_for_lines: u32,
}

pub fn parse_proxy_config_text(text: &str, http_status: u16) -> ProxyConfigData {
    let mut map: HashMap<i32, Vec<(IpAddr, u16)>> = HashMap::new();
    let mut proxy_for_lines: u32 = 0;
    for line in text.lines() {
        if let Some((dc, ip, port)) = parse_proxy_line(line) {
            map.entry(dc).or_default().push((ip, port));
            proxy_for_lines = proxy_for_lines.saturating_add(1);
        }
    }

    let default_dc = text.lines().find_map(|l| {
        let t = l.trim();
        if let Some(rest) = t.strip_prefix("default") {
            return rest.trim().trim_end_matches(';').parse::<i32>().ok();
        }
        None
    });

    ProxyConfigData {
        map,
        default_dc,
        http_status,
        proxy_for_lines,
    }
}
||||
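// A minimal sketch of what parse_proxy_config_text() produces for a typical
// getProxyConfig body; the addresses below are documentation samples
// (203.0.113.0/24), not real Telegram endpoints.
#[cfg(test)]
mod parse_config_sketch {
    use super::*;

    #[test]
    fn sample_config_body() {
        let text = "default 2;\nproxy_for 2 203.0.113.1:8888;\nproxy_for 4 203.0.113.2:8888;\n";
        let cfg = parse_proxy_config_text(text, 200);
        assert_eq!(cfg.default_dc, Some(2));
        assert_eq!(cfg.proxy_for_lines, 2);
        assert_eq!(cfg.map[&2], vec![("203.0.113.1".parse().unwrap(), 8888)]);
    }
}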

pub async fn load_proxy_config_cache(path: &str) -> Result<ProxyConfigData> {
    let text = tokio::fs::read_to_string(path).await.map_err(|e| {
        crate::error::ProxyError::Proxy(format!("read proxy-config cache '{path}' failed: {e}"))
    })?;
    Ok(parse_proxy_config_text(&text, 200))
}

pub async fn save_proxy_config_cache(path: &str, raw_text: &str) -> Result<()> {
    if let Some(parent) = Path::new(path).parent()
        && !parent.as_os_str().is_empty()
    {
        tokio::fs::create_dir_all(parent).await.map_err(|e| {
            crate::error::ProxyError::Proxy(format!(
                "create proxy-config cache dir '{}' failed: {e}",
                parent.display()
            ))
        })?;
    }

    tokio::fs::write(path, raw_text).await.map_err(|e| {
        crate::error::ProxyError::Proxy(format!("write proxy-config cache '{path}' failed: {e}"))
    })?;
    Ok(())
}

pub async fn fetch_proxy_config_with_raw(url: &str) -> Result<(ProxyConfigData, String)> {
    let resp = reqwest::get(url)
        .await
        .map_err(|e| crate::error::ProxyError::Proxy(format!("fetch_proxy_config GET failed: {e}")))?;
    let http_status = resp.status().as_u16();

    if let Some(date) = resp.headers().get(reqwest::header::DATE)
        && let Ok(date_str) = date.to_str()
        && let Ok(server_time) = httpdate::parse_http_date(date_str)
        && let Ok(skew) = SystemTime::now().duration_since(server_time).or_else(|e| {
            server_time.duration_since(SystemTime::now()).map_err(|_| e)
        })
    {
        let skew_secs = skew.as_secs();
        if skew_secs > 60 {
            warn!(skew_secs, "Time skew >60s detected from fetch_proxy_config Date header");
        } else if skew_secs > 30 {
            warn!(skew_secs, "Time skew >30s detected from fetch_proxy_config Date header");
        }
    }

    let text = resp
        .text()
        .await
        .map_err(|e| crate::error::ProxyError::Proxy(format!("fetch_proxy_config read failed: {e}")))?;
    let parsed = parse_proxy_config_text(&text, http_status);
    Ok((parsed, text))
}
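// The or_else() chain above is just an absolute difference between two
// SystemTime values: whichever clock is ahead, one duration_since() call
// succeeds. A minimal sketch of the same pattern:
#[cfg(test)]
mod skew_sketch {
    use std::time::{Duration, SystemTime};

    #[test]
    fn absolute_skew_either_direction() {
        let a = SystemTime::UNIX_EPOCH + Duration::from_secs(100);
        let b = SystemTime::UNIX_EPOCH + Duration::from_secs(40);
        let skew = a
            .duration_since(b)
            .or_else(|e| b.duration_since(a).map_err(|_| e))
            .unwrap();
        assert_eq!(skew, Duration::from_secs(60));
    }
}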

#[derive(Debug, Default)]
struct StableSnapshot {
    candidate_hash: Option<u64>,
    candidate_hits: u8,
    applied_hash: Option<u64>,
}

impl StableSnapshot {
    fn observe(&mut self, hash: u64) -> u8 {
        if self.candidate_hash == Some(hash) {
            self.candidate_hits = self.candidate_hits.saturating_add(1);
        } else {
            self.candidate_hash = Some(hash);
            self.candidate_hits = 1;
        }
        self.candidate_hits
    }

    fn is_applied(&self, hash: u64) -> bool {
        self.applied_hash == Some(hash)
    }

    fn mark_applied(&mut self, hash: u64) {
        self.applied_hash = Some(hash);
    }
}
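// A minimal sketch of the stable-gate contract: a changed hash resets the hit
// counter to 1, so a snapshot only becomes eligible after it has been observed
// unchanged on enough consecutive cycles, and mark_applied() dedupes repeats.
#[cfg(test)]
mod stable_gate_sketch {
    use super::StableSnapshot;

    #[test]
    fn hits_reset_when_candidate_changes() {
        let mut s = StableSnapshot::default();
        assert_eq!(s.observe(0xaa), 1);
        assert_eq!(s.observe(0xaa), 2);
        assert_eq!(s.observe(0xbb), 1); // new candidate restarts the count
        s.mark_applied(0xbb);
        assert!(s.is_applied(0xbb));
        assert!(!s.is_applied(0xaa));
    }
}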

#[derive(Debug, Default)]
struct UpdaterState {
    config_v4: StableSnapshot,
    config_v6: StableSnapshot,
    secret: StableSnapshot,
    last_map_apply_at: Option<tokio::time::Instant>,
}

fn hash_proxy_config(cfg: &ProxyConfigData) -> u64 {
    let mut hasher = DefaultHasher::new();
    cfg.default_dc.hash(&mut hasher);

    let mut by_dc: Vec<(i32, Vec<(IpAddr, u16)>)> =
        cfg.map.iter().map(|(dc, addrs)| (*dc, addrs.clone())).collect();
    by_dc.sort_by_key(|(dc, _)| *dc);
    for (dc, mut addrs) in by_dc {
        dc.hash(&mut hasher);
        addrs.sort_unstable();
        for (ip, port) in addrs {
            ip.hash(&mut hasher);
            port.hash(&mut hasher);
        }
    }

    hasher.finish()
}

fn hash_secret(secret: &[u8]) -> u64 {
    let mut hasher = DefaultHasher::new();
    secret.hash(&mut hasher);
    hasher.finish()
}

fn map_apply_cooldown_ready(
    last_applied: Option<tokio::time::Instant>,
    cooldown: Duration,
) -> bool {
    if cooldown.is_zero() {
        return true;
    }
    match last_applied {
        Some(ts) => ts.elapsed() >= cooldown,
        None => true,
    }
}

fn map_apply_cooldown_remaining_secs(
    last_applied: tokio::time::Instant,
    cooldown: Duration,
) -> u64 {
    if cooldown.is_zero() {
        return 0;
    }
    cooldown
        .checked_sub(last_applied.elapsed())
        .map(|d| d.as_secs())
        .unwrap_or(0)
}
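// A minimal sketch of why hash_proxy_config() sorts before hashing: two
// snapshots listing the same endpoints in a different order hash identically,
// so HashMap iteration order cannot defeat the stable-gate.
#[cfg(test)]
mod config_hash_sketch {
    use super::*;

    #[test]
    fn hash_ignores_line_order() {
        let a = parse_proxy_config_text(
            "proxy_for 1 203.0.113.1:80;\nproxy_for 2 203.0.113.2:80;\n",
            200,
        );
        let b = parse_proxy_config_text(
            "proxy_for 2 203.0.113.2:80;\nproxy_for 1 203.0.113.1:80;\n",
            200,
        );
        assert_eq!(hash_proxy_config(&a), hash_proxy_config(&b));
    }
}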

fn parse_host_port(s: &str) -> Option<(IpAddr, u16)> {
    if let Some(bracket_end) = s.rfind(']')
        && s.starts_with('[')
        && bracket_end + 1 < s.len()
        && s.as_bytes().get(bracket_end + 1) == Some(&b':')
    {
        let host = &s[1..bracket_end];
        let port_str = &s[bracket_end + 2..];
        let ip = host.parse::<IpAddr>().ok()?;
        let port = port_str.parse::<u16>().ok()?;
        return Some((ip, port));
    }

    let idx = s.rfind(':')?;
    let host = &s[..idx];
    let port_str = &s[idx + 1..];
    let ip = host.parse::<IpAddr>().ok()?;
    let port = port_str.parse::<u16>().ok()?;
    Some((ip, port))
}

fn parse_proxy_line(line: &str) -> Option<(i32, IpAddr, u16)> {
    // Accepts lines like:
    //   proxy_for 4 91.108.4.195:8888;
    //   proxy_for 2 [2001:67c:04e8:f002::d]:80;
    //   proxy_for 2 2001:67c:04e8:f002::d:80;
    let trimmed = line.trim();
    if !trimmed.starts_with("proxy_for") {
        return None;
    }
    // Capture everything between dc and trailing ';'
    let without_prefix = trimmed.trim_start_matches("proxy_for").trim();
    let mut parts = without_prefix.split_whitespace();
    let dc_str = parts.next()?;
    let rest = parts.next()?;
    let host_port = rest.trim_end_matches(';');
    let dc = dc_str.parse::<i32>().ok()?;
    let (ip, port) = parse_host_port(host_port)?;
    Some((dc, ip, port))
}
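// A minimal sketch of the unbracketed-IPv6 fallback in parse_host_port():
// rfind(':') splits at the last colon, so everything before it is treated as
// the host and must still parse as a valid IpAddr on its own.
#[cfg(test)]
mod host_port_sketch {
    use super::*;

    #[test]
    fn last_colon_split_for_plain_ipv6() {
        let (ip, port) = parse_host_port("2001:67c:04e8:f002::d:80").unwrap();
        assert_eq!(ip, "2001:67c:04e8:f002::d".parse::<IpAddr>().unwrap());
        assert_eq!(port, 80);
    }
}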

pub async fn fetch_proxy_config(url: &str) -> Result<ProxyConfigData> {
    fetch_proxy_config_with_raw(url)
        .await
        .map(|(parsed, _raw)| parsed)
}

fn snapshot_passes_guards(
    cfg: &ProxyConfig,
    snapshot: &ProxyConfigData,
    snapshot_name: &'static str,
) -> bool {
    if cfg.general.me_snapshot_require_http_2xx
        && !(200..=299).contains(&snapshot.http_status)
    {
        warn!(
            snapshot = snapshot_name,
            http_status = snapshot.http_status,
            "ME snapshot rejected by non-2xx HTTP status"
        );
        return false;
    }

    let min_proxy_for = cfg.general.me_snapshot_min_proxy_for_lines;
    if snapshot.proxy_for_lines < min_proxy_for {
        warn!(
            snapshot = snapshot_name,
            parsed_proxy_for_lines = snapshot.proxy_for_lines,
            min_proxy_for_lines = min_proxy_for,
            "ME snapshot rejected by proxy_for line floor"
        );
        return false;
    }

    true
}

async fn run_update_cycle(
    pool: &Arc<MePool>,
    cfg: &ProxyConfig,
    state: &mut UpdaterState,
    reinit_tx: &mpsc::Sender<MeReinitTrigger>,
) {
    pool.update_runtime_reinit_policy(
        cfg.general.hardswap,
        cfg.general.me_pool_drain_ttl_secs,
        cfg.general.effective_me_pool_force_close_secs(),
        cfg.general.me_pool_min_fresh_ratio,
        cfg.general.me_hardswap_warmup_delay_min_ms,
        cfg.general.me_hardswap_warmup_delay_max_ms,
        cfg.general.me_hardswap_warmup_extra_passes,
        cfg.general.me_hardswap_warmup_pass_backoff_base_ms,
        cfg.general.me_bind_stale_mode,
        cfg.general.me_bind_stale_ttl_secs,
        cfg.general.me_secret_atomic_snapshot,
        cfg.general.me_deterministic_writer_sort,
        cfg.general.me_writer_pick_mode,
        cfg.general.me_writer_pick_sample_size,
        cfg.general.me_single_endpoint_shadow_writers,
        cfg.general.me_single_endpoint_outage_mode_enabled,
        cfg.general.me_single_endpoint_outage_disable_quarantine,
        cfg.general.me_single_endpoint_outage_backoff_min_ms,
        cfg.general.me_single_endpoint_outage_backoff_max_ms,
        cfg.general.me_single_endpoint_shadow_rotate_every_secs,
        cfg.general.me_floor_mode,
        cfg.general.me_adaptive_floor_idle_secs,
        cfg.general.me_adaptive_floor_min_writers_single_endpoint,
        cfg.general.me_adaptive_floor_min_writers_multi_endpoint,
        cfg.general.me_adaptive_floor_recover_grace_secs,
        cfg.general.me_adaptive_floor_writers_per_core_total,
        cfg.general.me_adaptive_floor_cpu_cores_override,
        cfg.general.me_adaptive_floor_max_extra_writers_single_per_core,
        cfg.general.me_adaptive_floor_max_extra_writers_multi_per_core,
        cfg.general.me_adaptive_floor_max_active_writers_per_core,
        cfg.general.me_adaptive_floor_max_warm_writers_per_core,
        cfg.general.me_adaptive_floor_max_active_writers_global,
        cfg.general.me_adaptive_floor_max_warm_writers_global,
        cfg.general.me_health_interval_ms_unhealthy,
        cfg.general.me_health_interval_ms_healthy,
        cfg.general.me_warn_rate_limit_ms,
    );

    let required_cfg_snapshots = cfg.general.me_config_stable_snapshots.max(1);
    let required_secret_snapshots = cfg.general.proxy_secret_stable_snapshots.max(1);
    let apply_cooldown = Duration::from_secs(cfg.general.me_config_apply_cooldown_secs);
    let mut maps_changed = false;

    let mut ready_v4: Option<(ProxyConfigData, u64)> = None;
    let cfg_v4 = retry_fetch("https://core.telegram.org/getProxyConfig").await;
    if let Some(cfg_v4) = cfg_v4 {
        if snapshot_passes_guards(cfg, &cfg_v4, "getProxyConfig") {
            let cfg_v4_hash = hash_proxy_config(&cfg_v4);
            let stable_hits = state.config_v4.observe(cfg_v4_hash);
            if stable_hits < required_cfg_snapshots {
                debug!(
                    stable_hits,
                    required_cfg_snapshots,
                    snapshot = format_args!("0x{cfg_v4_hash:016x}"),
                    "ME config v4 candidate observed"
                );
            } else if state.config_v4.is_applied(cfg_v4_hash) {
                debug!(
                    snapshot = format_args!("0x{cfg_v4_hash:016x}"),
                    "ME config v4 stable snapshot already applied"
                );
            } else {
                ready_v4 = Some((cfg_v4, cfg_v4_hash));
            }
        }
    }

    let mut ready_v6: Option<(ProxyConfigData, u64)> = None;
    let cfg_v6 = retry_fetch("https://core.telegram.org/getProxyConfigV6").await;
    if let Some(cfg_v6) = cfg_v6 {
        if snapshot_passes_guards(cfg, &cfg_v6, "getProxyConfigV6") {
            let cfg_v6_hash = hash_proxy_config(&cfg_v6);
            let stable_hits = state.config_v6.observe(cfg_v6_hash);
            if stable_hits < required_cfg_snapshots {
                debug!(
                    stable_hits,
                    required_cfg_snapshots,
                    snapshot = format_args!("0x{cfg_v6_hash:016x}"),
                    "ME config v6 candidate observed"
                );
            } else if state.config_v6.is_applied(cfg_v6_hash) {
                debug!(
                    snapshot = format_args!("0x{cfg_v6_hash:016x}"),
                    "ME config v6 stable snapshot already applied"
                );
            } else {
                ready_v6 = Some((cfg_v6, cfg_v6_hash));
            }
        }
    }

    if ready_v4.is_some() || ready_v6.is_some() {
        if map_apply_cooldown_ready(state.last_map_apply_at, apply_cooldown) {
            let update_v4 = ready_v4
                .as_ref()
                .map(|(snapshot, _)| snapshot.map.clone())
                .unwrap_or_default();
            let update_v6 = ready_v6
                .as_ref()
                .map(|(snapshot, _)| snapshot.map.clone());
            let update_is_empty =
                update_v4.is_empty() && update_v6.as_ref().is_none_or(|v| v.is_empty());
            let apply_outcome = if update_is_empty && !cfg.general.me_snapshot_reject_empty_map {
                super::pool_config::SnapshotApplyOutcome::AppliedNoDelta
            } else {
                pool.update_proxy_maps(update_v4, update_v6).await
            };

            if matches!(
                apply_outcome,
                super::pool_config::SnapshotApplyOutcome::RejectedEmpty
            ) {
                warn!("ME config stable snapshot rejected (empty endpoint map)");
            } else {
                if let Some((snapshot, hash)) = ready_v4 {
                    if let Some(dc) = snapshot.default_dc {
                        pool.default_dc
                            .store(dc, std::sync::atomic::Ordering::Relaxed);
                    }
                    state.config_v4.mark_applied(hash);
                }

                if let Some((_snapshot, hash)) = ready_v6 {
                    state.config_v6.mark_applied(hash);
                }

                state.last_map_apply_at = Some(tokio::time::Instant::now());

                if apply_outcome.changed() {
                    maps_changed = true;
                    info!("ME config update applied after stable-gate");
                } else {
                    debug!("ME config stable-gate applied with no map delta");
                }
            }
        } else if let Some(last) = state.last_map_apply_at {
            let wait_secs = map_apply_cooldown_remaining_secs(last, apply_cooldown);
            debug!(
                wait_secs,
                "ME config stable snapshot deferred by cooldown"
            );
        }
    }

    if maps_changed {
        enqueue_reinit_trigger(reinit_tx, MeReinitTrigger::MapChanged);
    }

    pool.reset_stun_state();

    if cfg.general.proxy_secret_rotate_runtime {
        match download_proxy_secret_with_max_len(cfg.general.proxy_secret_len_max).await {
            Ok(secret) => {
                let secret_hash = hash_secret(&secret);
                let stable_hits = state.secret.observe(secret_hash);
                if stable_hits < required_secret_snapshots {
                    debug!(
                        stable_hits,
                        required_secret_snapshots,
                        snapshot = format_args!("0x{secret_hash:016x}"),
                        "proxy-secret candidate observed"
                    );
                } else if state.secret.is_applied(secret_hash) {
                    debug!(
                        snapshot = format_args!("0x{secret_hash:016x}"),
                        "proxy-secret stable snapshot already applied"
                    );
                } else {
                    let rotated = pool.update_secret(secret).await;
                    state.secret.mark_applied(secret_hash);
                    if rotated {
                        info!("proxy-secret rotated after stable-gate");
                    } else {
                        debug!("proxy-secret stable snapshot confirmed as unchanged");
                    }
                }
            }
            Err(e) => warn!(error = %e, "proxy-secret update failed"),
        }
    } else {
        debug!("proxy-secret runtime rotation disabled by config");
    }
}

pub async fn me_config_updater(
    pool: Arc<MePool>,
    mut config_rx: watch::Receiver<Arc<ProxyConfig>>,
    reinit_tx: mpsc::Sender<MeReinitTrigger>,
) {
    let mut state = UpdaterState::default();
    let mut update_every_secs = config_rx
        .borrow()
        .general
        .effective_update_every_secs()
        .max(1);
    let mut update_every = Duration::from_secs(update_every_secs);
    let mut next_tick = tokio::time::Instant::now() + update_every;
    info!(update_every_secs, "ME config updater started");

    loop {
        let sleep = tokio::time::sleep_until(next_tick);
        tokio::pin!(sleep);

        tokio::select! {
            _ = &mut sleep => {
                let cfg = config_rx.borrow().clone();
                run_update_cycle(&pool, cfg.as_ref(), &mut state, &reinit_tx).await;
                let refreshed_secs = cfg.general.effective_update_every_secs().max(1);
                if refreshed_secs != update_every_secs {
                    info!(
                        old_update_every_secs = update_every_secs,
                        new_update_every_secs = refreshed_secs,
                        "ME config updater interval changed"
                    );
                    update_every_secs = refreshed_secs;
                    update_every = Duration::from_secs(update_every_secs);
                }
                next_tick = tokio::time::Instant::now() + update_every;
            }
            changed = config_rx.changed() => {
                if changed.is_err() {
                    warn!("ME config updater stopped: config channel closed");
                    break;
                }
                let cfg = config_rx.borrow().clone();
                pool.update_runtime_reinit_policy(
                    cfg.general.hardswap,
                    cfg.general.me_pool_drain_ttl_secs,
                    cfg.general.effective_me_pool_force_close_secs(),
                    cfg.general.me_pool_min_fresh_ratio,
                    cfg.general.me_hardswap_warmup_delay_min_ms,
                    cfg.general.me_hardswap_warmup_delay_max_ms,
                    cfg.general.me_hardswap_warmup_extra_passes,
                    cfg.general.me_hardswap_warmup_pass_backoff_base_ms,
                    cfg.general.me_bind_stale_mode,
                    cfg.general.me_bind_stale_ttl_secs,
                    cfg.general.me_secret_atomic_snapshot,
                    cfg.general.me_deterministic_writer_sort,
                    cfg.general.me_writer_pick_mode,
                    cfg.general.me_writer_pick_sample_size,
                    cfg.general.me_single_endpoint_shadow_writers,
                    cfg.general.me_single_endpoint_outage_mode_enabled,
                    cfg.general.me_single_endpoint_outage_disable_quarantine,
                    cfg.general.me_single_endpoint_outage_backoff_min_ms,
                    cfg.general.me_single_endpoint_outage_backoff_max_ms,
                    cfg.general.me_single_endpoint_shadow_rotate_every_secs,
                    cfg.general.me_floor_mode,
                    cfg.general.me_adaptive_floor_idle_secs,
                    cfg.general.me_adaptive_floor_min_writers_single_endpoint,
                    cfg.general.me_adaptive_floor_min_writers_multi_endpoint,
                    cfg.general.me_adaptive_floor_recover_grace_secs,
                    cfg.general.me_adaptive_floor_writers_per_core_total,
                    cfg.general.me_adaptive_floor_cpu_cores_override,
                    cfg.general.me_adaptive_floor_max_extra_writers_single_per_core,
                    cfg.general.me_adaptive_floor_max_extra_writers_multi_per_core,
                    cfg.general.me_adaptive_floor_max_active_writers_per_core,
                    cfg.general.me_adaptive_floor_max_warm_writers_per_core,
                    cfg.general.me_adaptive_floor_max_active_writers_global,
                    cfg.general.me_adaptive_floor_max_warm_writers_global,
                    cfg.general.me_health_interval_ms_unhealthy,
                    cfg.general.me_health_interval_ms_healthy,
                    cfg.general.me_warn_rate_limit_ms,
                );
                let new_secs = cfg.general.effective_update_every_secs().max(1);
                if new_secs == update_every_secs {
                    continue;
                }

                if new_secs < update_every_secs {
                    info!(
                        old_update_every_secs = update_every_secs,
                        new_update_every_secs = new_secs,
                        "ME config updater interval decreased, running immediate refresh"
                    );
                    update_every_secs = new_secs;
                    update_every = Duration::from_secs(update_every_secs);
                    run_update_cycle(&pool, cfg.as_ref(), &mut state, &reinit_tx).await;
                    next_tick = tokio::time::Instant::now() + update_every;
                } else {
                    info!(
                        old_update_every_secs = update_every_secs,
                        new_update_every_secs = new_secs,
                        "ME config updater interval increased"
                    );
                    update_every_secs = new_secs;
                    update_every = Duration::from_secs(update_every_secs);
                    next_tick = tokio::time::Instant::now() + update_every;
                }
            }
        }
    }
}
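// A minimal wiring sketch for the updater, assuming a MePool has already been
// built elsewhere; variable names and the channel capacity are illustrative:
//
//     let (cfg_tx, cfg_rx) = watch::channel(Arc::new(initial_cfg));
//     let (reinit_tx, reinit_rx) = mpsc::channel::<MeReinitTrigger>(8);
//     tokio::spawn(me_config_updater(pool.clone(), cfg_rx, reinit_tx));
//
// cfg_tx.send(new_cfg) wakes the select! above and can trigger an immediate
// refresh when the interval shrinks; reinit_rx is drained by the rotation task.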

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn parse_ipv6_bracketed() {
        let line = "proxy_for 2 [2001:67c:04e8:f002::d]:80;";
        let res = parse_proxy_line(line).unwrap();
        assert_eq!(res.0, 2);
        assert_eq!(res.1, "2001:67c:04e8:f002::d".parse::<IpAddr>().unwrap());
        assert_eq!(res.2, 80);
    }

    #[test]
    fn parse_ipv6_plain() {
        let line = "proxy_for 2 2001:67c:04e8:f002::d:80;";
        let res = parse_proxy_line(line).unwrap();
        assert_eq!(res.0, 2);
        assert_eq!(res.1, "2001:67c:04e8:f002::d".parse::<IpAddr>().unwrap());
        assert_eq!(res.2, 80);
    }

    #[test]
    fn parse_ipv4() {
        let line = "proxy_for 4 91.108.4.195:8888;";
        let res = parse_proxy_line(line).unwrap();
        assert_eq!(res.0, 4);
        assert_eq!(res.1, "91.108.4.195".parse::<IpAddr>().unwrap());
        assert_eq!(res.2, 8888);
    }
}
658
src/transport/middle_proxy/handshake.rs
Normal file
@@ -0,0 +1,658 @@
use std::net::{IpAddr, SocketAddr};
use std::sync::atomic::Ordering;
use std::time::{Duration, Instant};
use std::collections::hash_map::DefaultHasher;
use std::hash::{Hash, Hasher};
use socket2::{SockRef, TcpKeepalive};
#[cfg(target_os = "linux")]
use libc;
#[cfg(target_os = "linux")]
use std::os::fd::{AsRawFd, RawFd};
#[cfg(target_os = "linux")]
use std::os::raw::c_int;

use bytes::BytesMut;
use tokio::io::{AsyncReadExt, AsyncWriteExt, ReadHalf, WriteHalf};
use tokio::net::{TcpStream, TcpSocket};
use tokio::time::timeout;
use tracing::{debug, info, warn};

use crate::config::MeSocksKdfPolicy;
use crate::crypto::{SecureRandom, build_middleproxy_prekey, derive_middleproxy_keys, sha256};
use crate::error::{ProxyError, Result};
use crate::network::IpFamily;
use crate::network::probe::is_bogon;
use crate::protocol::constants::{
    ME_CONNECT_TIMEOUT_SECS, ME_HANDSHAKE_TIMEOUT_SECS, RPC_CRYPTO_AES_U32,
    RPC_HANDSHAKE_ERROR_U32, rpc_crypto_flags,
};
use crate::transport::{UpstreamEgressInfo, UpstreamRouteKind};

use super::codec::{
    RpcChecksumMode, build_handshake_payload, build_nonce_payload, build_rpc_frame,
    cbc_decrypt_inplace, cbc_encrypt_padded, parse_handshake_flags, parse_nonce_payload,
    read_rpc_frame_plaintext, rpc_crc,
};
use super::wire::{extract_ip_material, IpMaterial};
use super::MePool;

const ME_KDF_DRIFT_STRICT: bool = false;

#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)]
enum KdfClientPortSource {
    LocalSocket = 0,
    SocksBound = 1,
}

impl KdfClientPortSource {
    fn from_socks_bound_port(socks_bound_port: Option<u16>) -> Self {
        if socks_bound_port.is_some() {
            Self::SocksBound
        } else {
            Self::LocalSocket
        }
    }
}

/// Result of a successful ME handshake with timings.
pub(crate) struct HandshakeOutput {
    pub rd: ReadHalf<TcpStream>,
    pub wr: WriteHalf<TcpStream>,
    pub read_key: [u8; 32],
    pub read_iv: [u8; 16],
    pub write_key: [u8; 32],
    pub write_iv: [u8; 16],
    pub crc_mode: RpcChecksumMode,
    pub handshake_ms: f64,
}

impl MePool {
    fn kdf_material_fingerprint(
        local_ip_nat: IpAddr,
        peer_addr_nat: SocketAddr,
        reflected_ip: Option<IpAddr>,
        socks_bound_ip: Option<IpAddr>,
        client_port_source: KdfClientPortSource,
    ) -> u64 {
        let mut hasher = DefaultHasher::new();
        local_ip_nat.hash(&mut hasher);
        peer_addr_nat.hash(&mut hasher);
        reflected_ip.hash(&mut hasher);
        socks_bound_ip.hash(&mut hasher);
        client_port_source.hash(&mut hasher);
        hasher.finish()
    }

    async fn resolve_dc_idx_for_endpoint(&self, addr: SocketAddr) -> Option<i16> {
        i16::try_from(self.resolve_dc_for_endpoint(addr).await).ok()
    }

    fn direct_bind_ip_for_stun(
        family: IpFamily,
        upstream_egress: Option<UpstreamEgressInfo>,
    ) -> Option<IpAddr> {
        let info = upstream_egress?;
        if info.route_kind != UpstreamRouteKind::Direct {
            return None;
        }
        match (family, info.direct_bind_ip) {
            (IpFamily::V4, Some(IpAddr::V4(ip))) => Some(IpAddr::V4(ip)),
            (IpFamily::V6, Some(IpAddr::V6(ip))) => Some(IpAddr::V6(ip)),
            _ => None,
        }
    }

    fn select_socks_bound_addr(
        family: IpFamily,
        upstream_egress: Option<UpstreamEgressInfo>,
    ) -> Option<SocketAddr> {
        let info = upstream_egress?;
        if !matches!(
            info.route_kind,
            UpstreamRouteKind::Socks4 | UpstreamRouteKind::Socks5
        ) {
            return None;
        }
        let bound = info.socks_bound_addr?;
        let family_matches = matches!(
            (family, bound.ip()),
            (IpFamily::V4, IpAddr::V4(_)) | (IpFamily::V6, IpAddr::V6(_))
        );
        if !family_matches || is_bogon(bound.ip()) || bound.ip().is_unspecified() {
            return None;
        }
        Some(bound)
    }

    fn is_socks_route(upstream_egress: Option<UpstreamEgressInfo>) -> bool {
        matches!(
            upstream_egress.map(|info| info.route_kind),
            Some(UpstreamRouteKind::Socks4 | UpstreamRouteKind::Socks5)
        )
    }

    /// TCP connect with timeout + return RTT in milliseconds.
    pub(crate) async fn connect_tcp(
        &self,
        addr: SocketAddr,
        dc_idx_override: Option<i16>,
    ) -> Result<(TcpStream, f64, Option<UpstreamEgressInfo>)> {
        let start = Instant::now();
        let (stream, upstream_egress) = if let Some(upstream) = &self.upstream {
            let dc_idx = if let Some(dc_idx) = dc_idx_override {
                Some(dc_idx)
            } else {
                self.resolve_dc_idx_for_endpoint(addr).await
            };
            let (stream, egress) = upstream.connect_with_details(addr, dc_idx, None).await?;
            (stream, Some(egress))
        } else {
            let connect_fut = async {
                if addr.is_ipv6()
                    && let Some(v6) = self.detected_ipv6
                {
                    match TcpSocket::new_v6() {
                        Ok(sock) => {
                            if let Err(e) = sock.bind(SocketAddr::new(IpAddr::V6(v6), 0)) {
                                debug!(error = %e, bind_ip = %v6, "ME IPv6 bind failed, falling back to default bind");
                            } else {
                                match sock.connect(addr).await {
                                    Ok(stream) => return Ok(stream),
                                    Err(e) => debug!(error = %e, target = %addr, "ME IPv6 bound connect failed, retrying default connect"),
                                }
                            }
                        }
                        Err(e) => debug!(error = %e, "ME IPv6 socket creation failed, falling back to default connect"),
                    }
                }
                TcpStream::connect(addr).await
            };

            let stream = timeout(Duration::from_secs(ME_CONNECT_TIMEOUT_SECS), connect_fut)
                .await
                .map_err(|_| ProxyError::ConnectionTimeout {
                    addr: addr.to_string(),
                })??;
            (stream, None)
        };

        let connect_ms = start.elapsed().as_secs_f64() * 1000.0;
        stream.set_nodelay(true).ok();
        if let Err(e) = Self::configure_keepalive(&stream) {
            warn!(error = %e, "ME keepalive setup failed");
        }
        #[cfg(target_os = "linux")]
        if let Err(e) = Self::configure_user_timeout(stream.as_raw_fd()) {
            warn!(error = %e, "ME TCP_USER_TIMEOUT setup failed");
        }
        Ok((stream, connect_ms, upstream_egress))
    }

    fn configure_keepalive(stream: &TcpStream) -> std::io::Result<()> {
        let sock = SockRef::from(stream);
        let ka = TcpKeepalive::new()
            .with_time(Duration::from_secs(30))
            .with_interval(Duration::from_secs(10))
            .with_retries(3);
        sock.set_tcp_keepalive(&ka)?;
        sock.set_keepalive(true)?;
        Ok(())
    }

    #[cfg(target_os = "linux")]
    fn configure_user_timeout(fd: RawFd) -> std::io::Result<()> {
        let timeout_ms: c_int = 30_000;
        let rc = unsafe {
            libc::setsockopt(
                fd,
                libc::IPPROTO_TCP,
                libc::TCP_USER_TIMEOUT,
                &timeout_ms as *const _ as *const libc::c_void,
                std::mem::size_of_val(&timeout_ms) as libc::socklen_t,
            )
        };
        if rc != 0 {
            return Err(std::io::Error::last_os_error());
        }
        Ok(())
    }

    /// Perform full ME RPC handshake on an established TCP stream.
    /// Returns cipher keys/ivs and split halves; does not register writer.
    pub(crate) async fn handshake_only(
        &self,
        stream: TcpStream,
        addr: SocketAddr,
        upstream_egress: Option<UpstreamEgressInfo>,
        rng: &SecureRandom,
    ) -> Result<HandshakeOutput> {
        let hs_start = Instant::now();

        let local_addr = stream.local_addr().map_err(ProxyError::Io)?;
        let transport_peer_addr = stream.peer_addr().map_err(ProxyError::Io)?;
        let peer_addr = addr;

        let _ = self.maybe_detect_nat_ip(local_addr.ip()).await;
        let family = if local_addr.ip().is_ipv4() {
            IpFamily::V4
        } else {
            IpFamily::V6
        };
        let is_socks_route = Self::is_socks_route(upstream_egress);
        let socks_bound_addr = Self::select_socks_bound_addr(family, upstream_egress);
        let reflected = if let Some(bound) = socks_bound_addr {
            Some(bound)
        } else if is_socks_route {
            match self.socks_kdf_policy() {
                MeSocksKdfPolicy::Strict => {
                    self.stats.increment_me_socks_kdf_strict_reject();
                    return Err(ProxyError::InvalidHandshake(
                        "SOCKS route returned no valid BND.ADDR for ME KDF (strict policy)"
                            .to_string(),
                    ));
                }
                MeSocksKdfPolicy::Compat => {
                    self.stats.increment_me_socks_kdf_compat_fallback();
                    if self.nat_probe {
                        let bind_ip = Self::direct_bind_ip_for_stun(family, upstream_egress);
                        self.maybe_reflect_public_addr(family, bind_ip).await
                    } else {
                        None
                    }
                }
            }
        } else if self.nat_probe {
            let bind_ip = Self::direct_bind_ip_for_stun(family, upstream_egress);
            self.maybe_reflect_public_addr(family, bind_ip).await
        } else {
            None
        };

        let local_addr_nat = self.translate_our_addr_with_reflection(local_addr, reflected);
        let peer_addr_nat =
            SocketAddr::new(self.translate_ip_for_nat(peer_addr.ip()), peer_addr.port());
        let (mut rd, mut wr) = tokio::io::split(stream);

        let my_nonce: [u8; 16] = rng.bytes(16).try_into().unwrap();
        let crypto_ts = std::time::SystemTime::now()
            .duration_since(std::time::UNIX_EPOCH)
            .unwrap_or_default()
            .as_secs() as u32;

        let secret_atomic_snapshot = self.secret_atomic_snapshot.load(Ordering::Relaxed);
        let (ks, secret) = if secret_atomic_snapshot {
            let snapshot = self.secret_snapshot().await;
            (snapshot.key_selector, snapshot.secret)
        } else {
            // Backward-compatible mode: key selector and secret may come from different updates.
            let key_selector = self.key_selector().await;
            let secret = self.secret_snapshot().await.secret;
            (key_selector, secret)
        };
        let nonce_payload = build_nonce_payload(ks, crypto_ts, &my_nonce);
        let nonce_frame = build_rpc_frame(-2, &nonce_payload, RpcChecksumMode::Crc32);
        let dump = hex_dump(&nonce_frame[..nonce_frame.len().min(44)]);
        debug!(
            key_selector = format_args!("0x{ks:08x}"),
            crypto_ts,
            frame_len = nonce_frame.len(),
            nonce_frame_hex = %dump,
            "Sending ME nonce frame"
        );
        wr.write_all(&nonce_frame).await.map_err(ProxyError::Io)?;
        wr.flush().await.map_err(ProxyError::Io)?;

        let (srv_seq, srv_nonce_payload) = timeout(
            Duration::from_secs(ME_HANDSHAKE_TIMEOUT_SECS),
            read_rpc_frame_plaintext(&mut rd),
        )
        .await
        .map_err(|_| ProxyError::TgHandshakeTimeout)??;

        if srv_seq != -2 {
            return Err(ProxyError::InvalidHandshake(format!("Expected seq=-2, got {srv_seq}")));
        }

        let (srv_key_select, schema, srv_ts, srv_nonce) = parse_nonce_payload(&srv_nonce_payload)?;
        if schema != RPC_CRYPTO_AES_U32 {
            warn!(schema = format_args!("0x{schema:08x}"), "Unsupported ME crypto schema");
            return Err(ProxyError::InvalidHandshake(format!(
                "Unsupported crypto schema: 0x{schema:x}"
            )));
        }

        if srv_key_select != ks {
            return Err(ProxyError::InvalidHandshake(format!(
                "Server key_select 0x{srv_key_select:08x} != client 0x{ks:08x}"
            )));
        }

        let skew = crypto_ts.abs_diff(srv_ts);
        if skew > 30 {
            return Err(ProxyError::InvalidHandshake(format!(
                "nonce crypto_ts skew too large: client={crypto_ts}, server={srv_ts}, skew={skew}s"
            )));
        }

        info!(
            %local_addr,
            %local_addr_nat,
            reflected_ip = reflected.map(|r| r.ip()).as_ref().map(ToString::to_string),
            %peer_addr,
            %transport_peer_addr,
            %peer_addr_nat,
            socks_bound_addr = socks_bound_addr.map(|v| v.to_string()),
            key_selector = format_args!("0x{ks:08x}"),
            crypto_schema = format_args!("0x{schema:08x}"),
            skew_secs = skew,
            "ME key derivation parameters"
        );

        let ts_bytes = crypto_ts.to_le_bytes();
        let server_port_bytes = peer_addr_nat.port().to_le_bytes();
        let socks_bound_port = socks_bound_addr
            .map(|bound| bound.port())
            .filter(|port| *port != 0);
        let client_port_for_kdf = socks_bound_port.unwrap_or(local_addr_nat.port());
        let client_port_source = KdfClientPortSource::from_socks_bound_port(socks_bound_port);
        let kdf_fingerprint = Self::kdf_material_fingerprint(
            local_addr_nat.ip(),
            peer_addr_nat,
            reflected.map(|value| value.ip()),
            socks_bound_addr.map(|value| value.ip()),
            client_port_source,
        );
        let previous_kdf_fingerprint = {
            let kdf_fingerprint_guard = self.kdf_material_fingerprint.read().await;
            kdf_fingerprint_guard.get(&peer_addr_nat).copied()
        };
        if let Some((prev_fingerprint, prev_client_port)) = previous_kdf_fingerprint {
            if prev_fingerprint != kdf_fingerprint {
                self.stats.increment_me_kdf_drift_total();
                warn!(
                    %peer_addr_nat,
                    %local_addr_nat,
                    client_port_for_kdf,
                    client_port_source = ?client_port_source,
                    "ME KDF material drift detected for endpoint"
                );
                if ME_KDF_DRIFT_STRICT {
                    return Err(ProxyError::InvalidHandshake(
                        "ME KDF material drift detected (strict mode)".to_string(),
                    ));
                }
            } else if prev_client_port != client_port_for_kdf {
                self.stats.increment_me_kdf_port_only_drift_total();
                debug!(
                    %peer_addr_nat,
                    previous_client_port_for_kdf = prev_client_port,
                    client_port_for_kdf,
                    client_port_source = ?client_port_source,
                    "ME KDF client port changed with stable material"
                );
            }
        }
        // Keep fingerprint updates eventually consistent for diagnostics while avoiding
        // serializing all concurrent handshakes on a single async mutex.
        let mut kdf_fingerprint_guard = self.kdf_material_fingerprint.write().await;
        kdf_fingerprint_guard.insert(peer_addr_nat, (kdf_fingerprint, client_port_for_kdf));
        drop(kdf_fingerprint_guard);

        let client_port_bytes = client_port_for_kdf.to_le_bytes();

        let server_ip = extract_ip_material(peer_addr_nat);
        let client_ip = extract_ip_material(local_addr_nat);

        let (srv_ip_opt, clt_ip_opt, clt_v6_opt, srv_v6_opt, hs_our_ip, hs_peer_ip) =
            match (server_ip, client_ip) {
                (IpMaterial::V4(mut srv), IpMaterial::V4(mut clt)) => {
                    srv.reverse();
                    clt.reverse();
                    (Some(srv), Some(clt), None, None, clt, srv)
                }
                (IpMaterial::V6(srv), IpMaterial::V6(clt)) => {
                    let zero = [0u8; 4];
                    (None, None, Some(clt), Some(srv), zero, zero)
                }
                _ => {
                    return Err(ProxyError::InvalidHandshake(
                        "mixed IPv4/IPv6 endpoints are not supported for ME key derivation"
                            .to_string(),
                    ));
                }
            };

        let diag_level: u8 = std::env::var("ME_DIAG")
            .ok()
            .and_then(|v| v.parse().ok())
            .unwrap_or(0);

        let prekey_client = build_middleproxy_prekey(
            &srv_nonce,
            &my_nonce,
            &ts_bytes,
            srv_ip_opt.as_ref().map(|x| &x[..]),
            &client_port_bytes,
            b"CLIENT",
            clt_ip_opt.as_ref().map(|x| &x[..]),
            &server_port_bytes,
            &secret,
            clt_v6_opt.as_ref(),
            srv_v6_opt.as_ref(),
        );
        let prekey_server = build_middleproxy_prekey(
            &srv_nonce,
            &my_nonce,
            &ts_bytes,
            srv_ip_opt.as_ref().map(|x| &x[..]),
            &client_port_bytes,
            b"SERVER",
            clt_ip_opt.as_ref().map(|x| &x[..]),
            &server_port_bytes,
            &secret,
            clt_v6_opt.as_ref(),
            srv_v6_opt.as_ref(),
        );

        let (wk, wi) = derive_middleproxy_keys(
            &srv_nonce,
            &my_nonce,
            &ts_bytes,
            srv_ip_opt.as_ref().map(|x| &x[..]),
            &client_port_bytes,
            b"CLIENT",
            clt_ip_opt.as_ref().map(|x| &x[..]),
            &server_port_bytes,
            &secret,
            clt_v6_opt.as_ref(),
            srv_v6_opt.as_ref(),
        );
        let (rk, ri) = derive_middleproxy_keys(
            &srv_nonce,
            &my_nonce,
            &ts_bytes,
            srv_ip_opt.as_ref().map(|x| &x[..]),
            &client_port_bytes,
            b"SERVER",
            clt_ip_opt.as_ref().map(|x| &x[..]),
            &server_port_bytes,
            &secret,
            clt_v6_opt.as_ref(),
            srv_v6_opt.as_ref(),
        );

        let requested_crc_mode = RpcChecksumMode::Crc32c;
        let hs_payload = build_handshake_payload(
            hs_our_ip,
            local_addr.port(),
            hs_peer_ip,
            peer_addr.port(),
            requested_crc_mode.advertised_flags(),
        );
        let hs_frame = build_rpc_frame(-1, &hs_payload, RpcChecksumMode::Crc32);
        if diag_level >= 1 {
            info!(
                write_key = %hex_dump(&wk),
                write_iv = %hex_dump(&wi),
                read_key = %hex_dump(&rk),
                read_iv = %hex_dump(&ri),
                srv_ip = %srv_ip_opt.map(|ip| hex_dump(&ip)).unwrap_or_default(),
                clt_ip = %clt_ip_opt.map(|ip| hex_dump(&ip)).unwrap_or_default(),
                srv_port = %hex_dump(&server_port_bytes),
                clt_port = %hex_dump(&client_port_bytes),
                crypto_ts = %hex_dump(&ts_bytes),
                nonce_srv = %hex_dump(&srv_nonce),
                nonce_clt = %hex_dump(&my_nonce),
                prekey_sha256_client = %hex_dump(&sha256(&prekey_client)),
                prekey_sha256_server = %hex_dump(&sha256(&prekey_server)),
                hs_plain = %hex_dump(&hs_frame),
                proxy_secret_sha256 = %hex_dump(&sha256(&secret)),
                "ME diag: derived keys and handshake plaintext"
            );
        }
        if diag_level >= 2 {
            info!(
                prekey_client = %hex_dump(&prekey_client),
                prekey_server = %hex_dump(&prekey_server),
                "ME diag: full prekey buffers"
            );
        }

        let (encrypted_hs, write_iv) = cbc_encrypt_padded(&wk, &wi, &hs_frame)?;
        if diag_level >= 1 {
            info!(
                hs_cipher = %hex_dump(&encrypted_hs),
                "ME diag: handshake ciphertext"
            );
        }
        wr.write_all(&encrypted_hs).await.map_err(ProxyError::Io)?;
        wr.flush().await.map_err(ProxyError::Io)?;

        let deadline = Instant::now() + Duration::from_secs(ME_HANDSHAKE_TIMEOUT_SECS);
        let mut enc_buf = BytesMut::with_capacity(256);
        let mut dec_buf = BytesMut::with_capacity(256);
        let mut read_iv = ri;
        let mut negotiated_crc_mode = RpcChecksumMode::Crc32;
        let mut handshake_ok = false;

        while Instant::now() < deadline && !handshake_ok {
            let remaining = deadline - Instant::now();
            let mut tmp = [0u8; 256];
            let n = match timeout(remaining, rd.read(&mut tmp)).await {
                Ok(Ok(0)) => {
                    return Err(ProxyError::Io(std::io::Error::new(
                        std::io::ErrorKind::UnexpectedEof,
                        "ME closed during handshake",
                    )));
                }
                Ok(Ok(n)) => n,
                Ok(Err(e)) => return Err(ProxyError::Io(e)),
                Err(_) => return Err(ProxyError::TgHandshakeTimeout),
            };

            enc_buf.extend_from_slice(&tmp[..n]);

            let blocks = enc_buf.len() / 16 * 16;
            if blocks > 0 {
                let mut chunk = vec![0u8; blocks];
                chunk.copy_from_slice(&enc_buf[..blocks]);
                read_iv = cbc_decrypt_inplace(&rk, &read_iv, &mut chunk)?;
                dec_buf.extend_from_slice(&chunk);
                let _ = enc_buf.split_to(blocks);
            }

            while dec_buf.len() >= 4 {
                let fl = u32::from_le_bytes(dec_buf[0..4].try_into().unwrap()) as usize;

                if fl == 4 {
                    let _ = dec_buf.split_to(4);
                    continue;
                }
                if !(12..=(1 << 24)).contains(&fl) {
                    return Err(ProxyError::InvalidHandshake(format!(
                        "Bad HS response frame len: {fl}"
                    )));
                }
                if dec_buf.len() < fl {
                    break;
                }

                let frame = dec_buf.split_to(fl);
                let pe = fl - 4;
                let ec = u32::from_le_bytes(frame[pe..pe + 4].try_into().unwrap());
                let ac = rpc_crc(RpcChecksumMode::Crc32, &frame[..pe]);
                if ec != ac {
                    return Err(ProxyError::InvalidHandshake(format!(
                        "HS CRC mismatch: 0x{ec:08x} vs 0x{ac:08x}"
                    )));
                }

                let hs_payload = &frame[8..pe];
                if hs_payload.len() < 4 {
                    return Err(ProxyError::InvalidHandshake(
                        "Handshake payload too short".to_string(),
                    ));
                }
                let hs_type = u32::from_le_bytes(hs_payload[0..4].try_into().unwrap());
                if hs_type == RPC_HANDSHAKE_ERROR_U32 {
                    let err_code = if hs_payload.len() >= 8 {
                        i32::from_le_bytes(hs_payload[4..8].try_into().unwrap())
                    } else {
                        -1
                    };
                    self.stats.increment_me_handshake_reject_total();
                    self.stats.increment_me_handshake_error_code(err_code);
                    return Err(ProxyError::InvalidHandshake(format!(
                        "ME rejected handshake (error={err_code})"
                    )));
                }
                let hs_flags = parse_handshake_flags(hs_payload)?;
                if hs_flags & 0xff != 0 {
                    return Err(ProxyError::InvalidHandshake(format!(
                        "Unsupported handshake flags: 0x{hs_flags:08x}"
                    )));
                }
                negotiated_crc_mode = if (hs_flags & requested_crc_mode.advertised_flags()) != 0 {
                    RpcChecksumMode::from_handshake_flags(hs_flags)
                } else if (hs_flags & rpc_crypto_flags::USE_CRC32C) != 0 {
                    return Err(ProxyError::InvalidHandshake(format!(
                        "Peer negotiated unsupported CRC flags: 0x{hs_flags:08x}"
                    )));
                } else {
                    RpcChecksumMode::Crc32
                };

                handshake_ok = true;
                break;
            }
        }

        if !handshake_ok {
            return Err(ProxyError::TgHandshakeTimeout);
        }

        let handshake_ms = hs_start.elapsed().as_secs_f64() * 1000.0;
        info!(%addr, "RPC handshake OK");

        Ok(HandshakeOutput {
            rd,
            wr,
            read_key: rk,
            read_iv,
            write_key: wk,
            write_iv,
            crc_mode: negotiated_crc_mode,
            handshake_ms,
        })
    }
}
fn hex_dump(data: &[u8]) -> String {
    const MAX: usize = 64;
    let mut out = String::with_capacity(data.len() * 2 + 3);
    for (i, b) in data.iter().take(MAX).enumerate() {
        if i > 0 {
            out.push(' ');
        }
        out.push_str(&format!("{b:02x}"));
    }
    if data.len() > MAX {
        out.push_str(" …");
    }
    out
}
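// A minimal sketch of the hex_dump() output format: two lowercase hex digits
// per byte, space-separated, with " …" appended only past the 64-byte cap.
#[cfg(test)]
mod hex_dump_sketch {
    use super::hex_dump;

    #[test]
    fn small_buffer_format() {
        assert_eq!(hex_dump(&[0xde, 0xad, 0xbe, 0xef]), "de ad be ef");
    }
}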
1224
src/transport/middle_proxy/health.rs
Normal file
File diff suppressed because it is too large
Some files were not shown because too many files have changed in this diff.