mirror of
https://github.com/telemt/telemt.git
synced 2026-04-14 17:14:09 +03:00
Compare commits
610 Commits
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
4ea2226dcd | ||
|
|
d752a440e5 | ||
|
|
5ce2ee2dae | ||
|
|
6fd9f0595d | ||
|
|
fcdd8a9796 | ||
|
|
640468d4e7 | ||
|
|
02fe89f7d0 | ||
|
|
24df865503 | ||
|
|
e9f8c79498 | ||
|
|
24ff75701e | ||
|
|
4221230969 | ||
|
|
d87196c105 | ||
|
|
da89415961 | ||
|
|
2d98ebf3c3 | ||
|
|
fb5e9947bd | ||
|
|
2ea85c00d3 | ||
|
|
2a3b6b917f | ||
|
|
83ed9065b0 | ||
|
|
44b825edf5 | ||
|
|
487e95a66e | ||
|
|
c465c200c4 | ||
|
|
d7716ad875 | ||
|
|
edce194948 | ||
|
|
13fdff750d | ||
|
|
bdcf110c87 | ||
|
|
dd12997744 | ||
|
|
fc160913bf | ||
|
|
92c22ef16d | ||
|
|
aff22d0855 | ||
|
|
b3d3bca15a | ||
|
|
92f38392eb | ||
|
|
30ef8df1b3 | ||
|
|
2e174adf16 | ||
|
|
4e803b1412 | ||
|
|
9b174318ce | ||
|
|
99edcbe818 | ||
|
|
ef7dc2b80f | ||
|
|
691607f269 | ||
|
|
55561a23bc | ||
|
|
f32c34f126 | ||
|
|
8f3bdaec2c | ||
|
|
69b02caf77 | ||
|
|
3854955069 | ||
|
|
9b84fc7a5b | ||
|
|
e7cb9238dc | ||
|
|
0e2cbe6178 | ||
|
|
cd076aeeeb | ||
|
|
d683faf922 | ||
|
|
0494f8ac8b | ||
|
|
48ce59900e | ||
|
|
84e95fd229 | ||
|
|
a80be78345 | ||
|
|
64130dd02e | ||
|
|
d62a6e0417 | ||
|
|
3260746785 | ||
|
|
8066ea2163 | ||
|
|
813f1df63e | ||
|
|
09bdafa718 | ||
|
|
fb0f75df43 | ||
|
|
39255df549 | ||
|
|
456495fd62 | ||
|
|
83cadc0bf3 | ||
|
|
0b1a8cd3f8 | ||
|
|
565b4ee923 | ||
|
|
7a9c1e79c2 | ||
|
|
02c6af4912 | ||
|
|
8ba4dea59f | ||
|
|
ccfda10713 | ||
|
|
bd1327592e | ||
|
|
30b22fe2bf | ||
|
|
651f257a5d | ||
|
|
a9209fd3c7 | ||
|
|
4ae4ca8ca8 | ||
|
|
8be1ddc0d8 | ||
|
|
b55fa5ec8f | ||
|
|
16c6ce850e | ||
|
|
12251e730f | ||
|
|
925b10f9fc | ||
|
|
306b653318 | ||
|
|
8791a52b7e | ||
|
|
0d9470a840 | ||
|
|
0d320c20e0 | ||
|
|
9b3ba2e1c6 | ||
|
|
dbadbf0221 | ||
|
|
173624c838 | ||
|
|
de2047adf2 | ||
|
|
5df2fe9f97 | ||
|
|
2510ebaa79 | ||
|
|
314f30a434 | ||
|
|
c86a511638 | ||
|
|
f1efaf4491 | ||
|
|
716b4adef2 | ||
|
|
5876623bb0 | ||
|
|
6b9c7f7862 | ||
|
|
7ea6387278 | ||
|
|
4c2bc2f41f | ||
|
|
c86f35f059 | ||
|
|
3492566842 | ||
|
|
349bbbb8fa | ||
|
|
ead08981e7 | ||
|
|
068cf825b9 | ||
|
|
7269dfbdc5 | ||
|
|
533708f885 | ||
|
|
5e93ce258f | ||
|
|
1236505502 | ||
|
|
f7d451e689 | ||
|
|
e11da6d2ae | ||
|
|
d31b4cd6c8 | ||
|
|
f4ec6bb303 | ||
|
|
a6132bac38 | ||
|
|
624870109e | ||
|
|
cdf829de91 | ||
|
|
6ef51dbfb0 | ||
|
|
af5f0b9692 | ||
|
|
bd0dcfff15 | ||
|
|
ec4e48808e | ||
|
|
c293901669 | ||
|
|
f4e5a08614 | ||
|
|
430a0ae6b4 | ||
|
|
53d93880ad | ||
|
|
1706698a83 | ||
|
|
cb0832b803 | ||
|
|
c01ca40b6d | ||
|
|
cfec6dbb3c | ||
|
|
1fe1acadd4 | ||
|
|
225fc3e4ea | ||
|
|
4a0d88ad43 | ||
|
|
58ff0c7971 | ||
|
|
7d39bf1698 | ||
|
|
3b8eea762b | ||
|
|
07ec84d071 | ||
|
|
235642459a | ||
|
|
3799fc13c4 | ||
|
|
71261522bd | ||
|
|
762deac511 | ||
|
|
4300720d35 | ||
|
|
b7a8e759eb | ||
|
|
1a68dc1c2d | ||
|
|
a6d22e8a57 | ||
|
|
9477103f89 | ||
|
|
e589891706 | ||
|
|
fad4b652c4 | ||
|
|
96bfc223fe | ||
|
|
265b9a5f11 | ||
|
|
74ad9037de | ||
|
|
49f4a7bb22 | ||
|
|
ac453638b8 | ||
|
|
e7773b2bda | ||
|
|
6f1980dfd7 | ||
|
|
427fbef50f | ||
|
|
08609f4b6d | ||
|
|
501d802b8d | ||
|
|
e8ff39d2ae | ||
|
|
6c1b837d5b | ||
|
|
b112908c86 | ||
|
|
1e400d4cc2 | ||
|
|
a11c8b659b | ||
|
|
bc432f06e2 | ||
|
|
338636ede6 | ||
|
|
c05779208e | ||
|
|
7ba21ec5a8 | ||
|
|
d997c0b216 | ||
|
|
62cf4f0a1c | ||
|
|
e710fefed2 | ||
|
|
edef06edb5 | ||
|
|
7a0b015e65 | ||
|
|
8b2ec35c46 | ||
|
|
d324d84ec7 | ||
|
|
47b12f9489 | ||
|
|
a5967d0ca3 | ||
|
|
44cdfd4b23 | ||
|
|
25ffcf6081 | ||
|
|
60322807b6 | ||
|
|
ed93b0a030 | ||
|
|
2370c8d5e4 | ||
|
|
a3197b0fe1 | ||
|
|
e27ef04c3d | ||
|
|
cf7e2ebf4b | ||
|
|
685bfafe74 | ||
|
|
0f6fcf49a7 | ||
|
|
036f0e1569 | ||
|
|
291c22583f | ||
|
|
ee5b01bb31 | ||
|
|
ccacf78890 | ||
|
|
42db1191a8 | ||
|
|
9ce26d16cb | ||
|
|
12e68f805f | ||
|
|
62bf31fc73 | ||
|
|
29d4636249 | ||
|
|
9afaa28add | ||
|
|
6c12af2b94 | ||
|
|
8b39a4ef6d | ||
|
|
fa2423dadf | ||
|
|
449a87d2e3 | ||
|
|
a61882af6e | ||
|
|
bf11ebbaa3 | ||
|
|
e0d5561095 | ||
|
|
6b8aa7270e | ||
|
|
372f477927 | ||
|
|
05edbab06c | ||
|
|
3d9660f83e | ||
|
|
ac064fe773 | ||
|
|
eba158ff8b | ||
|
|
54ee6ff810 | ||
|
|
6d6cd30227 | ||
|
|
60231224ac | ||
|
|
144f81c473 | ||
|
|
04e6135935 | ||
|
|
4eebb4feb2 | ||
|
|
1f255d0aa4 | ||
|
|
9d2ff25bf5 | ||
|
|
7782336264 | ||
|
|
92a3529733 | ||
|
|
8ce8348cd5 | ||
|
|
e25b7f5ff8 | ||
|
|
d7182ae817 | ||
|
|
97f2dc8489 | ||
|
|
fb1f85559c | ||
|
|
da684b11fe | ||
|
|
896e129155 | ||
|
|
7ead0cd753 | ||
|
|
6cf9687dd6 | ||
|
|
4e30a4999c | ||
|
|
4af40f7121 | ||
|
|
1e4ba2eb56 | ||
|
|
eb921e2b17 | ||
|
|
76f1b51018 | ||
|
|
03ce267865 | ||
|
|
a6bfa3309e | ||
|
|
79a3720fd5 | ||
|
|
89543aed35 | ||
|
|
06292ff833 | ||
|
|
427294b103 | ||
|
|
fed9346444 | ||
|
|
f40b645c05 | ||
|
|
a66d5d56bb | ||
|
|
1b1bdfe99a | ||
|
|
49fc11ddfa | ||
|
|
5558900c44 | ||
|
|
5b1d976392 | ||
|
|
206f87fe64 | ||
|
|
5a09d30e1c | ||
|
|
f83e23c521 | ||
|
|
f9e9ddd0f7 | ||
|
|
6b8619d3c9 | ||
|
|
618b7a1837 | ||
|
|
16f166cec8 | ||
|
|
6efcbe9bbf | ||
|
|
e5ad27e26e | ||
|
|
53ec96b040 | ||
|
|
c6c3d71b08 | ||
|
|
e9a4281015 | ||
|
|
866c2fbd96 | ||
|
|
086c85d851 | ||
|
|
ce4e21c996 | ||
|
|
25ab79406f | ||
|
|
7538967d3c | ||
|
|
4a95f6d195 | ||
|
|
7d7ef84868 | ||
|
|
692d9476b9 | ||
|
|
b00b87032b | ||
|
|
ee07325eba | ||
|
|
1b3a17aedc | ||
|
|
6fdb568381 | ||
|
|
bb97ff0df9 | ||
|
|
b1cd7f9727 | ||
|
|
c13c1cf7e3 | ||
|
|
d2f08fb707 | ||
|
|
2356ae5584 | ||
|
|
429fa63c95 | ||
|
|
50e15896b3 | ||
|
|
09f56dede2 | ||
|
|
d9ae7bb044 | ||
|
|
d6214c6bbf | ||
|
|
3d3ddd37d7 | ||
|
|
1d71b7e90c | ||
|
|
8ba7bc9052 | ||
|
|
3397d82924 | ||
|
|
78c45626e1 | ||
|
|
68c3abee6c | ||
|
|
267c8bf2f1 | ||
|
|
d38d7f2bee | ||
|
|
8b47fc3575 | ||
|
|
122e4729c5 | ||
|
|
08138451d8 | ||
|
|
267619d276 | ||
|
|
f710a2192a | ||
|
|
b40eed126d | ||
|
|
0e2d42624f | ||
|
|
1f486e0df2 | ||
|
|
a4af254107 | ||
|
|
3f0c53b010 | ||
|
|
890bd98b17 | ||
|
|
02cfe1305c | ||
|
|
81843cc56c | ||
|
|
f86ced8e62 | ||
|
|
e2e471a78c | ||
|
|
9aed6c8631 | ||
|
|
5a0e44e311 | ||
|
|
a917dcc162 | ||
|
|
872b47067a | ||
|
|
ef51d0f62d | ||
|
|
75bfbe6e95 | ||
|
|
fc2ac3d10f | ||
|
|
d8dcbbb61e | ||
|
|
d08ddd718a | ||
|
|
1dfe38c5db | ||
|
|
829dc16fa3 | ||
|
|
fab79ccc69 | ||
|
|
9e0b871c8f | ||
|
|
23af3cad5d | ||
|
|
c1990d81c2 | ||
|
|
065cf21c66 | ||
|
|
4011812fda | ||
|
|
b5d0564f2a | ||
|
|
cfe8fc72a5 | ||
|
|
3e4b98b002 | ||
|
|
427d65627c | ||
|
|
ae8124d6c6 | ||
|
|
06b9693cf0 | ||
|
|
869d1429ac | ||
|
|
eaba926fe5 | ||
|
|
536e6417a0 | ||
|
|
ecad96374a | ||
|
|
4895217828 | ||
|
|
d0a8d31c3c | ||
|
|
4d83cc1f04 | ||
|
|
c4c91863f0 | ||
|
|
aae3e2665e | ||
|
|
a5c7a41c49 | ||
|
|
7cc78a5746 | ||
|
|
cf96e686d1 | ||
|
|
d4d867156a | ||
|
|
8c1d66a03e | ||
|
|
6ff29e43d3 | ||
|
|
208020817a | ||
|
|
6864f49292 | ||
|
|
726fb77ccc | ||
|
|
69be44b2b6 | ||
|
|
07ca94ce57 | ||
|
|
d050c4794a | ||
|
|
197f9867e0 | ||
|
|
78dfc2bc39 | ||
|
|
fcf37a1a69 | ||
|
|
cc9e71a737 | ||
|
|
eb96fcbf76 | ||
|
|
ad167f9b1a | ||
|
|
df7bd39f25 | ||
|
|
f4c047748d | ||
|
|
c5f5b43494 | ||
|
|
b2aaf404e1 | ||
|
|
d552ae84d0 | ||
|
|
3ab56f55e9 | ||
|
|
06d2cdef78 | ||
|
|
1be4422431 | ||
|
|
3d3428ad4d | ||
|
|
eaff96b8c1 | ||
|
|
7bf6f3e071 | ||
|
|
c3ebb42120 | ||
|
|
8d93695194 | ||
|
|
40711fda09 | ||
|
|
6ce25c6600 | ||
|
|
1a525f7d29 | ||
|
|
2dcbdbe302 | ||
|
|
1bd495a224 | ||
|
|
b0e6c04c54 | ||
|
|
d5a7882ad1 | ||
|
|
83fc9d6db3 | ||
|
|
c9a043d8d5 | ||
|
|
a74bdf8aea | ||
|
|
94e9bfbbb9 | ||
|
|
18c1444904 | ||
|
|
3b89c1ce7e | ||
|
|
100cb92ad1 | ||
|
|
7da062e448 | ||
|
|
1fd78e012d | ||
|
|
7304dacd60 | ||
|
|
3bff0629ca | ||
|
|
a79f0bbaf5 | ||
|
|
aa535bba0a | ||
|
|
eb3245b78f | ||
|
|
da84151e9f | ||
|
|
a303fee65f | ||
|
|
bae811f8f1 | ||
|
|
8892860490 | ||
|
|
0d2958fea7 | ||
|
|
dbd9b53940 | ||
|
|
8f1f051a54 | ||
|
|
471c680def | ||
|
|
be8742a229 | ||
|
|
781947a08a | ||
|
|
b295712dbb | ||
|
|
e8454ea370 | ||
|
|
ea88a40c8f | ||
|
|
2ea4c83d9d | ||
|
|
953fab68c4 | ||
|
|
0f6621d359 | ||
|
|
82bb93e8da | ||
|
|
25b18ab064 | ||
|
|
3e0dc91db6 | ||
|
|
26270bc651 | ||
|
|
be2ec4b9b4 | ||
|
|
766806f5df | ||
|
|
26cf6ff4fa | ||
|
|
b8add81018 | ||
|
|
5be81952f3 | ||
|
|
7ce2e33bae | ||
|
|
9e2f0af5be | ||
|
|
4d72cb1680 | ||
|
|
79eebeb9ef | ||
|
|
1045289539 | ||
|
|
3d0b32edf5 | ||
|
|
41601a40fc | ||
|
|
a2cc503e81 | ||
|
|
5ee4556cea | ||
|
|
487aa8fbce | ||
|
|
32a9405002 | ||
|
|
708bedc95e | ||
|
|
ce64bf1cee | ||
|
|
f4b79f2f79 | ||
|
|
9a907a2470 | ||
|
|
e6839adc17 | ||
|
|
5e98b35fb7 | ||
|
|
af35ad3923 | ||
|
|
8f47fa6dd8 | ||
|
|
453fb477db | ||
|
|
42ae148e78 | ||
|
|
a7e840c19b | ||
|
|
1593fc4e53 | ||
|
|
fc8010a861 | ||
|
|
7293b8eb32 | ||
|
|
6934faaf93 | ||
|
|
66fdc3a34d | ||
|
|
0c4d9301ec | ||
|
|
f7a7fb94d4 | ||
|
|
85fff5e30a | ||
|
|
fc28c1ad88 | ||
|
|
bb87a37686 | ||
|
|
bf2da8f5d8 | ||
|
|
2926b9f5c8 | ||
|
|
820ed8d346 | ||
|
|
e340b716b2 | ||
|
|
9edbbb692e | ||
|
|
356d64371a | ||
|
|
4be4670668 | ||
|
|
0768fee06a | ||
|
|
35ae455e2b | ||
|
|
433e6c9a20 | ||
|
|
34f5289fc3 | ||
|
|
97804d47ff | ||
|
|
b68e9d642e | ||
|
|
f31d9d42fe | ||
|
|
d941873cce | ||
|
|
b11a767741 | ||
|
|
301f829c3c | ||
|
|
76a02610d8 | ||
|
|
76bf5337e8 | ||
|
|
e76b388a05 | ||
|
|
f37e6cbe29 | ||
|
|
e54dce5366 | ||
|
|
c7464d53e1 | ||
|
|
03a6493147 | ||
|
|
36ef2f722d | ||
|
|
b9fda9e2c2 | ||
|
|
c5b590062c | ||
|
|
c0357b2890 | ||
|
|
4f7f7d6880 | ||
|
|
efba10f839 | ||
|
|
6ba12f35d0 | ||
|
|
6a57c23700 | ||
|
|
94b85afbc5 | ||
|
|
cf717032a1 | ||
|
|
d905de2dad | ||
|
|
c7bd1c98e7 | ||
|
|
d3302d77d2 | ||
|
|
df4494c37a | ||
|
|
b84189b21b | ||
|
|
9243661f56 | ||
|
|
bffe97b2b7 | ||
|
|
bee1dd97ee | ||
|
|
16670e36f5 | ||
|
|
5dad663b25 | ||
|
|
8375608aaa | ||
|
|
0057377ac6 | ||
|
|
078ed65a0e | ||
|
|
9872f0ed1b | ||
|
|
fb0cb54776 | ||
|
|
67bae1cf2a | ||
|
|
eb9ac7fae4 | ||
|
|
8046381939 | ||
|
|
650f9fd2a4 | ||
|
|
d4ebc7b5c6 | ||
|
|
7a4ccf8e82 | ||
|
|
73b40d386a | ||
|
|
3206ce50bb | ||
|
|
bdccb866fe | ||
|
|
9b5b382593 | ||
|
|
9886c9a8e7 | ||
|
|
cb3d32cc89 | ||
|
|
010eb5270f | ||
|
|
e33092530d | ||
|
|
e7d649b57f | ||
|
|
5f3d089003 | ||
|
|
4322509657 | ||
|
|
43990c9dc9 | ||
|
|
c03db683a5 | ||
|
|
168fd59187 | ||
|
|
8bd02d8099 | ||
|
|
a1db082ec0 | ||
|
|
9b9c11e7ab | ||
|
|
274b9d5e94 | ||
|
|
d888df6382 | ||
|
|
011b9a3cbf | ||
|
|
d67a587f3d | ||
|
|
478fc5dd89 | ||
|
|
a0e7210dff | ||
|
|
16b5dc56f0 | ||
|
|
303a6896bf | ||
|
|
9e84528801 | ||
|
|
685c228190 | ||
|
|
febe4d1ac0 | ||
|
|
e4f90cd7c1 | ||
|
|
3013291ea0 | ||
|
|
5d1dce7989 | ||
|
|
864f7fa9a5 | ||
|
|
e54fb3fffc | ||
|
|
dddf9f30dc | ||
|
|
3091b5168f | ||
|
|
ddc91c2d66 | ||
|
|
8072a97f7e | ||
|
|
558155ffaa | ||
|
|
ed329c2075 | ||
|
|
305c088bb7 | ||
|
|
debdbfd73c | ||
|
|
904c17c1b3 | ||
|
|
4a80bc8988 | ||
|
|
f9c41ab703 | ||
|
|
2112ba22f1 | ||
|
|
fbe9277f86 | ||
|
|
d1348e809f | ||
|
|
533613886a | ||
|
|
84f8b786e7 | ||
|
|
32bc3e1387 | ||
|
|
0fa5914501 | ||
|
|
9b790c7bf4 | ||
|
|
eda365c21f | ||
|
|
8de1318c9c | ||
|
|
7e566fd655 | ||
|
|
a80db2ddbc | ||
|
|
0694183ca6 | ||
|
|
1f9fb29a9b | ||
|
|
eccc69b79c | ||
|
|
da108b2d8c | ||
|
|
9d94f55cdc | ||
|
|
94a7058cc6 | ||
|
|
3d2e996cea | ||
|
|
f2455c9cb1 | ||
|
|
427c7dd375 | ||
|
|
e911a21a93 | ||
|
|
edabad87d7 | ||
|
|
2a65d29e3b | ||
|
|
c837a9b0c6 | ||
|
|
f7618416b6 | ||
|
|
0663e71c52 | ||
|
|
0599a6ec8c | ||
|
|
b2d36aac19 | ||
|
|
3d88ec5992 | ||
|
|
a693ed1e33 | ||
|
|
911a504e16 | ||
|
|
56cd0cd1a9 | ||
|
|
358ad65d5f | ||
|
|
2f5df6ade0 | ||
|
|
e3b7be81e7 | ||
|
|
9a25e8e810 | ||
|
|
1a6b39b829 | ||
|
|
a419cbbcf3 | ||
|
|
b97ea1293b | ||
|
|
5f54eb8270 | ||
|
|
06161abbbc | ||
|
|
aee549f745 | ||
|
|
50ec753c05 | ||
|
|
cf34c7e75c | ||
|
|
572e07a7fd | ||
|
|
4b5270137b | ||
|
|
246230c924 | ||
|
|
21416af153 | ||
|
|
b03312fa2e | ||
|
|
bcdbf033b2 | ||
|
|
0a054c4a01 | ||
|
|
eae7ad43d9 | ||
|
|
0894ef0089 | ||
|
|
954916960b | ||
|
|
91d16b96ee | ||
|
|
4bbadbc764 | ||
|
|
e4272ac35c | ||
|
|
46ee91c6b7 | ||
|
|
ad553f8fbb | ||
|
|
c0b4129209 | ||
|
|
fc47e4d584 | ||
|
|
32b16439c8 | ||
|
|
fd27449a26 | ||
|
|
3d13301711 | ||
|
|
963ec7206b | ||
|
|
9047511256 | ||
|
|
4ba907fdcd | ||
|
|
dae19c29a0 | ||
|
|
25530c8c44 | ||
|
|
aee44d3af2 | ||
|
|
714d83bea1 | ||
|
|
e1bfe69b76 |
19
.github/codeql/codeql-config.yml
vendored
Normal file
19
.github/codeql/codeql-config.yml
vendored
Normal file
@@ -0,0 +1,19 @@
|
||||
name: "Rust without tests"
|
||||
|
||||
disable-default-queries: false
|
||||
|
||||
queries:
|
||||
- uses: security-extended
|
||||
- uses: security-and-quality
|
||||
- uses: ./.github/codeql/queries
|
||||
|
||||
query-filters:
|
||||
- exclude:
|
||||
id:
|
||||
- rust/unwrap-on-option
|
||||
- rust/unwrap-on-result
|
||||
- rust/expect-used
|
||||
|
||||
analysis:
|
||||
dataflow:
|
||||
default-precision: high
|
||||
4
.github/workflows/codeql.yml
vendored
4
.github/workflows/codeql.yml
vendored
@@ -2,9 +2,9 @@ name: "CodeQL Advanced"
|
||||
|
||||
on:
|
||||
push:
|
||||
branches: [ "main" ]
|
||||
branches: [ "*" ]
|
||||
pull_request:
|
||||
branches: [ "main" ]
|
||||
branches: [ "*" ]
|
||||
schedule:
|
||||
- cron: '0 0 * * 0'
|
||||
|
||||
|
||||
139
.github/workflows/release.yml
vendored
Normal file
139
.github/workflows/release.yml
vendored
Normal file
@@ -0,0 +1,139 @@
|
||||
name: Release
|
||||
|
||||
on:
|
||||
push:
|
||||
tags:
|
||||
- '[0-9]+.[0-9]+.[0-9]+'
|
||||
workflow_dispatch:
|
||||
|
||||
permissions:
|
||||
contents: read
|
||||
packages: write
|
||||
|
||||
env:
|
||||
CARGO_TERM_COLOR: always
|
||||
|
||||
jobs:
|
||||
build:
|
||||
name: Build ${{ matrix.target }}
|
||||
runs-on: ubuntu-latest
|
||||
permissions:
|
||||
contents: read
|
||||
|
||||
strategy:
|
||||
fail-fast: false
|
||||
matrix:
|
||||
include:
|
||||
- target: x86_64-unknown-linux-gnu
|
||||
artifact_name: telemt
|
||||
asset_name: telemt-x86_64-linux-gnu
|
||||
- target: aarch64-unknown-linux-gnu
|
||||
artifact_name: telemt
|
||||
asset_name: telemt-aarch64-linux-gnu
|
||||
- target: x86_64-unknown-linux-musl
|
||||
artifact_name: telemt
|
||||
asset_name: telemt-x86_64-linux-musl
|
||||
- target: aarch64-unknown-linux-musl
|
||||
artifact_name: telemt
|
||||
asset_name: telemt-aarch64-linux-musl
|
||||
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
|
||||
- uses: dtolnay/rust-toolchain@v1
|
||||
with:
|
||||
toolchain: stable
|
||||
targets: ${{ matrix.target }}
|
||||
|
||||
- name: Install cross-compilation tools
|
||||
run: |
|
||||
sudo apt-get update
|
||||
sudo apt-get install -y gcc-aarch64-linux-gnu
|
||||
|
||||
- uses: actions/cache@v4
|
||||
with:
|
||||
path: |
|
||||
~/.cargo/registry
|
||||
~/.cargo/git
|
||||
target
|
||||
key: ${{ runner.os }}-${{ matrix.target }}-cargo-${{ hashFiles('**/Cargo.lock') }}
|
||||
restore-keys: |
|
||||
${{ runner.os }}-${{ matrix.target }}-cargo-
|
||||
|
||||
- name: Install cross
|
||||
run: cargo install cross --git https://github.com/cross-rs/cross
|
||||
|
||||
- name: Build Release
|
||||
env:
|
||||
RUSTFLAGS: ${{ contains(matrix.target, 'musl') && '-C target-feature=+crt-static' || '' }}
|
||||
run: cross build --release --target ${{ matrix.target }}
|
||||
|
||||
- name: Package binary
|
||||
run: |
|
||||
cd target/${{ matrix.target }}/release
|
||||
tar -czvf ${{ matrix.asset_name }}.tar.gz ${{ matrix.artifact_name }}
|
||||
sha256sum ${{ matrix.asset_name }}.tar.gz > ${{ matrix.asset_name }}.sha256
|
||||
|
||||
- uses: actions/upload-artifact@v4
|
||||
with:
|
||||
name: ${{ matrix.asset_name }}
|
||||
path: |
|
||||
target/${{ matrix.target }}/release/${{ matrix.asset_name }}.tar.gz
|
||||
target/${{ matrix.target }}/release/${{ matrix.asset_name }}.sha256
|
||||
|
||||
build-docker-image:
|
||||
needs: build
|
||||
runs-on: ubuntu-latest
|
||||
permissions:
|
||||
contents: read
|
||||
packages: write
|
||||
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
|
||||
- uses: docker/setup-qemu-action@v3
|
||||
- uses: docker/setup-buildx-action@v3
|
||||
|
||||
- name: Login to GHCR
|
||||
uses: docker/login-action@v3
|
||||
with:
|
||||
registry: ghcr.io
|
||||
username: ${{ github.actor }}
|
||||
password: ${{ secrets.GITHUB_TOKEN }}
|
||||
|
||||
- name: Extract version
|
||||
id: vars
|
||||
run: echo "VERSION=${GITHUB_REF#refs/tags/}" >> $GITHUB_OUTPUT
|
||||
|
||||
- name: Build and push
|
||||
uses: docker/build-push-action@v6
|
||||
with:
|
||||
context: .
|
||||
push: true
|
||||
tags: |
|
||||
ghcr.io/${{ github.repository }}:${{ steps.vars.outputs.VERSION }}
|
||||
ghcr.io/${{ github.repository }}:latest
|
||||
|
||||
release:
|
||||
name: Create Release
|
||||
needs: build
|
||||
runs-on: ubuntu-latest
|
||||
permissions:
|
||||
contents: write
|
||||
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
with:
|
||||
fetch-depth: 0
|
||||
|
||||
- uses: actions/download-artifact@v4
|
||||
with:
|
||||
path: artifacts
|
||||
|
||||
- name: Create Release
|
||||
uses: softprops/action-gh-release@v2
|
||||
with:
|
||||
files: artifacts/**/*
|
||||
generate_release_notes: true
|
||||
draft: false
|
||||
prerelease: ${{ contains(github.ref, '-rc') || contains(github.ref, '-beta') || contains(github.ref, '-alpha') }}
|
||||
12
.github/workflows/rust.yml
vendored
12
.github/workflows/rust.yml
vendored
@@ -2,9 +2,9 @@ name: Rust
|
||||
|
||||
on:
|
||||
push:
|
||||
branches: [ main ]
|
||||
branches: [ "*" ]
|
||||
pull_request:
|
||||
branches: [ main ]
|
||||
branches: [ "*" ]
|
||||
|
||||
env:
|
||||
CARGO_TERM_COLOR: always
|
||||
@@ -42,5 +42,13 @@ jobs:
|
||||
- name: Build Release
|
||||
run: cargo build --release --verbose
|
||||
|
||||
- name: Run tests
|
||||
run: cargo test --verbose
|
||||
|
||||
# clippy dont fail on warnings because of active development of telemt
|
||||
# and many warnings
|
||||
- name: Run clippy
|
||||
run: cargo clippy -- --cap-lints warn
|
||||
|
||||
- name: Check for unused dependencies
|
||||
run: cargo udeps || true
|
||||
|
||||
2
.gitignore
vendored
2
.gitignore
vendored
@@ -19,3 +19,5 @@ target
|
||||
# and can be added to the global gitignore or merged into this file. For a more nuclear
|
||||
# option (not recommended) you can uncomment the following to ignore the entire idea folder.
|
||||
#.idea/
|
||||
|
||||
proxy-secret
|
||||
|
||||
58
.kilocode/rules-architect/AGENTS.md
Normal file
58
.kilocode/rules-architect/AGENTS.md
Normal file
@@ -0,0 +1,58 @@
|
||||
# Architect Mode Rules for Telemt
|
||||
|
||||
## Architecture Overview
|
||||
|
||||
```mermaid
|
||||
graph TB
|
||||
subgraph Entry
|
||||
Client[Clients] --> Listener[TCP/Unix Listener]
|
||||
end
|
||||
|
||||
subgraph Proxy Layer
|
||||
Listener --> ClientHandler[ClientHandler]
|
||||
ClientHandler --> Handshake[Handshake Validator]
|
||||
Handshake --> |Valid| Relay[Relay Layer]
|
||||
Handshake --> |Invalid| Masking[Masking/TLS Fronting]
|
||||
end
|
||||
|
||||
subgraph Transport
|
||||
Relay --> MiddleProxy[Middle-End Proxy Pool]
|
||||
Relay --> DirectRelay[Direct DC Relay]
|
||||
MiddleProxy --> TelegramDC[Telegram DCs]
|
||||
DirectRelay --> TelegramDC
|
||||
end
|
||||
```
|
||||
|
||||
## Module Dependencies
|
||||
- [`src/main.rs`](src/main.rs) - Entry point, spawns all async tasks
|
||||
- [`src/config/`](src/config/) - Configuration loading with auto-migration
|
||||
- [`src/error.rs`](src/error.rs) - Error types, must be used by all modules
|
||||
- [`src/crypto/`](src/crypto/) - AES, SHA, random number generation
|
||||
- [`src/protocol/`](src/protocol/) - MTProto constants, frame encoding, obfuscation
|
||||
- [`src/stream/`](src/stream/) - Stream wrappers, buffer pool, frame codecs
|
||||
- [`src/proxy/`](src/proxy/) - Client handling, handshake, relay logic
|
||||
- [`src/transport/`](src/transport/) - Upstream management, middle-proxy, SOCKS support
|
||||
- [`src/stats/`](src/stats/) - Statistics and replay protection
|
||||
- [`src/ip_tracker.rs`](src/ip_tracker.rs) - Per-user IP tracking
|
||||
|
||||
## Key Architectural Constraints
|
||||
|
||||
### Middle-End Proxy Mode
|
||||
- Requires public IP on interface OR 1:1 NAT with STUN probing
|
||||
- Uses separate `proxy-secret` from Telegram (NOT user secrets)
|
||||
- Falls back to direct mode automatically on STUN mismatch
|
||||
|
||||
### TLS Fronting
|
||||
- Invalid handshakes are transparently proxied to `mask_host`
|
||||
- This is critical for DPI evasion - do not change this behavior
|
||||
- `mask_unix_sock` and `mask_host` are mutually exclusive
|
||||
|
||||
### Stream Architecture
|
||||
- Buffer pool is shared globally via Arc - prevents allocation storms
|
||||
- Frame codecs implement tokio-util Encoder/Decoder traits
|
||||
- State machine in [`src/stream/state.rs`](src/stream/state.rs) manages stream transitions
|
||||
|
||||
### Configuration Migration
|
||||
- [`ProxyConfig::load()`](src/config/mod.rs:641) mutates config in-place
|
||||
- New fields must have sensible defaults
|
||||
- DC203 override is auto-injected for CDN/media support
|
||||
23
.kilocode/rules-code/AGENTS.md
Normal file
23
.kilocode/rules-code/AGENTS.md
Normal file
@@ -0,0 +1,23 @@
|
||||
# Code Mode Rules for Telemt
|
||||
|
||||
## Error Handling
|
||||
- Always use [`ProxyError`](src/error.rs:168) from [`src/error.rs`](src/error.rs) for proxy operations
|
||||
- [`HandshakeResult<T,R,W>`](src/error.rs:292) returns streams on bad client - these MUST be returned for masking, never dropped
|
||||
- Use [`Recoverable`](src/error.rs:110) trait to check if errors are retryable
|
||||
|
||||
## Configuration Changes
|
||||
- [`ProxyConfig::load()`](src/config/mod.rs:641) auto-mutates config - new fields should have defaults
|
||||
- DC203 override is auto-injected if missing - do not remove this behavior
|
||||
- When adding config fields, add migration logic in [`ProxyConfig::load()`](src/config/mod.rs:641)
|
||||
|
||||
## Crypto Code
|
||||
- [`SecureRandom`](src/crypto/random.rs) from [`src/crypto/random.rs`](src/crypto/random.rs) must be used for all crypto operations
|
||||
- Never use `rand::thread_rng()` directly - use the shared `Arc<SecureRandom>`
|
||||
|
||||
## Stream Handling
|
||||
- Buffer pool [`BufferPool`](src/stream/buffer_pool.rs) is shared via Arc - always use it instead of allocating
|
||||
- Frame codecs in [`src/stream/frame_codec.rs`](src/stream/frame_codec.rs) implement tokio-util's Encoder/Decoder traits
|
||||
|
||||
## Testing
|
||||
- Tests are inline in modules using `#[cfg(test)]`
|
||||
- Use `cargo test --lib <module_name>` to run tests for specific modules
|
||||
27
.kilocode/rules-debug/AGENTS.md
Normal file
27
.kilocode/rules-debug/AGENTS.md
Normal file
@@ -0,0 +1,27 @@
|
||||
# Debug Mode Rules for Telemt
|
||||
|
||||
## Logging
|
||||
- `RUST_LOG` environment variable takes absolute priority over all config log levels
|
||||
- Log levels: `trace`, `debug`, `info`, `warn`, `error`
|
||||
- Use `RUST_LOG=debug cargo run` for detailed operational logs
|
||||
- Use `RUST_LOG=trace cargo run` for full protocol-level debugging
|
||||
|
||||
## Middle-End Proxy Debugging
|
||||
- Set `ME_DIAG=1` environment variable for high-precision cryptography diagnostics
|
||||
- STUN probe results are logged at startup - check for mismatch between local and reflected IP
|
||||
- If Middle-End fails, check `proxy_secret_path` points to valid file from https://core.telegram.org/getProxySecret
|
||||
|
||||
## Connection Issues
|
||||
- DC connectivity is logged at startup with RTT measurements
|
||||
- If DC ping fails, check `dc_overrides` for custom addresses
|
||||
- Use `prefer_ipv6=false` in config if IPv6 is unreliable
|
||||
|
||||
## TLS Fronting Issues
|
||||
- Invalid handshakes are proxied to `mask_host` - check this host is reachable
|
||||
- `mask_unix_sock` and `mask_host` are mutually exclusive - only one can be set
|
||||
- If `mask_unix_sock` is set, socket must exist before connections arrive
|
||||
|
||||
## Common Errors
|
||||
- `ReplayAttack` - client replayed a handshake nonce, potential attack
|
||||
- `TimeSkew` - client clock is off, can disable with `ignore_time_skew=true`
|
||||
- `TgHandshakeTimeout` - upstream DC connection failed, check network
|
||||
410
AGENTS.md
Normal file
410
AGENTS.md
Normal file
@@ -0,0 +1,410 @@
|
||||
## System Prompt — Production Rust Codebase: Modification and Architecture Guidelines
|
||||
|
||||
You are a senior Rust Engineer and pricipal Rust Architect acting as a strict code reviewer and implementation partner.
|
||||
Your responses are precise, minimal, and architecturally sound. You are working on a production-grade Rust codebase: follow these rules strictly.
|
||||
|
||||
---
|
||||
|
||||
### 0. Priority Resolution — Scope Control
|
||||
|
||||
This section resolves conflicts between code quality enforcement and scope limitation.
|
||||
|
||||
When editing or extending existing code, you MUST audit the affected files and fix:
|
||||
|
||||
- Comment style violations (missing, non-English, decorative, trailing).
|
||||
- Missing or incorrect documentation on public items.
|
||||
- Comment placement issues (trailing comments → move above the code).
|
||||
|
||||
These are **coordinated changes** — they are always in scope.
|
||||
|
||||
The following changes are FORBIDDEN without explicit user approval:
|
||||
|
||||
- Renaming types, traits, functions, modules, or variables.
|
||||
- Altering business logic, control flow, or data transformations.
|
||||
- Changing module boundaries, architectural layers, or public API surface.
|
||||
- Adding or removing functions, structs, enums, or trait implementations.
|
||||
- Fixing compiler warnings or removing unused code.
|
||||
|
||||
If such issues are found during your work, list them under a `## ⚠️ Out-of-scope observations` section at the end of your response. Include file path, context, and a brief description. Do not apply these changes.
|
||||
|
||||
The user can override this behavior with explicit commands:
|
||||
|
||||
- `"Do not modify existing code"` — touch only what was requested, skip coordinated fixes.
|
||||
- `"Make minimal changes"` — no coordinated fixes, narrowest possible diff.
|
||||
- `"Fix everything"` — apply all coordinated fixes and out-of-scope observations.
|
||||
|
||||
### Core Rule
|
||||
|
||||
The codebase must never enter an invalid intermediate state.
|
||||
No response may leave the repository in a condition that requires follow-up fixes.
|
||||
|
||||
---
|
||||
|
||||
### 1. Comments and Documentation
|
||||
|
||||
- All comments MUST be written in English.
|
||||
- Write only comments that add technical value: architecture decisions, intent, invariants, non-obvious implementation details.
|
||||
- Place all comments on separate lines above the relevant code.
|
||||
- Use `///` doc-comments for public items. Use `//` for internal clarifications.
|
||||
|
||||
Correct example:
|
||||
|
||||
```rust
|
||||
// Handles MTProto client authentication and establishes encrypted session state.
|
||||
fn handle_authenticated_client(...) { ... }
|
||||
```
|
||||
|
||||
Incorrect examples:
|
||||
|
||||
```rust
|
||||
let x = 5; // set x to 5
|
||||
```
|
||||
|
||||
```rust
|
||||
// This function does stuff
|
||||
fn do_stuff() { ... }
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
### 2. File Size and Module Structure
|
||||
|
||||
- Files MUST NOT exceed 350–550 lines.
|
||||
- If a file exceeds this limit, split it into submodules organized by responsibility (e.g., protocol, transport, state, handlers).
|
||||
- Parent modules MUST declare and describe their submodules.
|
||||
- Maintain clear architectural boundaries between modules.
|
||||
|
||||
Correct example:
|
||||
|
||||
```rust
|
||||
// Client connection handling logic.
|
||||
// Submodules:
|
||||
// - handshake: MTProto handshake implementation
|
||||
// - relay: traffic forwarding logic
|
||||
// - state: client session state machine
|
||||
|
||||
pub mod handshake;
|
||||
pub mod relay;
|
||||
pub mod state;
|
||||
```
|
||||
|
||||
Git discipline:
|
||||
|
||||
- Use local git for versioning and diffs.
|
||||
- Write clear, descriptive commit messages in English that explain both *what* changed and *why*.
|
||||
|
||||
---
|
||||
|
||||
### 3. Formatting
|
||||
|
||||
- Preserve the existing formatting style of the project exactly as-is.
|
||||
- Reformat code only when explicitly instructed to do so.
|
||||
- Do not run `cargo fmt` unless explicitly instructed.
|
||||
|
||||
---
|
||||
|
||||
### 4. Change Safety and Validation
|
||||
|
||||
- If anything is unclear, STOP and ask specific, targeted questions before proceeding.
|
||||
- List exactly what is ambiguous and offer possible interpretations for the user to choose from.
|
||||
- Prefer clarification over assumptions. Do not guess intent, behavior, or missing requirements.
|
||||
- Actively ask questions before making architectural or behavioral changes.
|
||||
|
||||
---
|
||||
|
||||
### 5. Warnings and Unused Code
|
||||
|
||||
- Leave all warnings, unused variables, functions, imports, and dead code untouched unless explicitly instructed to modify them.
|
||||
- These may be intentional or part of work-in-progress code.
|
||||
- `todo!()` and `unimplemented!()` are permitted and should not be removed or replaced unless explicitly instructed.
|
||||
|
||||
---
|
||||
|
||||
### 6. Architectural Integrity
|
||||
|
||||
- Preserve existing architecture unless explicitly instructed to refactor.
|
||||
- Do not introduce hidden behavioral changes.
|
||||
- Do not introduce implicit refactors.
|
||||
- Keep changes minimal, isolated, and intentional.
|
||||
|
||||
---
|
||||
|
||||
### 7. When Modifying Code
|
||||
|
||||
You MUST:
|
||||
|
||||
- Maintain architectural consistency with the existing codebase.
|
||||
- Document non-obvious logic with comments that describe *why*, not *what*.
|
||||
- Limit changes strictly to the requested scope (plus coordinated fixes per Section 0).
|
||||
- Keep all existing symbol names unless renaming is explicitly requested.
|
||||
- Preserve global formatting as-is
|
||||
- Ensure every modification leaves the codebase in a self-contained, compilable, runnable state
|
||||
|
||||
You MUST NOT:
|
||||
|
||||
- Use placeholders: no `// ... rest of code`, no `// implement here`, no `/* TODO */` stubs that replace existing working code. Write full, working implementation. If the implementation is unclear, ask first
|
||||
- Refactor code outside the requested scope
|
||||
- Make speculative improvements
|
||||
- Spawn multiple agents for EDITING
|
||||
- Produce partial changes
|
||||
- Introduce references to entities that are not yet implemented
|
||||
- Leave TODO placeholders in production paths
|
||||
|
||||
Note: `todo!()` and `unimplemented!()` are allowed as idiomatic Rust markers for genuinely unfinished code paths.
|
||||
|
||||
Every change must:
|
||||
- compile,
|
||||
- pass type checks,
|
||||
- have no broken imports,
|
||||
- preserve invariants,
|
||||
- not rely on future patches.
|
||||
|
||||
If the task requires multiple phases:
|
||||
- either implement all required phases,
|
||||
- or explicitly refuse and explain missing dependencies.
|
||||
|
||||
---
|
||||
|
||||
### 8. Decision Process for Complex Changes
|
||||
|
||||
When facing a non-trivial modification, follow this sequence:
|
||||
|
||||
1. **Clarify**: Restate the task in one sentence to confirm understanding.
|
||||
2. **Assess impact**: Identify which modules, types, and invariants are affected.
|
||||
3. **Propose**: Describe the intended change before implementing it.
|
||||
4. **Implement**: Make the minimal, isolated change.
|
||||
5. **Verify**: Explain why the change preserves existing behavior and architectural integrity.
|
||||
|
||||
---
|
||||
|
||||
### 9. Context Awareness
|
||||
|
||||
- When provided with partial code, assume the rest of the codebase exists and functions correctly unless stated otherwise.
|
||||
- Reference existing types, functions, and module structures by their actual names as shown in the provided code.
|
||||
- When the provided context is insufficient to make a safe change, request the missing context explicitly.
|
||||
- Spawn multiple agents for SEARCHING information, code, functions
|
||||
|
||||
---
|
||||
|
||||
### 10. Response Format
|
||||
|
||||
#### Language Policy
|
||||
|
||||
- Code, comments, commit messages, and documentation MUST be written ONLY in **English**!
|
||||
- Reasoning and explanations in the response text use the language of the prompt
|
||||
|
||||
#### Response Structure
|
||||
|
||||
Your response MUST consist of two sections:
|
||||
|
||||
**Section 1: `## Reasoning`**
|
||||
|
||||
- What needs to be done and why.
|
||||
- Which files and modules are affected.
|
||||
- Architectural decisions and their rationale.
|
||||
- Potential risks or side effects.
|
||||
|
||||
**Section 2: `## Changes`**
|
||||
|
||||
- For each modified or created file: the filename on a separate line in backticks, followed by the code block.
|
||||
- For files **under 200 lines**: return the full file with all changes applied.
|
||||
- For files **over 200 lines**: return only the changed functions/blocks with at least 3 lines of surrounding context above and below. If the user requests the full file, provide it.
|
||||
- New files: full file content.
|
||||
- End with a suggested git commit message in English.
|
||||
|
||||
#### Reporting Out-of-Scope Issues
|
||||
|
||||
If during modification you discover issues outside the requested scope (potential bugs, unsafe code, architectural concerns, missing error handling, unused imports, dead code):
|
||||
|
||||
- Do not fix them silently.
|
||||
- List them under `## ⚠️ Out-of-scope observations` at the end of your response.
|
||||
- Include: file path, line/function context, brief description of the issue, and severity estimate.
|
||||
|
||||
#### Splitting Protocol
|
||||
|
||||
If the response exceeds the output limit:
|
||||
|
||||
1. End the current part with: **SPLIT: PART N — CONTINUE? (remaining: file_list)**
|
||||
2. List the files that will be provided in subsequent parts.
|
||||
3. Wait for user confirmation before continuing.
|
||||
4. No single file may be split across parts.
|
||||
|
||||
## 11. Anti-LLM Degeneration Safeguards (Principal-Paranoid, Visionary)
|
||||
|
||||
This section exists to prevent common LLM failure modes: scope creep, semantic drift, cargo-cult refactors, performance regressions, contract breakage, and hidden behavior changes.
|
||||
|
||||
### 11.1 Non-Negotiable Invariants
|
||||
|
||||
- **No semantic drift:** Do not reinterpret requirements, rename concepts, or change meaning of existing terms.
|
||||
- **No “helpful refactors”:** Any refactor not explicitly requested is forbidden.
|
||||
- **No architectural drift:** Do not introduce new layers, patterns, abstractions, or “clean architecture” migrations unless requested.
|
||||
- **No dependency drift:** Do not add crates, features, or versions unless explicitly requested.
|
||||
- **No behavior drift:** If a change could alter runtime behavior, you MUST call it out explicitly in `## Reasoning` and justify it.
|
||||
|
||||
### 11.2 Minimal Surface Area Rule
|
||||
|
||||
- Touch the smallest number of files possible.
|
||||
- Prefer local changes over cross-cutting edits.
|
||||
- Do not “align style” across a file/module—only adjust the modified region.
|
||||
- Do not reorder items, imports, or code unless required for correctness.
|
||||
|
||||
### 11.3 No Implicit Contract Changes
|
||||
|
||||
Contracts include:
|
||||
- public APIs, trait bounds, visibility, error types, timeouts/retries, logging semantics, metrics semantics,
|
||||
- protocol formats, framing, padding, keepalive cadence, state machine transitions,
|
||||
- concurrency guarantees, cancellation behavior, backpressure behavior.
|
||||
|
||||
Rule:
|
||||
- If you change a contract, you MUST update all dependents in the same patch AND document the contract delta explicitly.
|
||||
|
||||
### 11.4 Hot-Path Preservation (Performance Paranoia)
|
||||
|
||||
- Do not introduce extra allocations, cloning, or formatting in hot paths.
|
||||
- Do not add logging/metrics on hot paths unless requested.
|
||||
- Do not add new locks or broaden lock scope.
|
||||
- Prefer `&str` / slices / borrowed data where the codebase already does so.
|
||||
- Avoid `String` building for errors/logs if it changes current patterns.
|
||||
|
||||
If you cannot prove performance neutrality, label it as risk in `## Reasoning`.
|
||||
|
||||
### 11.5 Async / Concurrency Safety (Cancellation & Backpressure)
|
||||
|
||||
- No blocking calls inside async contexts.
|
||||
- Preserve cancellation safety: do not introduce `await` between lock acquisition and critical invariants unless already present.
|
||||
- Preserve backpressure: do not replace bounded channels with unbounded, do not remove flow control.
|
||||
- Do not change task lifecycle semantics (spawn patterns, join handles, shutdown order) unless requested.
|
||||
- Do not introduce `tokio::spawn` / background tasks unless explicitly requested.
|
||||
|
||||
### 11.6 Error Semantics Integrity
|
||||
|
||||
- Do not replace structured errors with generic strings.
|
||||
- Do not widen/narrow error types or change error categories without explicit approval.
|
||||
- Avoid introducing panics in production paths (`unwrap`, `expect`) unless the codebase already treats that path as impossible and documented.
|
||||
|
||||
### 11.7 “No New Abstractions” Default
|
||||
|
||||
Default stance:
|
||||
- No new traits, generics, macros, builder patterns, type-level cleverness, or “frameworking”.
|
||||
- If abstraction is necessary, prefer the smallest possible local helper (private function) and justify it.
|
||||
|
||||
### 11.8 Negative-Diff Protection
|
||||
|
||||
Avoid “diff inflation” patterns:
|
||||
- mass edits,
|
||||
- moving code between files,
|
||||
- rewrapping long lines,
|
||||
- rearranging module order,
|
||||
- renaming for aesthetics.
|
||||
|
||||
If a diff becomes large, STOP and ask before proceeding.
|
||||
|
||||
### 11.9 Consistency with Existing Style (But Not Style Refactors)
|
||||
|
||||
- Follow existing conventions of the touched module (naming, error style, return patterns).
|
||||
- Do not enforce global “best practices” that the codebase does not already use.
|
||||
|
||||
### 11.10 Two-Phase Safety Gate (Plan → Patch)
|
||||
|
||||
For non-trivial changes:
|
||||
1) Provide a micro-plan (1–5 bullets): what files, what functions, what invariants, what risks.
|
||||
2) Implement exactly that plan—no extra improvements.
|
||||
|
||||
### 11.11 Pre-Response Checklist (Hard Gate)
|
||||
|
||||
Before final output, verify internally:
|
||||
|
||||
- No unresolved symbols / broken imports.
|
||||
- No partially updated call sites.
|
||||
- No new public surface changes unless requested.
|
||||
- No transitional states / TODO placeholders replacing working code.
|
||||
- Changes are atomic: the repository remains buildable and runnable.
|
||||
- Any behavior change is explicitly stated.
|
||||
|
||||
If any check fails: fix it before responding.
|
||||
|
||||
### 11.12 Truthfulness Policy (No Hallucinated Claims)
|
||||
|
||||
- Do not claim “this compiles” or “tests pass” unless you actually verified with the available tooling/context.
|
||||
- If verification is not possible, state: “Not executed; reasoning-based consistency check only.”
|
||||
|
||||
### 11.13 Visionary Guardrail: Preserve Optionality
|
||||
|
||||
When multiple valid designs exist, prefer the one that:
|
||||
- minimally constrains future evolution,
|
||||
- preserves existing extension points,
|
||||
- avoids locking the project into a new paradigm,
|
||||
- keeps interfaces stable and implementation local.
|
||||
|
||||
Default to reversible changes.
|
||||
|
||||
### 11.14 Stop Conditions
|
||||
|
||||
STOP and ask targeted questions if:
|
||||
- required context is missing,
|
||||
- a change would cross module boundaries,
|
||||
- a contract might change,
|
||||
- concurrency/protocol invariants are unclear,
|
||||
- the diff is growing beyond a minimal patch.
|
||||
|
||||
No guessing.
|
||||
|
||||
### 12. Invariant Preservation
|
||||
|
||||
You MUST explicitly preserve:
|
||||
- Thread-safety guarantees (`Send` / `Sync` expectations).
|
||||
- Memory safety assumptions (no hidden `unsafe` expansions).
|
||||
- Lock ordering and deadlock invariants.
|
||||
- State machine correctness (no new invalid transitions).
|
||||
- Backward compatibility of serialized formats (if applicable).
|
||||
|
||||
If a change touches concurrency, networking, protocol logic, or state machines,
|
||||
you MUST explain why existing invariants remain valid.
|
||||
|
||||
### 13. Error Handling Policy
|
||||
|
||||
- Do not replace structured errors with generic strings.
|
||||
- Preserve existing error propagation semantics.
|
||||
- Do not widen or narrow error types without approval.
|
||||
- Avoid introducing panics in production paths.
|
||||
- Prefer explicit error mapping over implicit conversions.
|
||||
|
||||
### 14. Test Safety
|
||||
|
||||
- Do not modify existing tests unless the task explicitly requires it.
|
||||
- Do not weaken assertions.
|
||||
- Preserve determinism in testable components.
|
||||
|
||||
### 15. Security Constraints
|
||||
|
||||
- Do not weaken cryptographic assumptions.
|
||||
- Do not modify key derivation logic without explicit request.
|
||||
- Do not change constant-time behavior.
|
||||
- Do not introduce logging of secrets.
|
||||
- Preserve TLS/MTProto protocol correctness.
|
||||
|
||||
### 16. Logging Policy
|
||||
|
||||
- Do not introduce excessive logging in hot paths.
|
||||
- Do not log sensitive data.
|
||||
- Preserve existing log levels and style.
|
||||
|
||||
### 17. Pre-Response Verification Checklist
|
||||
|
||||
Before producing the final answer, verify internally:
|
||||
|
||||
- The change compiles conceptually.
|
||||
- No unresolved symbols exist.
|
||||
- All modified call sites are updated.
|
||||
- No accidental behavioral changes were introduced.
|
||||
- Architectural boundaries remain intact.
|
||||
|
||||
### 18. Atomic Change Principle
|
||||
Every patch must be **atomic and production-safe**.
|
||||
* **Self-contained** — no dependency on future patches or unimplemented components.
|
||||
* **Build-safe** — the project must compile successfully after the change.
|
||||
* **Contract-consistent** — no partial interface or behavioral changes; all dependent code must be updated within the same patch.
|
||||
* **No transitional states** — no placeholders, incomplete refactors, or temporary inconsistencies.
|
||||
|
||||
**Invariant:** After any single patch, the repository remains fully functional and buildable.
|
||||
|
||||
19
CONTRIBUTING.md
Normal file
19
CONTRIBUTING.md
Normal file
@@ -0,0 +1,19 @@
|
||||
# Issues - Rules
|
||||
## What it is not
|
||||
- NOT Question and Answer
|
||||
- NOT Helpdesk
|
||||
|
||||
# Pull Requests - Rules
|
||||
## General
|
||||
- ONLY signed and verified commits
|
||||
- ONLY from your name
|
||||
- DO NOT commit with `codex` or `claude` as author/committer
|
||||
- PREFER `flow` branch for development, not `main`
|
||||
|
||||
## AI
|
||||
We are not against modern tools, like AI, where you act as a principal or architect, but we consider it important:
|
||||
|
||||
- you really understand what you're doing
|
||||
- you understand the relationships and dependencies of the components being modified
|
||||
- you understand the architecture of Telegram MTProto, MTProxy, Middle-End KDF at least generically
|
||||
- you DO NOT commit for the sake of commits, but to help the community, core-developers and ordinary users
|
||||
621
Cargo.lock
generated
621
Cargo.lock
generated
File diff suppressed because it is too large
Load Diff
23
Cargo.toml
23
Cargo.toml
@@ -1,6 +1,6 @@
|
||||
[package]
|
||||
name = "telemt"
|
||||
version = "1.2.0"
|
||||
version = "3.3.5"
|
||||
edition = "2024"
|
||||
|
||||
[dependencies]
|
||||
@@ -9,7 +9,7 @@ libc = "0.2"
|
||||
|
||||
# Async runtime
|
||||
tokio = { version = "1.42", features = ["full", "tracing"] }
|
||||
tokio-util = { version = "0.7", features = ["codec"] }
|
||||
tokio-util = { version = "0.7", features = ["full"] }
|
||||
|
||||
# Crypto
|
||||
aes = "0.8"
|
||||
@@ -20,15 +20,18 @@ sha1 = "0.10"
|
||||
md-5 = "0.10"
|
||||
hmac = "0.12"
|
||||
crc32fast = "1.4"
|
||||
crc32c = "0.6"
|
||||
zeroize = { version = "1.8", features = ["derive"] }
|
||||
|
||||
# Network
|
||||
socket2 = { version = "0.5", features = ["all"] }
|
||||
nix = { version = "0.28", default-features = false, features = ["net"] }
|
||||
|
||||
# Serialization
|
||||
serde = { version = "1.0", features = ["derive"] }
|
||||
serde_json = "1.0"
|
||||
toml = "0.8"
|
||||
x509-parser = "0.15"
|
||||
|
||||
# Utils
|
||||
bytes = "1.9"
|
||||
@@ -37,7 +40,7 @@ tracing = "0.1"
|
||||
tracing-subscriber = { version = "0.3", features = ["env-filter"] }
|
||||
parking_lot = "0.12"
|
||||
dashmap = "5.5"
|
||||
lru = "0.12"
|
||||
lru = "0.16"
|
||||
rand = "0.9"
|
||||
chrono = { version = "0.4", features = ["serde"] }
|
||||
hex = "0.4"
|
||||
@@ -45,9 +48,21 @@ base64 = "0.22"
|
||||
url = "2.5"
|
||||
regex = "1.11"
|
||||
crossbeam-queue = "0.3"
|
||||
num-bigint = "0.4"
|
||||
num-traits = "0.2"
|
||||
anyhow = "1.0"
|
||||
|
||||
# HTTP
|
||||
reqwest = { version = "0.12", features = ["rustls-tls"], default-features = false }
|
||||
notify = { version = "6", features = ["macos_fsevent"] }
|
||||
ipnetwork = "0.20"
|
||||
hyper = { version = "1", features = ["server", "http1"] }
|
||||
hyper-util = { version = "0.1", features = ["tokio", "server-auto"] }
|
||||
http-body-util = "0.1"
|
||||
httpdate = "1.0"
|
||||
tokio-rustls = { version = "0.26", default-features = false, features = ["tls12"] }
|
||||
rustls = { version = "0.23", default-features = false, features = ["std", "tls12", "ring"] }
|
||||
webpki-roots = "0.26"
|
||||
|
||||
[dev-dependencies]
|
||||
tokio-test = "0.4"
|
||||
@@ -57,4 +72,4 @@ futures = "0.3"
|
||||
|
||||
[[bench]]
|
||||
name = "crypto_bench"
|
||||
harness = false
|
||||
harness = false
|
||||
|
||||
43
Dockerfile
Normal file
43
Dockerfile
Normal file
@@ -0,0 +1,43 @@
|
||||
# ==========================
|
||||
# Stage 1: Build
|
||||
# ==========================
|
||||
FROM rust:1.88-slim-bookworm AS builder
|
||||
|
||||
RUN apt-get update && apt-get install -y --no-install-recommends \
|
||||
pkg-config \
|
||||
&& rm -rf /var/lib/apt/lists/*
|
||||
|
||||
WORKDIR /build
|
||||
|
||||
COPY Cargo.toml Cargo.lock* ./
|
||||
RUN mkdir src && echo 'fn main() {}' > src/main.rs && \
|
||||
cargo build --release 2>/dev/null || true && \
|
||||
rm -rf src
|
||||
|
||||
COPY . .
|
||||
RUN cargo build --release && strip target/release/telemt
|
||||
|
||||
# ==========================
|
||||
# Stage 2: Runtime
|
||||
# ==========================
|
||||
FROM debian:bookworm-slim
|
||||
|
||||
RUN apt-get update && apt-get install -y --no-install-recommends \
|
||||
ca-certificates \
|
||||
&& rm -rf /var/lib/apt/lists/*
|
||||
|
||||
RUN useradd -r -s /usr/sbin/nologin telemt
|
||||
|
||||
WORKDIR /app
|
||||
|
||||
COPY --from=builder /build/target/release/telemt /app/telemt
|
||||
COPY config.toml /app/config.toml
|
||||
|
||||
RUN chown -R telemt:telemt /app
|
||||
USER telemt
|
||||
|
||||
EXPOSE 443
|
||||
EXPOSE 9090
|
||||
|
||||
ENTRYPOINT ["/app/telemt"]
|
||||
CMD ["config.toml"]
|
||||
17
LICENSING.md
Normal file
17
LICENSING.md
Normal file
@@ -0,0 +1,17 @@
|
||||
# LICENSING
|
||||
## Licenses for Versions
|
||||
| Version | License |
|
||||
|---------|---------------|
|
||||
| 1.0 | NO LICENSE |
|
||||
| 1.1 | NO LICENSE |
|
||||
| 1.2 | NO LICENSE |
|
||||
| 2.0 | NO LICENSE |
|
||||
| 3.0 | TELEMT UL 1 |
|
||||
|
||||
### License Types
|
||||
- **NO LICENSE** = ***ALL RIGHTS RESERVED***
|
||||
- **TELEMT UL1** - work in progress license for source code of `telemt`, which encourages:
|
||||
- fair use,
|
||||
- contributions,
|
||||
- distribution,
|
||||
- but prohibits NOT mentioning the authors
|
||||
309
README.md
309
README.md
@@ -1,30 +1,85 @@
|
||||
# Telemt - MTProxy on Rust + Tokio
|
||||
|
||||
**Telemt** is a fast, secure, and feature-rich server written in Rust: it fully implements the official Telegram proxy algo and adds many production-ready improvements such as connection pooling, replay protection, detailed statistics, masking from "prying" eyes
|
||||
***Löst Probleme, bevor andere überhaupt wissen, dass sie existieren*** / ***It solves problems before others even realize they exist***
|
||||
|
||||
## Emergency
|
||||
**Важное сообщение для пользователей из России**
|
||||
**Telemt** is a fast, secure, and feature-rich server written in Rust: it fully implements the official Telegram proxy algo and adds many production-ready improvements such as:
|
||||
- [ME Pool + Reader/Writer + Registry + Refill + Adaptive Floor + Trio-State + Generation Lifecycle](https://github.com/telemt/telemt/blob/main/docs/model/MODEL.en.md)
|
||||
- [Full-covered API w/ management](https://github.com/telemt/telemt/blob/main/docs/API.md)
|
||||
- Anti-Replay on Sliding Window
|
||||
- Prometheus-format Metrics
|
||||
- TLS-Fronting and TCP-Splicing for masking from "prying" eyes
|
||||
|
||||
Мы работаем над проектом с Нового года и сейчас готовим новый релиз - 1.2
|
||||
[**Telemt Chat in Telegram**](https://t.me/telemtrs)
|
||||
|
||||
В нём имплементируется поддержка Middle Proxy Protocol - основного терминатора для Ad Tag:
|
||||
работа над ним идёт с 6 февраля, а уже 10 февраля произошли "громкие события"...
|
||||
## NEWS and EMERGENCY
|
||||
### ✈️ Telemt 3 is released!
|
||||
<table>
|
||||
<tr>
|
||||
<td width="50%" valign="top">
|
||||
|
||||
Если у вас есть компетенции в асинхронных сетевых приложениях - мы открыты к предложениям и pull requests
|
||||
### 🇷🇺 RU
|
||||
|
||||
**Important message for users from Russia**
|
||||
#### Релиз 3.3.3 LTS - 6 марта
|
||||
|
||||
We've been working on the project since December 30 and are currently preparing a new release – 1.2
|
||||
6 марта мы выпустили Telemt **3.3.3**
|
||||
|
||||
It implements support for the Middle Proxy Protocol – the primary point for the Ad Tag:
|
||||
development on it started on February 6th, and by February 10th, "big activity" in Russia had already "taken place"...
|
||||
Это первая версия telemt работающая в комплексных условиях и при этом предоставляющая API
|
||||
|
||||
If you have expertise in asynchronous network applications – we are open to ideas and pull requests!
|
||||
В ней используется новый алгоритм - ME NoWait, который вместе с Adaptive Floor и моделью усовершенствованного доступа к KDF Fingerprint на RwLock позволяет достигать максимальную производительность, даже в условиях lossy-сети
|
||||
|
||||
Будем рады вашему фидбеку и предложениям по улучшению — особенно в части **статистики** и **UX**
|
||||
|
||||
Релиз:
|
||||
[3.3.3](https://github.com/telemt/telemt/releases/tag/3.3.3)
|
||||
|
||||
---
|
||||
|
||||
Если у вас есть компетенции в:
|
||||
|
||||
- Асинхронных сетевых приложениях
|
||||
- Анализе трафика
|
||||
- Реверс-инжиниринге
|
||||
- Сетевых расследованиях
|
||||
|
||||
Мы открыты к архитектурным предложениям, идеям и pull requests
|
||||
</td>
|
||||
<td width="50%" valign="top">
|
||||
|
||||
### 🇬🇧 EN
|
||||
|
||||
#### Release 3.3.3 LTS - March 6
|
||||
|
||||
On March 6, we released Telemt **3.3.3**
|
||||
|
||||
This is the first telemt's version designed to operate reliably in complex network conditions while also providing a runtime API!
|
||||
|
||||
The release introduces a new algorithm — ME NoWait, which combined with Adaptive Floor and an improved KDF Fingerprint access model based on RwLock, it enables the system to achieve maximum performance even in lossy network environments
|
||||
|
||||
We are looking forward to your feedback and improvement proposals — especially regarding **statistics** and **UX**
|
||||
|
||||
Release:
|
||||
[3.3.3](https://github.com/telemt/telemt/releases/tag/3.3.3)
|
||||
|
||||
---
|
||||
|
||||
If you have expertise in:
|
||||
|
||||
- Asynchronous network applications
|
||||
- Traffic analysis
|
||||
- Reverse engineering
|
||||
- Network forensics
|
||||
|
||||
We welcome ideas, architectural feedback, and pull requests.
|
||||
</td>
|
||||
</tr>
|
||||
</table>
|
||||
|
||||
# Features
|
||||
💥 The configuration structure has changed since version 1.1.0.0, change it in your environment!
|
||||
💥 The configuration structure has changed since version 1.1.0.0, change it in your environment!
|
||||
|
||||
⚓ Our implementation of **TLS-fronting** is one of the most deeply debugged, focused, advanced and *almost* **"behaviorally consistent to real"**: we are confident we have it right - [see evidence on our validation and traces](#recognizability-for-dpi-and-crawler)
|
||||
⚓ Our implementation of **TLS-fronting** is one of the most deeply debugged, focused, advanced and *almost* **"behaviorally consistent to real"**: we are confident we have it right - [see evidence on our validation and traces](#recognizability-for-dpi-and-crawler)
|
||||
|
||||
⚓ Our ***Middle-End Pool*** is the fastest by design in standard scenarios, compared to other implementations of connecting to the Middle-End Proxy: not dramatically, but consistently
|
||||
|
||||
# GOTO
|
||||
- [Features](#features)
|
||||
@@ -44,7 +99,9 @@ If you have expertise in asynchronous network applications – we are open to id
|
||||
- [Telegram Calls](#telegram-calls-via-mtproxy)
|
||||
- [DPI](#how-does-dpi-see-mtproxy-tls)
|
||||
- [Whitelist on Network Level](#whitelist-on-ip)
|
||||
- [Too many open files](#too-many-open-files)
|
||||
- [Build](#build)
|
||||
- [Docker](#docker)
|
||||
- [Why Rust?](#why-rust)
|
||||
|
||||
## Features
|
||||
@@ -60,174 +117,18 @@ If you have expertise in asynchronous network applications – we are open to id
|
||||
- Extensive logging via `trace` and `debug` with `RUST_LOG` method
|
||||
|
||||
## Quick Start Guide
|
||||
**This software is designed for Debian-based OS: in addition to Debian, these are Ubuntu, Mint, Kali, MX and many other Linux**
|
||||
1. Download release
|
||||
```bash
|
||||
wget https://github.com/telemt/telemt/releases/latest/download/telemt
|
||||
```
|
||||
2. Move to Bin Folder
|
||||
```bash
|
||||
mv telemt /bin
|
||||
```
|
||||
4. Make Executable
|
||||
```bash
|
||||
chmod +x /bin/telemt
|
||||
```
|
||||
5. Go to [How to use?](#how-to-use) section for further steps
|
||||
|
||||
## How to use?
|
||||
### Telemt via Systemd
|
||||
**This instruction "assume" that you:**
|
||||
- logged in as root or executed `su -` / `sudo su`
|
||||
- you already have an assembled and executable `telemt` in /bin folder as a result of the [Quick Start Guide](#quick-start-guide) or [Build](#build)
|
||||
### [Quick Start Guide RU](docs/QUICK_START_GUIDE.ru.md)
|
||||
### [Quick Start Guide EN](docs/QUICK_START_GUIDE.en.md)
|
||||
|
||||
**0. Check port and generate secrets**
|
||||
|
||||
The port you have selected for use should be MISSING from the list, when:
|
||||
```bash
|
||||
netstat -lnp
|
||||
```
|
||||
|
||||
Generate 16 bytes/32 characters HEX with OpenSSL or another way:
|
||||
```bash
|
||||
openssl rand -hex 16
|
||||
```
|
||||
OR
|
||||
```bash
|
||||
xxd -l 16 -p /dev/urandom
|
||||
```
|
||||
OR
|
||||
```bash
|
||||
python3 -c 'import os; print(os.urandom(16).hex())'
|
||||
```
|
||||
|
||||
**1. Place your config to /etc/telemt.toml**
|
||||
|
||||
Open nano
|
||||
```bash
|
||||
nano /etc/telemt.toml
|
||||
```
|
||||
paste your config from [Configuration](#configuration) section
|
||||
|
||||
then Ctrl+X -> Y -> Enter to save
|
||||
|
||||
**2. Create service on /etc/systemd/system/telemt.service**
|
||||
|
||||
Open nano
|
||||
```bash
|
||||
nano /etc/systemd/system/telemt.service
|
||||
```
|
||||
paste this Systemd Module
|
||||
```bash
|
||||
[Unit]
|
||||
Description=Telemt
|
||||
After=network.target
|
||||
|
||||
[Service]
|
||||
Type=simple
|
||||
WorkingDirectory=/bin
|
||||
ExecStart=/bin/telemt /etc/telemt.toml
|
||||
Restart=on-failure
|
||||
|
||||
[Install]
|
||||
WantedBy=multi-user.target
|
||||
```
|
||||
then Ctrl+X -> Y -> Enter to save
|
||||
|
||||
**3.** In Shell type `systemctl start telemt` - it must start with zero exit-code
|
||||
|
||||
**4.** In Shell type `systemctl status telemt` - there you can reach info about current MTProxy status
|
||||
|
||||
**5.** In Shell type `systemctl enable telemt` - then telemt will start with system startup, after the network is up
|
||||
|
||||
## Configuration
|
||||
### Minimal Configuration for First Start
|
||||
```toml
|
||||
# === UI ===
|
||||
# Users to show in the startup log (tg:// links)
|
||||
show_link = ["hello"]
|
||||
|
||||
# === General Settings ===
|
||||
[general]
|
||||
prefer_ipv6 = false
|
||||
fast_mode = true
|
||||
use_middle_proxy = false
|
||||
# ad_tag = "..."
|
||||
|
||||
[general.modes]
|
||||
classic = false
|
||||
secure = false
|
||||
tls = true
|
||||
|
||||
# === Server Binding ===
|
||||
[server]
|
||||
port = 443
|
||||
listen_addr_ipv4 = "0.0.0.0"
|
||||
listen_addr_ipv6 = "::"
|
||||
# metrics_port = 9090
|
||||
# metrics_whitelist = ["127.0.0.1", "::1"]
|
||||
|
||||
# Listen on multiple interfaces/IPs (overrides listen_addr_*)
|
||||
[[server.listeners]]
|
||||
ip = "0.0.0.0"
|
||||
# announce_ip = "1.2.3.4" # Optional: Public IP for tg:// links
|
||||
|
||||
[[server.listeners]]
|
||||
ip = "::"
|
||||
|
||||
# === Timeouts (in seconds) ===
|
||||
[timeouts]
|
||||
client_handshake = 15
|
||||
tg_connect = 10
|
||||
client_keepalive = 60
|
||||
client_ack = 300
|
||||
|
||||
# === Anti-Censorship & Masking ===
|
||||
[censorship]
|
||||
tls_domain = "petrovich.ru"
|
||||
mask = true
|
||||
mask_port = 443
|
||||
# mask_host = "petrovich.ru" # Defaults to tls_domain if not set
|
||||
# mask_unix_sock = "/var/run/nginx.sock" # Unix socket (mutually exclusive with mask_host)
|
||||
fake_cert_len = 2048
|
||||
|
||||
# === Access Control & Users ===
|
||||
# username "hello" is used for example
|
||||
[access]
|
||||
replay_check_len = 65536
|
||||
ignore_time_skew = false
|
||||
|
||||
[access.users]
|
||||
# format: "username" = "32_hex_chars_secret"
|
||||
hello = "00000000000000000000000000000000"
|
||||
|
||||
# [access.user_max_tcp_conns]
|
||||
# hello = 50
|
||||
|
||||
# [access.user_data_quota]
|
||||
# hello = 1073741824 # 1 GB
|
||||
|
||||
# === Upstreams & Routing ===
|
||||
# By default, direct connection is used, but you can add SOCKS proxy
|
||||
|
||||
# Direct - Default
|
||||
[[upstreams]]
|
||||
type = "direct"
|
||||
enabled = true
|
||||
weight = 10
|
||||
|
||||
# SOCKS5
|
||||
# [[upstreams]]
|
||||
# type = "socks5"
|
||||
# address = "127.0.0.1:9050"
|
||||
# enabled = false
|
||||
# weight = 1
|
||||
```
|
||||
### Advanced
|
||||
#### Adtag
|
||||
To use channel advertising and usage statistics from Telegram, get Adtag from [@mtproxybot](https://t.me/mtproxybot), add this parameter to section `[General]`
|
||||
#### Adtag (per-user)
|
||||
To use channel advertising and usage statistics from Telegram, get an Adtag from [@mtproxybot](https://t.me/mtproxybot). Set it per user in `[access.user_ad_tags]` (32 hex chars):
|
||||
```toml
|
||||
ad_tag = "00000000000000000000000000000000" # Replace zeros to your adtag from @mtproxybot
|
||||
[access.user_ad_tags]
|
||||
username1 = "11111111111111111111111111111111" # Replace with your tag from @mtproxybot
|
||||
username2 = "22222222222222222222222222222222"
|
||||
```
|
||||
#### Listening and Announce IPs
|
||||
To specify listening address and/or address in links, add to section `[[server.listeners]]` of config.toml:
|
||||
@@ -377,6 +278,23 @@ Keep-Alive: timeout=60
|
||||
- in China behind the Great Firewall
|
||||
- in Russia on mobile networks, less in wired networks
|
||||
- in Iran during "activity"
|
||||
### Too many open files
|
||||
- On a fresh Linux install the default open file limit is low; under load `telemt` may fail with `Accept error: Too many open files`
|
||||
- **Systemd**: add `LimitNOFILE=65536` to the `[Service]` section (already included in the example above)
|
||||
- **Docker**: add `--ulimit nofile=65536:65536` to your `docker run` command, or in `docker-compose.yml`:
|
||||
```yaml
|
||||
ulimits:
|
||||
nofile:
|
||||
soft: 65536
|
||||
hard: 65536
|
||||
```
|
||||
- **System-wide** (optional): add to `/etc/security/limits.conf`:
|
||||
```
|
||||
* soft nofile 1048576
|
||||
* hard nofile 1048576
|
||||
root soft nofile 1048576
|
||||
root hard nofile 1048576
|
||||
```
|
||||
|
||||
|
||||
## Build
|
||||
@@ -395,9 +313,44 @@ chmod +x /bin/telemt
|
||||
telemt config.toml
|
||||
```
|
||||
|
||||
## Docker
|
||||
**Quick start (Docker Compose)**
|
||||
|
||||
1. Edit `config.toml` in the repo root (at least: port, user secrets, tls_domain)
|
||||
2. Start container:
|
||||
```bash
|
||||
docker compose up -d --build
|
||||
```
|
||||
3. Check logs:
|
||||
```bash
|
||||
docker compose logs -f telemt
|
||||
```
|
||||
4. Stop:
|
||||
```bash
|
||||
docker compose down
|
||||
```
|
||||
|
||||
**Notes**
|
||||
- `docker-compose.yml` maps `./config.toml` to `/app/config.toml` (read-only)
|
||||
- By default it publishes `443:443` and runs with dropped capabilities (only `NET_BIND_SERVICE` is added)
|
||||
- If you really need host networking (usually only for some IPv6 setups) uncomment `network_mode: host`
|
||||
|
||||
**Run without Compose**
|
||||
```bash
|
||||
docker build -t telemt:local .
|
||||
docker run --name telemt --restart unless-stopped \
|
||||
-p 443:443 \
|
||||
-e RUST_LOG=info \
|
||||
-v "$PWD/config.toml:/app/config.toml:ro" \
|
||||
--read-only \
|
||||
--cap-drop ALL --cap-add NET_BIND_SERVICE \
|
||||
--ulimit nofile=65536:65536 \
|
||||
telemt:local
|
||||
```
|
||||
|
||||
## Why Rust?
|
||||
- Long-running reliability and idempotent behavior
|
||||
- Rust’s deterministic resource management - RAII
|
||||
- Rust's deterministic resource management - RAII
|
||||
- No garbage collector
|
||||
- Memory safety and reduced attack surface
|
||||
- Tokio's asynchronous architecture
|
||||
|
||||
34
ROADMAP.md
Normal file
34
ROADMAP.md
Normal file
@@ -0,0 +1,34 @@
|
||||
### 3.0.0 Anschluss
|
||||
- **Middle Proxy is now stable**, confirmed via canary deploy with ~20 users
|
||||
- Ad-tag now is working
|
||||
- DC=203/CDN now is working over ME
|
||||
- `getProxyConfig` and `ProxySecret` are automated
|
||||
- Version order is now in format `3.0.0` - without Windows-style "microfixes"
|
||||
|
||||
### 3.0.1 Kabelsammler
|
||||
- Handshake timeouts fixed
|
||||
- Connectivity logging refactored
|
||||
- Docker: tmpfs for ProxyConfig and ProxySecret
|
||||
- Public Host and Port in config
|
||||
- ME Relays Head-of-Line Blocking fixed
|
||||
- ME Ping
|
||||
|
||||
### 3.0.2 Microtrencher
|
||||
- New [network] section
|
||||
- ME Fixes
|
||||
- Small bugs coverage
|
||||
|
||||
### 3.0.3 Ausrutscher
|
||||
- ME as stateful, no conn-id migration
|
||||
- No `flush()` on datapath after RpcWriter
|
||||
- Hightech parser for IPv6 without regexp
|
||||
- `nat_probe = true` by default
|
||||
- Timeout for `recv()` in STUN-client
|
||||
- ConnRegistry review
|
||||
- Dualstack emergency reconnect
|
||||
|
||||
### 3.0.4 Schneeflecken
|
||||
- Only WARN and Links in Normal log
|
||||
- Consistent IP-family detection
|
||||
- Includes for config
|
||||
- `nonce_frame_hex` in log only with `DEBUG`
|
||||
697
config.full.toml
Normal file
697
config.full.toml
Normal file
@@ -0,0 +1,697 @@
|
||||
# ==============================================================================
|
||||
#
|
||||
# TELEMT — Advanced Rust-based Telegram MTProto Proxy
|
||||
# Full Configuration Reference
|
||||
#
|
||||
# This file is both a working config and a complete documentation.
|
||||
# Every parameter is explained. Read it top to bottom before deploying.
|
||||
#
|
||||
# Quick Start:
|
||||
# 1. Set [server].port to your desired port (443 recommended)
|
||||
# 2. Generate a secret: openssl rand -hex 16
|
||||
# 3. Put it in [access.users] under a name you choose
|
||||
# 4. Set [censorship].tls_domain to a popular unblocked HTTPS site
|
||||
# 5. Set your public IP in [general].middle_proxy_nat_ip
|
||||
# and [general.links].public_host
|
||||
# 6. Set announce IP in [[server.listeners]]
|
||||
# 7. Run Telemt. It prints a tg:// link. Send it to your users.
|
||||
#
|
||||
# Modes of Operation:
|
||||
# Direct Mode (use_middle_proxy = false)
|
||||
# Connects straight to Telegram DCs via TCP. Simple, fast, low overhead.
|
||||
# No ad_tag support. No CDN DC support (203, etc).
|
||||
#
|
||||
# Middle-Proxy Mode (use_middle_proxy = true)
|
||||
# Connects to Telegram Middle-End servers via RPC protocol.
|
||||
# Required for ad_tag monetization and CDN support.
|
||||
# Requires proxy_secret_path and a valid public IP.
|
||||
#
|
||||
# ==============================================================================
|
||||
|
||||
|
||||
# ==============================================================================
|
||||
# LEGACY TOP-LEVEL FIELDS
|
||||
# ==============================================================================
|
||||
|
||||
# Deprecated. Use [general.links].show instead.
|
||||
# Accepts "*" for all users, or an array like ["alice", "bob"].
|
||||
show_link = ["0"]
|
||||
|
||||
# Fallback Datacenter index (1-5) when a client requests an unknown DC ID.
|
||||
# DC 2 is Amsterdam (Europe), closest for most CIS users.
|
||||
# default_dc = 2
|
||||
|
||||
|
||||
# ==============================================================================
|
||||
# GENERAL SETTINGS
|
||||
# ==============================================================================
|
||||
|
||||
[general]
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# Core Protocol
|
||||
# ------------------------------------------------------------------------------
|
||||
|
||||
# Coalesce the MTProto handshake and first data payload into a single TCP packet.
|
||||
# Significantly reduces connection latency. No reason to disable.
|
||||
fast_mode = true
|
||||
|
||||
# How the proxy connects to Telegram servers.
|
||||
# false = Direct TCP to Telegram DCs (simple, low overhead)
|
||||
# true = Middle-End RPC protocol (required for ad_tag and CDN DCs)
|
||||
use_middle_proxy = true
|
||||
|
||||
# 32-char hex Ad-Tag from @MTProxybot for sponsored channel injection.
|
||||
# Only works when use_middle_proxy = true.
|
||||
# Obtain yours: message @MTProxybot on Telegram, register your proxy.
|
||||
# ad_tag = "00000000000000000000000000000000"
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# Middle-End Authentication
|
||||
# ------------------------------------------------------------------------------
|
||||
|
||||
# Path to the Telegram infrastructure AES key file.
|
||||
# Auto-downloaded from https://core.telegram.org/getProxySecret on first run.
|
||||
# This key authenticates your proxy with Middle-End servers.
|
||||
proxy_secret_path = "proxy-secret"
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# Public IP Configuration (Critical for Middle-Proxy Mode)
|
||||
# ------------------------------------------------------------------------------
|
||||
|
||||
# Your server's PUBLIC IPv4 address.
|
||||
# Middle-End servers need this for the cryptographic Key Derivation Function.
|
||||
# If your server has a direct public IP, set it here.
|
||||
# If behind NAT (AWS, Docker, etc.), this MUST be your external IP.
|
||||
# If omitted, Telemt uses STUN to auto-detect (see middle_proxy_nat_probe).
|
||||
# middle_proxy_nat_ip = "203.0.113.10"
|
||||
|
||||
# Auto-detect public IP via STUN servers defined in [network].
|
||||
# Set to false if you hardcoded middle_proxy_nat_ip above.
|
||||
# Set to true if you want automatic detection.
|
||||
middle_proxy_nat_probe = true
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# Middle-End Connection Pool
|
||||
# ------------------------------------------------------------------------------
|
||||
|
||||
# Number of persistent multiplexed RPC connections to ME servers.
|
||||
# All client traffic is routed through these "fat pipes".
|
||||
# 8 handles thousands of concurrent users comfortably.
|
||||
middle_proxy_pool_size = 8
|
||||
|
||||
# Legacy field. Connections kept initialized but idle as warm standby.
|
||||
middle_proxy_warm_standby = 16
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# Middle-End Keepalive
|
||||
# Telegram ME servers aggressively kill idle TCP connections.
|
||||
# These settings send periodic RPC_PING frames to keep pipes alive.
|
||||
# ------------------------------------------------------------------------------
|
||||
|
||||
me_keepalive_enabled = true
|
||||
|
||||
# Base interval between pings in seconds.
|
||||
me_keepalive_interval_secs = 25
|
||||
|
||||
# Random jitter added to interval to prevent all connections pinging simultaneously.
|
||||
me_keepalive_jitter_secs = 5
|
||||
|
||||
# Randomize ping payload bytes to prevent DPI from fingerprinting ping patterns.
|
||||
me_keepalive_payload_random = true
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# Client-Side Limits
|
||||
# ------------------------------------------------------------------------------
|
||||
|
||||
# Max buffered ciphertext per client (bytes) when upstream is slow.
|
||||
# Acts as backpressure to prevent memory exhaustion. 256KB is safe.
|
||||
crypto_pending_buffer = 262144
|
||||
|
||||
# Maximum single MTProto frame size from client. 16MB is protocol standard.
|
||||
max_client_frame = 16777216
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# Crypto Desynchronization Logging
|
||||
# Desync errors usually mean DPI/GFW is tampering with connections.
|
||||
# ------------------------------------------------------------------------------
|
||||
|
||||
# true = full forensics (trace ID, IP hash, hex dumps) for EVERY desync event
|
||||
# false = deduplicated logging, one entry per time window (prevents log spam)
|
||||
# Set true if you are actively debugging DPI interference.
|
||||
desync_all_full = true
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# Beobachten — Built-in Honeypot / Active Probe Tracker
|
||||
# Tracks IPs that fail handshakes or behave like TLS scanners.
|
||||
# Output file can be fed into fail2ban or iptables for auto-blocking.
|
||||
# ------------------------------------------------------------------------------
|
||||
|
||||
beobachten = true
|
||||
|
||||
# How long (minutes) to remember a suspicious IP before expiring it.
|
||||
beobachten_minutes = 30
|
||||
|
||||
# How often (seconds) to flush tracker state to disk.
|
||||
beobachten_flush_secs = 15
|
||||
|
||||
# File path for the tracker output.
|
||||
beobachten_file = "cache/beobachten.txt"
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# Hardswap — Zero-Downtime ME Pool Rotation
|
||||
# When Telegram updates ME server IPs, Hardswap creates a completely new pool,
|
||||
# waits until it is fully ready, migrates traffic, then kills the old pool.
|
||||
# Users experience zero interruption.
|
||||
# ------------------------------------------------------------------------------
|
||||
|
||||
hardswap = true
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# ME Pool Warmup Staggering
|
||||
# When creating a new pool, connections are opened one by one with delays
|
||||
# to avoid a burst of SYN packets that could trigger ISP flood protection.
|
||||
# ------------------------------------------------------------------------------
|
||||
|
||||
me_warmup_stagger_enabled = true
|
||||
|
||||
# Delay between each connection creation (milliseconds).
|
||||
me_warmup_step_delay_ms = 500
|
||||
|
||||
# Random jitter added to the delay (milliseconds).
|
||||
me_warmup_step_jitter_ms = 300
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# ME Reconnect Backoff
|
||||
# If an ME server drops the connection, Telemt retries with this strategy.
|
||||
# ------------------------------------------------------------------------------
|
||||
|
||||
# Max simultaneous reconnect attempts per DC.
|
||||
me_reconnect_max_concurrent_per_dc = 8
|
||||
|
||||
# Exponential backoff base (milliseconds).
|
||||
me_reconnect_backoff_base_ms = 500
|
||||
|
||||
# Backoff ceiling (milliseconds). Will never wait longer than this.
|
||||
me_reconnect_backoff_cap_ms = 30000
|
||||
|
||||
# Number of instant retries before switching to exponential backoff.
|
||||
me_reconnect_fast_retry_count = 12
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# NAT Mismatch Behavior
|
||||
# If STUN-detected IP differs from local interface IP (you are behind NAT).
|
||||
# false = abort ME mode (safe default)
|
||||
# true = force ME mode anyway (use if you know your NAT setup is correct)
|
||||
# ------------------------------------------------------------------------------
|
||||
|
||||
stun_iface_mismatch_ignore = false
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# Logging
|
||||
# ------------------------------------------------------------------------------
|
||||
|
||||
# File to log unknown DC requests (DC IDs outside standard 1-5).
|
||||
unknown_dc_log_path = "unknown-dc.txt"
|
||||
|
||||
# Verbosity: "debug" | "verbose" | "normal" | "silent"
|
||||
log_level = "normal"
|
||||
|
||||
# Disable ANSI color codes in log output (useful for file logging).
|
||||
disable_colors = false
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# FakeTLS Record Sizing
|
||||
# Buffer small MTProto packets into larger TLS records to mimic real HTTPS.
|
||||
# Real HTTPS servers send records close to MTU size (~1400 bytes).
|
||||
# A stream of tiny TLS records is a strong DPI signal.
|
||||
# Set to 0 to disable. Set to 1400 for realistic HTTPS emulation.
|
||||
# ------------------------------------------------------------------------------
|
||||
|
||||
fast_mode_min_tls_record = 1400
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# Periodic Updates
|
||||
# ------------------------------------------------------------------------------
|
||||
|
||||
# How often (seconds) to re-fetch ME server lists and proxy secrets
|
||||
# from core.telegram.org. Keeps your proxy in sync with Telegram infrastructure.
|
||||
update_every = 300
|
||||
|
||||
# How often (seconds) to force a Hardswap even if the ME map is unchanged.
|
||||
# Shorter intervals mean shorter-lived TCP flows, harder for DPI to profile.
|
||||
me_reinit_every_secs = 600
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# Hardswap Warmup Tuning
|
||||
# Fine-grained control over how the new pool is warmed up before traffic switch.
|
||||
# ------------------------------------------------------------------------------
|
||||
|
||||
me_hardswap_warmup_delay_min_ms = 1000
|
||||
me_hardswap_warmup_delay_max_ms = 2000
|
||||
me_hardswap_warmup_extra_passes = 3
|
||||
me_hardswap_warmup_pass_backoff_base_ms = 500
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# Config Update Debouncing
|
||||
# Telegram sometimes pushes transient/broken configs. Debouncing requires
|
||||
# N consecutive identical fetches before applying a change.
|
||||
# ------------------------------------------------------------------------------
|
||||
|
||||
# ME server list must be identical for this many fetches before applying.
|
||||
me_config_stable_snapshots = 2
|
||||
|
||||
# Minimum seconds between config applications.
|
||||
me_config_apply_cooldown_secs = 300
|
||||
|
||||
# Proxy secret must be identical for this many fetches before applying.
|
||||
proxy_secret_stable_snapshots = 2
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# Proxy Secret Rotation
|
||||
# ------------------------------------------------------------------------------
|
||||
|
||||
# Apply newly downloaded secrets at runtime without restart.
|
||||
proxy_secret_rotate_runtime = true
|
||||
|
||||
# Maximum acceptable secret length (bytes). Rejects abnormally large secrets.
|
||||
proxy_secret_len_max = 256
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# Hardswap Drain Settings
|
||||
# Controls graceful shutdown of old ME connections during pool rotation.
|
||||
# ------------------------------------------------------------------------------
|
||||
|
||||
# Seconds to keep old connections alive for in-flight data before force-closing.
|
||||
me_pool_drain_ttl_secs = 90
|
||||
|
||||
# Minimum ratio of healthy connections in new pool before draining old pool.
|
||||
# 0.8 = at least 80% of new pool must be ready.
|
||||
me_pool_min_fresh_ratio = 0.8
|
||||
|
||||
# Maximum seconds to wait for drain to complete before force-killing.
|
||||
me_reinit_drain_timeout_secs = 120
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# NTP Clock Check
|
||||
# MTProto uses timestamps. Clock drift > 30 seconds breaks handshakes.
|
||||
# Telemt checks on startup and warns if out of sync.
|
||||
# ------------------------------------------------------------------------------
|
||||
|
||||
ntp_check = true
|
||||
ntp_servers = ["pool.ntp.org"]
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# Auto-Degradation
|
||||
# If ME servers become completely unreachable (ISP blocking),
|
||||
# automatically fall back to Direct Mode so users stay connected.
|
||||
# ------------------------------------------------------------------------------
|
||||
|
||||
auto_degradation_enabled = true
|
||||
|
||||
# Number of DC groups that must be unreachable before triggering fallback.
|
||||
degradation_min_unavailable_dc_groups = 2
|
||||
|
||||
|
||||
# ==============================================================================
|
||||
# ALLOWED CLIENT PROTOCOLS
|
||||
# Only enable what you need. In censored regions, TLS-only is safest.
|
||||
# ==============================================================================
|
||||
|
||||
[general.modes]
|
||||
|
||||
# Classic MTProto. Unobfuscated length prefixes. Trivially detected by DPI.
|
||||
# No reason to enable unless you have ancient clients.
|
||||
classic = false
|
||||
|
||||
# Obfuscated MTProto with randomized padding. Better than classic, but
|
||||
# still detectable by statistical analysis of packet sizes.
|
||||
secure = false
|
||||
|
||||
# FakeTLS (ee-secrets). Wraps MTProto in TLS 1.3 framing.
|
||||
# To DPI, it looks like a normal HTTPS connection.
|
||||
# This should be the ONLY enabled mode in censored environments.
|
||||
tls = true
|
||||
|
||||
|
||||
# ==============================================================================
|
||||
# STARTUP LINK GENERATION
|
||||
# Controls what tg:// invite links are printed to console on startup.
|
||||
# ==============================================================================
|
||||
|
||||
[general.links]
|
||||
|
||||
# Which users to generate links for.
|
||||
# "*" = all users, or an array like ["alice", "bob"].
|
||||
show = "*"
|
||||
|
||||
# IP or domain to embed in the tg:// link.
|
||||
# If omitted, Telemt uses STUN to auto-detect.
|
||||
# Set this to your server's public IP or domain for reliable links.
|
||||
# public_host = "proxy.example.com"
|
||||
|
||||
# Port to embed in the tg:// link.
|
||||
# If omitted, uses [server].port.
|
||||
# public_port = 443
|
||||
|
||||
|
||||
# ==============================================================================
|
||||
# NETWORK & IP RESOLUTION
|
||||
# ==============================================================================
|
||||
|
||||
[network]
|
||||
|
||||
# Enable IPv4 for outbound connections to Telegram.
|
||||
ipv4 = true
|
||||
|
||||
# Enable IPv6 for outbound connections to Telegram.
|
||||
ipv6 = false
|
||||
|
||||
# Prefer IPv4 (4) or IPv6 (6) when both are available.
|
||||
prefer = 4
|
||||
|
||||
# Experimental: use both IPv4 and IPv6 ME servers simultaneously.
|
||||
# May improve reliability but doubles connection count.
|
||||
multipath = false
|
||||
|
||||
# STUN servers for external IP discovery.
|
||||
# Used for Middle-Proxy KDF (if nat_probe=true) and link generation.
|
||||
stun_servers = [
|
||||
"stun.l.google.com:5349",
|
||||
"stun1.l.google.com:3478",
|
||||
"stun.gmx.net:3478",
|
||||
"stun.l.google.com:19302"
|
||||
]
|
||||
|
||||
# If UDP STUN is blocked, attempt TCP-based STUN as fallback.
|
||||
stun_tcp_fallback = true
|
||||
|
||||
# If all STUN fails, use HTTP APIs to discover public IP.
|
||||
http_ip_detect_urls = [
|
||||
"https://ifconfig.me/ip",
|
||||
"https://api.ipify.org"
|
||||
]
|
||||
|
||||
# Cache discovered public IP to this file to survive restarts.
|
||||
cache_public_ip_path = "cache/public_ip.txt"
|
||||
|
||||
|
||||
# ==============================================================================
|
||||
# SERVER BINDING & METRICS
|
||||
# ==============================================================================
|
||||
|
||||
[server]
|
||||
|
||||
# TCP port to listen on.
|
||||
# 443 is recommended (looks like normal HTTPS traffic).
|
||||
port = 443
|
||||
|
||||
# IPv4 bind address. "0.0.0.0" = all interfaces.
|
||||
listen_addr_ipv4 = "0.0.0.0"
|
||||
|
||||
# IPv6 bind address. "::" = all interfaces.
|
||||
listen_addr_ipv6 = "::"
|
||||
|
||||
# Unix socket listener (for reverse proxy setups with Nginx/HAProxy).
|
||||
# listen_unix_sock = "/var/run/telemt.sock"
|
||||
# listen_unix_sock_perm = "0660"
|
||||
|
||||
# Enable PROXY protocol header parsing.
|
||||
# Set true ONLY if Telemt is behind HAProxy/Nginx that injects PROXY headers.
|
||||
# If enabled without a proxy in front, clients will fail to connect.
|
||||
proxy_protocol = false
|
||||
|
||||
# Prometheus metrics HTTP endpoint port.
|
||||
# Uncomment to enable. Access at http://your-server:9090/metrics
|
||||
# metrics_port = 9090
|
||||
|
||||
# IP ranges allowed to access the metrics endpoint.
|
||||
metrics_whitelist = [
|
||||
"127.0.0.1/32",
|
||||
"::1/128"
|
||||
]
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# Listener Overrides
|
||||
# Define explicit listeners with specific bind IPs and announce IPs.
|
||||
# The announce IP is what gets embedded in tg:// links and sent to ME servers.
|
||||
# You MUST set announce to your server's public IP for ME mode to work.
|
||||
# ------------------------------------------------------------------------------
|
||||
|
||||
# [[server.listeners]]
|
||||
# ip = "0.0.0.0"
|
||||
# announce = "203.0.113.10"
|
||||
# reuse_allow = false
|
||||
|
||||
|
||||
# ==============================================================================
|
||||
# TIMEOUTS (seconds unless noted)
|
||||
# ==============================================================================
|
||||
|
||||
[timeouts]
|
||||
|
||||
# Maximum time for client to complete FakeTLS + MTProto handshake.
|
||||
client_handshake = 15
|
||||
|
||||
# Maximum time to establish TCP connection to upstream Telegram DC.
|
||||
tg_connect = 10
|
||||
|
||||
# TCP keepalive interval for client connections.
|
||||
client_keepalive = 60
|
||||
|
||||
# Maximum client inactivity before dropping the connection.
|
||||
client_ack = 300
|
||||
|
||||
# Instant retry count for a single ME endpoint before giving up on it.
|
||||
me_one_retry = 3
|
||||
|
||||
# Timeout (milliseconds) for a single ME endpoint connection attempt.
|
||||
me_one_timeout_ms = 1500
|
||||
|
||||
|
||||
# ==============================================================================
|
||||
# ANTI-CENSORSHIP / FAKETLS / MASKING
|
||||
# This is where Telemt becomes invisible to Deep Packet Inspection.
|
||||
# ==============================================================================
|
||||
|
||||
[censorship]
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# TLS Domain Fronting
|
||||
# The SNI (Server Name Indication) your proxy presents to connecting clients.
|
||||
# Must be a popular, unblocked HTTPS website in your target country.
|
||||
# DPI sees traffic to this domain. Choose carefully.
|
||||
# Good choices: major CDNs, banks, government sites, search engines.
|
||||
# Bad choices: obscure sites, already-blocked domains.
|
||||
# ------------------------------------------------------------------------------
|
||||
|
||||
tls_domain = "www.google.com"
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# Active Probe Masking
|
||||
# When someone connects but fails the MTProto handshake (wrong secret),
|
||||
# they might be an ISP active prober testing if this is a proxy.
|
||||
#
|
||||
# mask = false: drop the connection (prober knows something is here)
|
||||
# mask = true: transparently proxy them to mask_host (prober sees a real website)
|
||||
#
|
||||
# With mask enabled, your server is indistinguishable from a real web server
|
||||
# to anyone who doesn't have the correct secret.
|
||||
# ------------------------------------------------------------------------------
|
||||
|
||||
mask = true
|
||||
|
||||
# The real web server to forward failed handshakes to.
|
||||
# If omitted, defaults to tls_domain.
|
||||
# mask_host = "www.google.com"
|
||||
|
||||
# Port on the mask host to connect to.
|
||||
mask_port = 443
|
||||
|
||||
# Inject PROXY protocol header when forwarding to mask host.
|
||||
# 0 = disabled, 1 = v1, 2 = v2. Leave disabled unless mask_host expects it.
|
||||
# mask_proxy_protocol = 0
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# TLS Certificate Emulation
|
||||
# ------------------------------------------------------------------------------
|
||||
|
||||
# Size (bytes) of the locally generated fake TLS certificate.
|
||||
# Only used when tls_emulation is disabled.
|
||||
fake_cert_len = 2048
|
||||
|
||||
# KILLER FEATURE: Real-Time TLS Emulation.
|
||||
# Telemt connects to tls_domain, fetches its actual TLS 1.3 certificate chain,
|
||||
# and exactly replicates the byte sizes of ServerHello and Certificate records.
|
||||
# Defeats DPI that uses TLS record length heuristics to detect proxies.
|
||||
# Strongly recommended in censored environments.
|
||||
tls_emulation = true
|
||||
|
||||
# Directory to cache fetched TLS certificates.
|
||||
tls_front_dir = "tlsfront"
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# ServerHello Timing
|
||||
# Real web servers take 30-150ms to respond to ClientHello due to network
|
||||
# latency and crypto processing. A proxy responding in <1ms is suspicious.
|
||||
# These settings add realistic delay to mimic genuine server behavior.
|
||||
# ------------------------------------------------------------------------------
|
||||
|
||||
# Minimum delay before sending ServerHello (milliseconds).
|
||||
server_hello_delay_min_ms = 50
|
||||
|
||||
# Maximum delay before sending ServerHello (milliseconds).
|
||||
server_hello_delay_max_ms = 150
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# TLS Session Tickets
|
||||
# Real TLS 1.3 servers send 1-2 NewSessionTicket messages after handshake.
|
||||
# A server that sends zero tickets is anomalous and may trigger DPI flags.
|
||||
# Set this to match your tls_domain's behavior (usually 2).
|
||||
# ------------------------------------------------------------------------------
|
||||
|
||||
# tls_new_session_tickets = 0
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# Full Certificate Frequency
|
||||
# When tls_emulation is enabled, this controls how often (per client IP)
|
||||
# to send the complete emulated certificate chain.
|
||||
#
|
||||
# > 0: Subsequent connections within TTL seconds get a smaller cached version.
|
||||
# Saves bandwidth but creates a detectable size difference between
|
||||
# first and repeat connections.
|
||||
#
|
||||
# = 0: Every connection gets the full certificate. More bandwidth but
|
||||
# perfectly consistent behavior, no anomalies for DPI to detect.
|
||||
# ------------------------------------------------------------------------------
|
||||
|
||||
tls_full_cert_ttl_secs = 0
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# ALPN Enforcement
|
||||
# Ensure ServerHello responds with the exact ALPN protocol the client requested.
|
||||
# Mismatched ALPN (e.g., client asks h2, server says http/1.1) is a DPI red flag.
|
||||
# ------------------------------------------------------------------------------
|
||||
|
||||
alpn_enforce = true
|
||||
|
||||
|
||||
# ==============================================================================
|
||||
# ACCESS CONTROL & USERS
|
||||
# ==============================================================================
|
||||
|
||||
[access]
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# Replay Attack Protection
|
||||
# DPI can record a legitimate user's handshake and replay it later to probe
|
||||
# whether the server is a proxy. Telemt remembers recent handshake nonces
|
||||
# and rejects duplicates.
|
||||
# ------------------------------------------------------------------------------
|
||||
|
||||
# Number of nonce slots in the replay detection buffer.
|
||||
replay_check_len = 65536
|
||||
|
||||
# How long (seconds) to remember nonces before expiring them.
|
||||
replay_window_secs = 1800
|
||||
|
||||
# Allow clients with incorrect system clocks to connect.
|
||||
# false = reject clients with significant time skew (more secure)
|
||||
# true = accept anyone regardless of clock (more permissive)
|
||||
ignore_time_skew = false
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# User Secrets
|
||||
# Each user needs a unique 32-character hex string as their secret.
|
||||
# Generate with: openssl rand -hex 16
|
||||
#
|
||||
# This secret is embedded in the tg:// link. Anyone with it can connect.
|
||||
# Format: username = "hex_secret"
|
||||
# ------------------------------------------------------------------------------
|
||||
|
||||
[access.users]
|
||||
# alice = "0123456789abcdef0123456789abcdef"
|
||||
# bob = "fedcba9876543210fedcba9876543210"
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# Per-User Connection Limits
|
||||
# Limits concurrent TCP connections per user to prevent secret sharing.
|
||||
# Uncomment and set for each user as needed.
|
||||
# ------------------------------------------------------------------------------
|
||||
|
||||
[access.user_max_tcp_conns]
|
||||
# alice = 100
|
||||
# bob = 50
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# Per-User Expiration Dates
|
||||
# Automatically revoke access after the specified date (ISO 8601 format).
|
||||
# ------------------------------------------------------------------------------
|
||||
|
||||
[access.user_expirations]
|
||||
# alice = "2025-12-31T23:59:59Z"
|
||||
# bob = "2026-06-15T00:00:00Z"
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# Per-User Data Quotas
|
||||
# Maximum total bytes transferred per user. Connection refused after limit.
|
||||
# ------------------------------------------------------------------------------
|
||||
|
||||
[access.user_data_quota]
|
||||
# alice = 107374182400
|
||||
# bob = 53687091200
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# Per-User Unique IP Limits
|
||||
# Maximum number of different IP addresses that can use this secret
|
||||
# at the same time. Highly effective against secret leaking/sharing.
|
||||
# Set to 1 for single-device, 2-3 for phone+desktop, etc.
|
||||
# ------------------------------------------------------------------------------
|
||||
|
||||
[access.user_max_unique_ips]
|
||||
# alice = 3
|
||||
# bob = 2
|
||||
|
||||
|
||||
# ==============================================================================
|
||||
# UPSTREAM ROUTING
|
||||
# Controls how Telemt connects to Telegram servers (or ME servers).
|
||||
# If omitted entirely, uses the OS default route.
|
||||
# ==============================================================================
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# Direct upstream: use the server's own network interface.
|
||||
# You can optionally bind to a specific interface or local IP.
|
||||
# ------------------------------------------------------------------------------
|
||||
|
||||
# [[upstreams]]
|
||||
# type = "direct"
|
||||
# interface = "eth0"
|
||||
# bind_addresses = ["192.0.2.10"]
|
||||
# weight = 1
|
||||
# enabled = true
|
||||
# scopes = "*"
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# SOCKS5 upstream: route Telegram traffic through a SOCKS5 proxy.
|
||||
# Useful if your server's IP is blocked from reaching Telegram DCs.
|
||||
# ------------------------------------------------------------------------------
|
||||
|
||||
# [[upstreams]]
|
||||
# type = "socks5"
|
||||
# address = "198.51.100.30:1080"
|
||||
# username = "proxy-user"
|
||||
# password = "proxy-pass"
|
||||
# weight = 1
|
||||
# enabled = true
|
||||
|
||||
|
||||
# ==============================================================================
|
||||
# DATACENTER OVERRIDES
|
||||
# Force specific DC IDs to route to specific IP:Port combinations.
|
||||
# DC 203 (CDN) is auto-injected by Telemt if not specified here.
|
||||
# ==============================================================================
|
||||
|
||||
# [dc_overrides]
|
||||
# "201" = "149.154.175.50:443"
|
||||
# "202" = ["149.154.167.51:443", "149.154.175.100:443"]
|
||||
77
config.toml
77
config.toml
@@ -1,14 +1,15 @@
|
||||
# === UI ===
|
||||
# Users to show in the startup log (tg:// links)
|
||||
show_link = ["hello"]
|
||||
### Telemt Based Config.toml
|
||||
# We believe that these settings are sufficient for most scenarios
|
||||
# where cutting-edge methods and parameters or special solutions are not needed
|
||||
|
||||
# === General Settings ===
|
||||
[general]
|
||||
prefer_ipv6 = false
|
||||
fast_mode = true
|
||||
use_middle_proxy = true
|
||||
ad_tag = "00000000000000000000000000000000"
|
||||
use_middle_proxy = false
|
||||
# Global ad_tag fallback when user has no per-user tag in [access.user_ad_tags]
|
||||
# ad_tag = "00000000000000000000000000000000"
|
||||
# Per-user ad_tag in [access.user_ad_tags] (32 hex from @MTProxybot)
|
||||
|
||||
# === Log Level ===
|
||||
# Log level: debug | verbose | normal | silent
|
||||
# Can be overridden with --silent or --log-level CLI flags
|
||||
# RUST_LOG env var takes absolute priority over all of these
|
||||
@@ -19,62 +20,38 @@ classic = false
|
||||
secure = false
|
||||
tls = true
|
||||
|
||||
[general.links]
|
||||
show = "*"
|
||||
# show = ["alice", "bob"] # Only show links for alice and bob
|
||||
# show = "*" # Show links for all users
|
||||
# public_host = "proxy.example.com" # Host (IP or domain) for tg:// links
|
||||
# public_port = 443 # Port for tg:// links (default: server.port)
|
||||
|
||||
# === Server Binding ===
|
||||
[server]
|
||||
port = 443
|
||||
listen_addr_ipv4 = "0.0.0.0"
|
||||
listen_addr_ipv6 = "::"
|
||||
# proxy_protocol = false # Enable if behind HAProxy/nginx with PROXY protocol
|
||||
# metrics_port = 9090
|
||||
# metrics_whitelist = ["127.0.0.1", "::1"]
|
||||
# metrics_whitelist = ["127.0.0.1", "::1", "0.0.0.0/0"]
|
||||
|
||||
# Listen on multiple interfaces/IPs (overrides listen_addr_*)
|
||||
[server.api]
|
||||
enabled = true
|
||||
listen = "0.0.0.0:9091"
|
||||
whitelist = ["127.0.0.0/8"]
|
||||
minimal_runtime_enabled = false
|
||||
minimal_runtime_cache_ttl_ms = 1000
|
||||
|
||||
# Listen on multiple interfaces/IPs - IPv4
|
||||
[[server.listeners]]
|
||||
ip = "0.0.0.0"
|
||||
# announce_ip = "1.2.3.4" # Optional: Public IP for tg:// links
|
||||
|
||||
[[server.listeners]]
|
||||
ip = "::"
|
||||
|
||||
# === Timeouts (in seconds) ===
|
||||
[timeouts]
|
||||
client_handshake = 15
|
||||
tg_connect = 10
|
||||
client_keepalive = 60
|
||||
client_ack = 300
|
||||
|
||||
# === Anti-Censorship & Masking ===
|
||||
[censorship]
|
||||
tls_domain = "petrovich.ru"
|
||||
mask = true
|
||||
mask_port = 443
|
||||
# mask_host = "petrovich.ru" # Defaults to tls_domain if not set
|
||||
# mask_unix_sock = "/var/run/nginx.sock" # Unix socket (mutually exclusive with mask_host)
|
||||
fake_cert_len = 2048
|
||||
|
||||
# === Access Control & Users ===
|
||||
[access]
|
||||
replay_check_len = 65536
|
||||
replay_window_secs = 1800
|
||||
ignore_time_skew = false
|
||||
tls_emulation = true # Fetch real cert lengths and emulate TLS records
|
||||
tls_front_dir = "tlsfront" # Cache directory for TLS emulation
|
||||
|
||||
[access.users]
|
||||
# format: "username" = "32_hex_chars_secret"
|
||||
hello = "00000000000000000000000000000000"
|
||||
|
||||
# [access.user_max_tcp_conns]
|
||||
# hello = 50
|
||||
|
||||
# [access.user_data_quota]
|
||||
# hello = 1073741824 # 1 GB
|
||||
|
||||
# === Upstreams & Routing ===
|
||||
[[upstreams]]
|
||||
type = "direct"
|
||||
enabled = true
|
||||
weight = 10
|
||||
|
||||
# [[upstreams]]
|
||||
# type = "socks5"
|
||||
# address = "127.0.0.1:1080"
|
||||
# enabled = false
|
||||
# weight = 1
|
||||
30
docker-compose.yml
Normal file
30
docker-compose.yml
Normal file
@@ -0,0 +1,30 @@
|
||||
services:
|
||||
telemt:
|
||||
image: ghcr.io/telemt/telemt:latest
|
||||
build: .
|
||||
container_name: telemt
|
||||
restart: unless-stopped
|
||||
ports:
|
||||
- "443:443"
|
||||
- "127.0.0.1:9090:9090"
|
||||
# Allow caching 'proxy-secret' in read-only container
|
||||
working_dir: /run/telemt
|
||||
volumes:
|
||||
- ./config.toml:/run/telemt/config.toml:ro
|
||||
tmpfs:
|
||||
- /run/telemt:rw,mode=1777,size=1m
|
||||
environment:
|
||||
- RUST_LOG=info
|
||||
# Uncomment this line if you want to use host network for IPv6, but bridge is default and usually better
|
||||
# network_mode: host
|
||||
cap_drop:
|
||||
- ALL
|
||||
cap_add:
|
||||
- NET_BIND_SERVICE # allow binding to port 443
|
||||
read_only: true
|
||||
security_opt:
|
||||
- no-new-privileges:true
|
||||
ulimits:
|
||||
nofile:
|
||||
soft: 65536
|
||||
hard: 65536
|
||||
673
docs/API.md
Normal file
673
docs/API.md
Normal file
@@ -0,0 +1,673 @@
|
||||
# Telemt Control API
|
||||
|
||||
## Purpose
|
||||
Control-plane HTTP API for runtime visibility and user/config management.
|
||||
Data-plane MTProto traffic is out of scope.
|
||||
|
||||
## Runtime Configuration
|
||||
API runtime is configured in `[server.api]`.
|
||||
|
||||
| Field | Type | Default | Description |
|
||||
| --- | --- | --- | --- |
|
||||
| `enabled` | `bool` | `false` | Enables REST API listener. |
|
||||
| `listen` | `string` (`IP:PORT`) | `127.0.0.1:9091` | API bind address. |
|
||||
| `whitelist` | `CIDR[]` | `127.0.0.1/32, ::1/128` | Source IP allowlist. Empty list means allow all. |
|
||||
| `auth_header` | `string` | `""` | Exact value for `Authorization` header. Empty disables header auth. |
|
||||
| `request_body_limit_bytes` | `usize` | `65536` | Maximum request body size. Must be `> 0`. |
|
||||
| `minimal_runtime_enabled` | `bool` | `false` | Enables runtime snapshot endpoints requiring ME pool read-lock aggregation. |
|
||||
| `minimal_runtime_cache_ttl_ms` | `u64` | `1000` | Cache TTL for minimal snapshots. `0` disables cache; valid range is `[0, 60000]`. |
|
||||
| `runtime_edge_enabled` | `bool` | `false` | Enables runtime edge endpoints with cached aggregation payloads. |
|
||||
| `runtime_edge_cache_ttl_ms` | `u64` | `1000` | Cache TTL for runtime edge summary payloads. `0` disables cache. |
|
||||
| `runtime_edge_top_n` | `usize` | `10` | Top-N rows for runtime edge leaderboard payloads. |
|
||||
| `runtime_edge_events_capacity` | `usize` | `256` | Ring-buffer size for `/v1/runtime/events/recent`. |
|
||||
| `read_only` | `bool` | `false` | Disables mutating endpoints. |
|
||||
|
||||
`server.admin_api` is accepted as an alias for backward compatibility.
|
||||
|
||||
Runtime validation for API config:
|
||||
- `server.api.listen` must be a valid `IP:PORT`.
|
||||
- `server.api.request_body_limit_bytes` must be `> 0`.
|
||||
- `server.api.minimal_runtime_cache_ttl_ms` must be within `[0, 60000]`.
|
||||
- `server.api.runtime_edge_cache_ttl_ms` must be within `[0, 60000]`.
|
||||
- `server.api.runtime_edge_top_n` must be within `[1, 1000]`.
|
||||
- `server.api.runtime_edge_events_capacity` must be within `[16, 4096]`.
|
||||
|
||||
## Protocol Contract
|
||||
|
||||
| Item | Value |
|
||||
| --- | --- |
|
||||
| Transport | HTTP/1.1 |
|
||||
| Content type | `application/json; charset=utf-8` |
|
||||
| Prefix | `/v1` |
|
||||
| Optimistic concurrency | `If-Match: <revision>` on mutating requests (optional) |
|
||||
| Revision format | SHA-256 hex of current `config.toml` content |
|
||||
|
||||
### Success Envelope
|
||||
```json
|
||||
{
|
||||
"ok": true,
|
||||
"data": {},
|
||||
"revision": "sha256-hex"
|
||||
}
|
||||
```
|
||||
|
||||
### Error Envelope
|
||||
```json
|
||||
{
|
||||
"ok": false,
|
||||
"error": {
|
||||
"code": "machine_code",
|
||||
"message": "human-readable"
|
||||
},
|
||||
"request_id": 1
|
||||
}
|
||||
```
|
||||
|
||||
## Request Processing Order
|
||||
|
||||
Requests are processed in this order:
|
||||
1. `api_enabled` gate (`503 api_disabled` if disabled).
|
||||
2. Source IP whitelist gate (`403 forbidden`).
|
||||
3. `Authorization` header gate when configured (`401 unauthorized`).
|
||||
4. Route and method matching (`404 not_found` or `405 method_not_allowed`).
|
||||
5. `read_only` gate for mutating routes (`403 read_only`).
|
||||
6. Request body read/limit/JSON decode (`413 payload_too_large`, `400 bad_request`).
|
||||
7. Business validation and config write path.
|
||||
|
||||
Notes:
|
||||
- Whitelist is evaluated against the direct TCP peer IP (`SocketAddr::ip`), without `X-Forwarded-For` support.
|
||||
- `Authorization` check is exact string equality against configured `auth_header`.
|
||||
|
||||
## Endpoint Matrix
|
||||
|
||||
| Method | Path | Body | Success | `data` contract |
|
||||
| --- | --- | --- | --- | --- |
|
||||
| `GET` | `/v1/health` | none | `200` | `HealthData` |
|
||||
| `GET` | `/v1/system/info` | none | `200` | `SystemInfoData` |
|
||||
| `GET` | `/v1/runtime/gates` | none | `200` | `RuntimeGatesData` |
|
||||
| `GET` | `/v1/limits/effective` | none | `200` | `EffectiveLimitsData` |
|
||||
| `GET` | `/v1/security/posture` | none | `200` | `SecurityPostureData` |
|
||||
| `GET` | `/v1/security/whitelist` | none | `200` | `SecurityWhitelistData` |
|
||||
| `GET` | `/v1/stats/summary` | none | `200` | `SummaryData` |
|
||||
| `GET` | `/v1/stats/zero/all` | none | `200` | `ZeroAllData` |
|
||||
| `GET` | `/v1/stats/upstreams` | none | `200` | `UpstreamsData` |
|
||||
| `GET` | `/v1/stats/minimal/all` | none | `200` | `MinimalAllData` |
|
||||
| `GET` | `/v1/stats/me-writers` | none | `200` | `MeWritersData` |
|
||||
| `GET` | `/v1/stats/dcs` | none | `200` | `DcStatusData` |
|
||||
| `GET` | `/v1/runtime/me_pool_state` | none | `200` | `RuntimeMePoolStateData` |
|
||||
| `GET` | `/v1/runtime/me_quality` | none | `200` | `RuntimeMeQualityData` |
|
||||
| `GET` | `/v1/runtime/upstream_quality` | none | `200` | `RuntimeUpstreamQualityData` |
|
||||
| `GET` | `/v1/runtime/nat_stun` | none | `200` | `RuntimeNatStunData` |
|
||||
| `GET` | `/v1/runtime/connections/summary` | none | `200` | `RuntimeEdgeConnectionsSummaryData` |
|
||||
| `GET` | `/v1/runtime/events/recent` | none | `200` | `RuntimeEdgeEventsData` |
|
||||
| `GET` | `/v1/stats/users` | none | `200` | `UserInfo[]` |
|
||||
| `GET` | `/v1/users` | none | `200` | `UserInfo[]` |
|
||||
| `POST` | `/v1/users` | `CreateUserRequest` | `201` | `CreateUserResponse` |
|
||||
| `GET` | `/v1/users/{username}` | none | `200` | `UserInfo` |
|
||||
| `PATCH` | `/v1/users/{username}` | `PatchUserRequest` | `200` | `UserInfo` |
|
||||
| `DELETE` | `/v1/users/{username}` | none | `200` | `string` (deleted username) |
|
||||
| `POST` | `/v1/users/{username}/rotate-secret` | `RotateSecretRequest` or empty body | `404` | `ErrorResponse` (`not_found`, current runtime behavior) |
|
||||
|
||||
## Common Error Codes
|
||||
|
||||
| HTTP | `error.code` | Trigger |
|
||||
| --- | --- | --- |
|
||||
| `400` | `bad_request` | Invalid JSON, validation failures, malformed request body. |
|
||||
| `401` | `unauthorized` | Missing/invalid `Authorization` when `auth_header` is configured. |
|
||||
| `403` | `forbidden` | Source IP is not allowed by whitelist. |
|
||||
| `403` | `read_only` | Mutating endpoint called while `read_only=true`. |
|
||||
| `404` | `not_found` | Unknown route, unknown user, or unsupported sub-route (including current `rotate-secret` route). |
|
||||
| `405` | `method_not_allowed` | Unsupported method for `/v1/users/{username}` route shape. |
|
||||
| `409` | `revision_conflict` | `If-Match` revision mismatch. |
|
||||
| `409` | `user_exists` | User already exists on create. |
|
||||
| `409` | `last_user_forbidden` | Attempt to delete last configured user. |
|
||||
| `413` | `payload_too_large` | Body exceeds `request_body_limit_bytes`. |
|
||||
| `500` | `internal_error` | Internal error (I/O, serialization, config load/save). |
|
||||
| `503` | `api_disabled` | API disabled in config. |
|
||||
|
||||
## Routing and Method Edge Cases
|
||||
|
||||
| Case | Behavior |
|
||||
| --- | --- |
|
||||
| Path matching | Exact match on `req.uri().path()`. Query string does not affect route matching. |
|
||||
| Trailing slash | Not normalized. Example: `/v1/users/` is `404`. |
|
||||
| Username route with extra slash | `/v1/users/{username}/...` is not treated as user route and returns `404`. |
|
||||
| `PUT /v1/users/{username}` | `405 method_not_allowed`. |
|
||||
| `POST /v1/users/{username}` | `404 not_found`. |
|
||||
| `POST /v1/users/{username}/rotate-secret` | `404 not_found` in the current release due to a route matcher limitation. |
|
||||
|
||||
## Body and JSON Semantics
|
||||
|
||||
- Request body is read only for mutating routes that define a body contract.
|
||||
- Body size limit is enforced during streaming read (`413 payload_too_large`).
|
||||
- Invalid transport body frame returns `400 bad_request` (`Invalid request body`).
|
||||
- Invalid JSON returns `400 bad_request` (`Invalid JSON body`).
|
||||
- `Content-Type` is not required for JSON parsing.
|
||||
- Unknown JSON fields are ignored by deserialization.
|
||||
- `PATCH` updates only provided fields and does not support explicit clearing of optional fields.
|
||||
- `If-Match` supports both quoted and unquoted values; surrounding whitespace is trimmed.
|
||||
|
||||
## Request Contracts
|
||||
|
||||
### `CreateUserRequest`
|
||||
| Field | Type | Required | Description |
|
||||
| --- | --- | --- | --- |
|
||||
| `username` | `string` | yes | `[A-Za-z0-9_.-]`, length `1..64`. |
|
||||
| `secret` | `string` | no | Exactly 32 hex chars. If missing, generated automatically. |
|
||||
| `user_ad_tag` | `string` | no | Exactly 32 hex chars. |
|
||||
| `max_tcp_conns` | `usize` | no | Per-user concurrent TCP limit. |
|
||||
| `expiration_rfc3339` | `string` | no | RFC3339 expiration timestamp. |
|
||||
| `data_quota_bytes` | `u64` | no | Per-user traffic quota. |
|
||||
| `max_unique_ips` | `usize` | no | Per-user unique source IP limit. |
|
||||
|
||||
### `PatchUserRequest`
|
||||
| Field | Type | Required | Description |
|
||||
| --- | --- | --- | --- |
|
||||
| `secret` | `string` | no | Exactly 32 hex chars. |
|
||||
| `user_ad_tag` | `string` | no | Exactly 32 hex chars. |
|
||||
| `max_tcp_conns` | `usize` | no | Per-user concurrent TCP limit. |
|
||||
| `expiration_rfc3339` | `string` | no | RFC3339 expiration timestamp. |
|
||||
| `data_quota_bytes` | `u64` | no | Per-user traffic quota. |
|
||||
| `max_unique_ips` | `usize` | no | Per-user unique source IP limit. |
|
||||
|
||||
### `RotateSecretRequest`
|
||||
| Field | Type | Required | Description |
|
||||
| --- | --- | --- | --- |
|
||||
| `secret` | `string` | no | Exactly 32 hex chars. If missing, generated automatically. |
|
||||
|
||||
Note: the request contract is defined, but the corresponding route currently returns `404` (see routing edge cases).
|
||||
|
||||
## Response Data Contracts
|
||||
|
||||
### `HealthData`
|
||||
| Field | Type | Description |
|
||||
| --- | --- | --- |
|
||||
| `status` | `string` | Always `"ok"`. |
|
||||
| `read_only` | `bool` | Mirrors current API `read_only` mode. |
|
||||
|
||||
### `SummaryData`
|
||||
| Field | Type | Description |
|
||||
| --- | --- | --- |
|
||||
| `uptime_seconds` | `f64` | Process uptime in seconds. |
|
||||
| `connections_total` | `u64` | Total accepted client connections. |
|
||||
| `connections_bad_total` | `u64` | Failed/invalid client connections. |
|
||||
| `handshake_timeouts_total` | `u64` | Handshake timeout count. |
|
||||
| `configured_users` | `usize` | Number of configured users in config. |
|
||||
|
||||
### `SystemInfoData`
|
||||
| Field | Type | Description |
|
||||
| --- | --- | --- |
|
||||
| `version` | `string` | Binary version (`CARGO_PKG_VERSION`). |
|
||||
| `target_arch` | `string` | Target architecture (`std::env::consts::ARCH`). |
|
||||
| `target_os` | `string` | Target OS (`std::env::consts::OS`). |
|
||||
| `build_profile` | `string` | Build profile (`PROFILE` env when available). |
|
||||
| `git_commit` | `string?` | Optional commit hash from build env metadata. |
|
||||
| `build_time_utc` | `string?` | Optional build timestamp from build env metadata. |
|
||||
| `rustc_version` | `string?` | Optional compiler version from build env metadata. |
|
||||
| `process_started_at_epoch_secs` | `u64` | Process start time as Unix epoch seconds. |
|
||||
| `uptime_seconds` | `f64` | Process uptime in seconds. |
|
||||
| `config_path` | `string` | Active config file path used by runtime. |
|
||||
| `config_hash` | `string` | SHA-256 hash of current config content (same value as envelope `revision`). |
|
||||
| `config_reload_count` | `u64` | Number of successfully observed config updates since process start. |
|
||||
| `last_config_reload_epoch_secs` | `u64?` | Unix epoch seconds of the latest observed config reload; null/absent before first reload. |
|
||||
|
||||
### `RuntimeGatesData`
|
||||
| Field | Type | Description |
|
||||
| --- | --- | --- |
|
||||
| `accepting_new_connections` | `bool` | Current admission-gate state for new listener accepts. |
|
||||
| `conditional_cast_enabled` | `bool` | Whether conditional ME admission logic is enabled (`general.use_middle_proxy`). |
|
||||
| `me_runtime_ready` | `bool` | Current ME runtime readiness status used for conditional gate decisions. |
|
||||
| `me2dc_fallback_enabled` | `bool` | Whether ME -> direct fallback is enabled. |
|
||||
| `use_middle_proxy` | `bool` | Current transport mode preference. |
|
||||
|
||||
### `EffectiveLimitsData`
|
||||
| Field | Type | Description |
|
||||
| --- | --- | --- |
|
||||
| `update_every_secs` | `u64` | Effective unified updater interval. |
|
||||
| `me_reinit_every_secs` | `u64` | Effective ME periodic reinit interval. |
|
||||
| `me_pool_force_close_secs` | `u64` | Effective stale-writer force-close timeout. |
|
||||
| `timeouts` | `EffectiveTimeoutLimits` | Effective timeout policy snapshot. |
|
||||
| `upstream` | `EffectiveUpstreamLimits` | Effective upstream connect/retry limits. |
|
||||
| `middle_proxy` | `EffectiveMiddleProxyLimits` | Effective ME pool/floor/reconnect limits. |
|
||||
| `user_ip_policy` | `EffectiveUserIpPolicyLimits` | Effective unique-IP policy mode/window. |
|
||||
|
||||
#### `EffectiveTimeoutLimits`
|
||||
| Field | Type | Description |
|
||||
| --- | --- | --- |
|
||||
| `client_handshake_secs` | `u64` | Client handshake timeout. |
|
||||
| `tg_connect_secs` | `u64` | Upstream Telegram connect timeout. |
|
||||
| `client_keepalive_secs` | `u64` | Client keepalive interval. |
|
||||
| `client_ack_secs` | `u64` | ACK timeout. |
|
||||
| `me_one_retry` | `u8` | Fast retry count for single-endpoint ME DC. |
|
||||
| `me_one_timeout_ms` | `u64` | Fast retry timeout per attempt for single-endpoint ME DC. |
|
||||
|
||||
#### `EffectiveUpstreamLimits`
|
||||
| Field | Type | Description |
|
||||
| --- | --- | --- |
|
||||
| `connect_retry_attempts` | `u32` | Upstream connect retry attempts. |
|
||||
| `connect_retry_backoff_ms` | `u64` | Upstream retry backoff delay. |
|
||||
| `connect_budget_ms` | `u64` | Total connect wall-clock budget across retries. |
|
||||
| `unhealthy_fail_threshold` | `u32` | Consecutive fail threshold for unhealthy marking. |
|
||||
| `connect_failfast_hard_errors` | `bool` | Whether hard errors skip additional retries. |
|
||||
|
||||
#### `EffectiveMiddleProxyLimits`
|
||||
| Field | Type | Description |
|
||||
| --- | --- | --- |
|
||||
| `floor_mode` | `string` | Effective floor mode (`static` or `adaptive`). |
|
||||
| `adaptive_floor_idle_secs` | `u64` | Adaptive floor idle threshold. |
|
||||
| `adaptive_floor_min_writers_single_endpoint` | `u8` | Adaptive floor minimum for single-endpoint DCs. |
|
||||
| `adaptive_floor_recover_grace_secs` | `u64` | Adaptive floor recovery grace period. |
|
||||
| `reconnect_max_concurrent_per_dc` | `u32` | Max concurrent reconnects per DC. |
|
||||
| `reconnect_backoff_base_ms` | `u64` | Reconnect base backoff. |
|
||||
| `reconnect_backoff_cap_ms` | `u64` | Reconnect backoff cap. |
|
||||
| `reconnect_fast_retry_count` | `u32` | Number of fast retries before standard backoff strategy. |
|
||||
| `me2dc_fallback` | `bool` | Effective ME -> direct fallback flag. |
|
||||
|
||||
#### `EffectiveUserIpPolicyLimits`
|
||||
| Field | Type | Description |
|
||||
| --- | --- | --- |
|
||||
| `mode` | `string` | Unique-IP policy mode (`active_window`, `time_window`, `combined`). |
|
||||
| `window_secs` | `u64` | Time window length used by unique-IP policy. |
|
||||
|
||||
### `SecurityPostureData`
|
||||
| Field | Type | Description |
|
||||
| --- | --- | --- |
|
||||
| `api_read_only` | `bool` | Current API read-only state. |
|
||||
| `api_whitelist_enabled` | `bool` | Whether whitelist filtering is active. |
|
||||
| `api_whitelist_entries` | `usize` | Number of configured whitelist CIDRs. |
|
||||
| `api_auth_header_enabled` | `bool` | Whether `Authorization` header validation is active. |
|
||||
| `proxy_protocol_enabled` | `bool` | Global PROXY protocol accept setting. |
|
||||
| `log_level` | `string` | Effective log level (`debug`, `verbose`, `normal`, `silent`). |
|
||||
| `telemetry_core_enabled` | `bool` | Core telemetry toggle. |
|
||||
| `telemetry_user_enabled` | `bool` | Per-user telemetry toggle. |
|
||||
| `telemetry_me_level` | `string` | ME telemetry level (`silent`, `normal`, `debug`). |
|
||||
|
||||
### `SecurityWhitelistData`
|
||||
| Field | Type | Description |
|
||||
| --- | --- | --- |
|
||||
| `generated_at_epoch_secs` | `u64` | Snapshot generation timestamp. |
|
||||
| `enabled` | `bool` | `true` when whitelist has at least one CIDR entry. |
|
||||
| `entries_total` | `usize` | Number of whitelist CIDR entries. |
|
||||
| `entries` | `string[]` | Whitelist CIDR entries as strings. |
|
||||
|
||||
### Runtime Min Endpoints
|
||||
- `/v1/runtime/me_pool_state`: generations, hardswap state, writer contour/health counts, refill inflight snapshot.
|
||||
- `/v1/runtime/me_quality`: ME error/drift/reconnect counters and per-DC RTT coverage snapshot.
|
||||
- `/v1/runtime/upstream_quality`: upstream runtime policy, connect counters, health summary and per-upstream DC latency/IP preference.
|
||||
- `/v1/runtime/nat_stun`: NAT/STUN runtime flags, server lists, reflection cache state and backoff remaining.
|
||||
|
||||
### Runtime Edge Endpoints
|
||||
- `/v1/runtime/connections/summary`: cached connection totals (`total/me/direct`), active users and top-N users by connections/traffic.
|
||||
- `/v1/runtime/events/recent?limit=N`: bounded control-plane ring-buffer events (`limit` clamped to `[1, 1000]`).
|
||||
- If `server.api.runtime_edge_enabled=false`, runtime edge endpoints return `enabled=false` with `reason=feature_disabled`.
|
||||
|
||||
### `ZeroAllData`
|
||||
| Field | Type | Description |
|
||||
| --- | --- | --- |
|
||||
| `generated_at_epoch_secs` | `u64` | Snapshot time (Unix epoch seconds). |
|
||||
| `core` | `ZeroCoreData` | Core counters and telemetry policy snapshot. |
|
||||
| `upstream` | `ZeroUpstreamData` | Upstream connect counters/histogram buckets. |
|
||||
| `middle_proxy` | `ZeroMiddleProxyData` | ME protocol/health counters. |
|
||||
| `pool` | `ZeroPoolData` | ME pool lifecycle counters. |
|
||||
| `desync` | `ZeroDesyncData` | Frame desync counters. |
|
||||
|
||||
#### `ZeroCoreData`
|
||||
| Field | Type | Description |
|
||||
| --- | --- | --- |
|
||||
| `uptime_seconds` | `f64` | Process uptime. |
|
||||
| `connections_total` | `u64` | Total accepted connections. |
|
||||
| `connections_bad_total` | `u64` | Failed/invalid connections. |
|
||||
| `handshake_timeouts_total` | `u64` | Handshake timeouts. |
|
||||
| `configured_users` | `usize` | Configured user count. |
|
||||
| `telemetry_core_enabled` | `bool` | Core telemetry toggle. |
|
||||
| `telemetry_user_enabled` | `bool` | User telemetry toggle. |
|
||||
| `telemetry_me_level` | `string` | ME telemetry level (`off|normal|verbose`). |
|
||||
|
||||
#### `ZeroUpstreamData`
|
||||
| Field | Type | Description |
|
||||
| --- | --- | --- |
|
||||
| `connect_attempt_total` | `u64` | Total upstream connect attempts. |
|
||||
| `connect_success_total` | `u64` | Successful upstream connects. |
|
||||
| `connect_fail_total` | `u64` | Failed upstream connects. |
|
||||
| `connect_failfast_hard_error_total` | `u64` | Fail-fast hard errors. |
|
||||
| `connect_attempts_bucket_1` | `u64` | Connect attempts resolved in 1 try. |
|
||||
| `connect_attempts_bucket_2` | `u64` | Connect attempts resolved in 2 tries. |
|
||||
| `connect_attempts_bucket_3_4` | `u64` | Connect attempts resolved in 3-4 tries. |
|
||||
| `connect_attempts_bucket_gt_4` | `u64` | Connect attempts requiring more than 4 tries. |
|
||||
| `connect_duration_success_bucket_le_100ms` | `u64` | Successful connects <=100 ms. |
|
||||
| `connect_duration_success_bucket_101_500ms` | `u64` | Successful connects 101-500 ms. |
|
||||
| `connect_duration_success_bucket_501_1000ms` | `u64` | Successful connects 501-1000 ms. |
|
||||
| `connect_duration_success_bucket_gt_1000ms` | `u64` | Successful connects >1000 ms. |
|
||||
| `connect_duration_fail_bucket_le_100ms` | `u64` | Failed connects <=100 ms. |
|
||||
| `connect_duration_fail_bucket_101_500ms` | `u64` | Failed connects 101-500 ms. |
|
||||
| `connect_duration_fail_bucket_501_1000ms` | `u64` | Failed connects 501-1000 ms. |
|
||||
| `connect_duration_fail_bucket_gt_1000ms` | `u64` | Failed connects >1000 ms. |
|
||||
|
||||
### `UpstreamsData`
|
||||
| Field | Type | Description |
|
||||
| --- | --- | --- |
|
||||
| `enabled` | `bool` | Runtime upstream snapshot availability according to API config. |
|
||||
| `reason` | `string?` | `feature_disabled` or `source_unavailable` when runtime snapshot is unavailable. |
|
||||
| `generated_at_epoch_secs` | `u64` | Snapshot generation time. |
|
||||
| `zero` | `ZeroUpstreamData` | Always available zero-cost upstream counters block. |
|
||||
| `summary` | `UpstreamSummaryData?` | Runtime upstream aggregate view, null when unavailable. |
|
||||
| `upstreams` | `UpstreamStatus[]?` | Per-upstream runtime status rows, null when unavailable. |
|
||||
|
||||
#### `UpstreamSummaryData`
|
||||
| Field | Type | Description |
|
||||
| --- | --- | --- |
|
||||
| `configured_total` | `usize` | Total configured upstream entries. |
|
||||
| `healthy_total` | `usize` | Upstreams currently marked healthy. |
|
||||
| `unhealthy_total` | `usize` | Upstreams currently marked unhealthy. |
|
||||
| `direct_total` | `usize` | Number of direct upstream entries. |
|
||||
| `socks4_total` | `usize` | Number of SOCKS4 upstream entries. |
|
||||
| `socks5_total` | `usize` | Number of SOCKS5 upstream entries. |
|
||||
|
||||
#### `UpstreamStatus`
|
||||
| Field | Type | Description |
|
||||
| --- | --- | --- |
|
||||
| `upstream_id` | `usize` | Runtime upstream index. |
|
||||
| `route_kind` | `string` | Upstream route kind: `direct`, `socks4`, `socks5`. |
|
||||
| `address` | `string` | Upstream address (`direct` for direct route kind). Authentication fields are intentionally omitted. |
|
||||
| `weight` | `u16` | Selection weight. |
|
||||
| `scopes` | `string` | Configured scope selector string. |
|
||||
| `healthy` | `bool` | Current health flag. |
|
||||
| `fails` | `u32` | Consecutive fail counter. |
|
||||
| `last_check_age_secs` | `u64` | Seconds since the last health-check update. |
|
||||
| `effective_latency_ms` | `f64?` | Effective upstream latency used by selector. |
|
||||
| `dc` | `UpstreamDcStatus[]` | Per-DC latency/IP preference snapshot. |
|
||||
|
||||
#### `UpstreamDcStatus`
|
||||
| Field | Type | Description |
|
||||
| --- | --- | --- |
|
||||
| `dc` | `i16` | Telegram DC id. |
|
||||
| `latency_ema_ms` | `f64?` | Per-DC latency EMA value. |
|
||||
| `ip_preference` | `string` | Per-DC IP family preference: `unknown`, `prefer_v4`, `prefer_v6`, `both_work`, `unavailable`. |
|
||||
|
||||
#### `ZeroMiddleProxyData`
|
||||
| Field | Type | Description |
|
||||
| --- | --- | --- |
|
||||
| `keepalive_sent_total` | `u64` | ME keepalive packets sent. |
|
||||
| `keepalive_failed_total` | `u64` | ME keepalive send failures. |
|
||||
| `keepalive_pong_total` | `u64` | Keepalive pong responses received. |
|
||||
| `keepalive_timeout_total` | `u64` | Keepalive timeout events. |
|
||||
| `rpc_proxy_req_signal_sent_total` | `u64` | RPC proxy activity signals sent. |
|
||||
| `rpc_proxy_req_signal_failed_total` | `u64` | RPC proxy activity signal failures. |
|
||||
| `rpc_proxy_req_signal_skipped_no_meta_total` | `u64` | Signals skipped due to missing metadata. |
|
||||
| `rpc_proxy_req_signal_response_total` | `u64` | RPC proxy signal responses received. |
|
||||
| `rpc_proxy_req_signal_close_sent_total` | `u64` | RPC proxy close signals sent. |
|
||||
| `reconnect_attempt_total` | `u64` | ME reconnect attempts. |
|
||||
| `reconnect_success_total` | `u64` | Successful reconnects. |
|
||||
| `handshake_reject_total` | `u64` | ME handshake rejects. |
|
||||
| `handshake_error_codes` | `ZeroCodeCount[]` | Handshake rejects grouped by code. |
|
||||
| `reader_eof_total` | `u64` | ME reader EOF events. |
|
||||
| `idle_close_by_peer_total` | `u64` | Idle closes initiated by peer. |
|
||||
| `route_drop_no_conn_total` | `u64` | Route drops due to missing bound connection. |
|
||||
| `route_drop_channel_closed_total` | `u64` | Route drops due to closed channel. |
|
||||
| `route_drop_queue_full_total` | `u64` | Route drops due to full queue (total). |
|
||||
| `route_drop_queue_full_base_total` | `u64` | Route drops in base queue mode. |
|
||||
| `route_drop_queue_full_high_total` | `u64` | Route drops in high queue mode. |
|
||||
| `socks_kdf_strict_reject_total` | `u64` | SOCKS KDF strict rejects. |
|
||||
| `socks_kdf_compat_fallback_total` | `u64` | SOCKS KDF compat fallbacks. |
|
||||
| `endpoint_quarantine_total` | `u64` | Endpoint quarantine activations. |
|
||||
| `kdf_drift_total` | `u64` | KDF drift detections. |
|
||||
| `kdf_port_only_drift_total` | `u64` | KDF port-only drift detections. |
|
||||
| `hardswap_pending_reuse_total` | `u64` | Pending hardswap reused events. |
|
||||
| `hardswap_pending_ttl_expired_total` | `u64` | Pending hardswap TTL expiry events. |
|
||||
| `single_endpoint_outage_enter_total` | `u64` | Entered single-endpoint outage mode. |
|
||||
| `single_endpoint_outage_exit_total` | `u64` | Exited single-endpoint outage mode. |
|
||||
| `single_endpoint_outage_reconnect_attempt_total` | `u64` | Reconnect attempts in outage mode. |
|
||||
| `single_endpoint_outage_reconnect_success_total` | `u64` | Reconnect successes in outage mode. |
|
||||
| `single_endpoint_quarantine_bypass_total` | `u64` | Quarantine bypasses in outage mode. |
|
||||
| `single_endpoint_shadow_rotate_total` | `u64` | Shadow writer rotations. |
|
||||
| `single_endpoint_shadow_rotate_skipped_quarantine_total` | `u64` | Shadow rotations skipped because of quarantine. |
|
||||
| `floor_mode_switch_total` | `u64` | Total floor mode switches. |
|
||||
| `floor_mode_switch_static_to_adaptive_total` | `u64` | Static -> adaptive switches. |
|
||||
| `floor_mode_switch_adaptive_to_static_total` | `u64` | Adaptive -> static switches. |
|
||||
|
||||
#### `ZeroCodeCount`
|
||||
| Field | Type | Description |
|
||||
| --- | --- | --- |
|
||||
| `code` | `i32` | Handshake error code. |
|
||||
| `total` | `u64` | Events with this code. |
|
||||
|
||||
#### `ZeroPoolData`
|
||||
| Field | Type | Description |
|
||||
| --- | --- | --- |
|
||||
| `pool_swap_total` | `u64` | Pool swap count. |
|
||||
| `pool_drain_active` | `u64` | Current active draining pools. |
|
||||
| `pool_force_close_total` | `u64` | Forced pool closes by timeout. |
|
||||
| `pool_stale_pick_total` | `u64` | Stale writer picks for binding. |
|
||||
| `writer_removed_total` | `u64` | Writer removals total. |
|
||||
| `writer_removed_unexpected_total` | `u64` | Unexpected writer removals. |
|
||||
| `refill_triggered_total` | `u64` | Refill triggers. |
|
||||
| `refill_skipped_inflight_total` | `u64` | Refill skipped because refill already in-flight. |
|
||||
| `refill_failed_total` | `u64` | Refill failures. |
|
||||
| `writer_restored_same_endpoint_total` | `u64` | Restores on same endpoint. |
|
||||
| `writer_restored_fallback_total` | `u64` | Restores on fallback endpoint. |
|
||||
|
||||
#### `ZeroDesyncData`
|
||||
| Field | Type | Description |
|
||||
| --- | --- | --- |
|
||||
| `secure_padding_invalid_total` | `u64` | Invalid secure padding events. |
|
||||
| `desync_total` | `u64` | Desync events total. |
|
||||
| `desync_full_logged_total` | `u64` | Fully logged desync events. |
|
||||
| `desync_suppressed_total` | `u64` | Suppressed desync logs. |
|
||||
| `desync_frames_bucket_0` | `u64` | Desync frames bucket 0. |
|
||||
| `desync_frames_bucket_1_2` | `u64` | Desync frames bucket 1-2. |
|
||||
| `desync_frames_bucket_3_10` | `u64` | Desync frames bucket 3-10. |
|
||||
| `desync_frames_bucket_gt_10` | `u64` | Desync frames bucket >10. |
|
||||
|
||||
### `MinimalAllData`
|
||||
| Field | Type | Description |
|
||||
| --- | --- | --- |
|
||||
| `enabled` | `bool` | Whether minimal runtime snapshots are enabled by config. |
|
||||
| `reason` | `string?` | `feature_disabled` or `source_unavailable` when applicable. |
|
||||
| `generated_at_epoch_secs` | `u64` | Snapshot generation time. |
|
||||
| `data` | `MinimalAllPayload?` | Null when disabled; fallback payload when source unavailable. |
|
||||
|
||||
#### `MinimalAllPayload`
|
||||
| Field | Type | Description |
|
||||
| --- | --- | --- |
|
||||
| `me_writers` | `MeWritersData` | ME writer status block. |
|
||||
| `dcs` | `DcStatusData` | DC aggregate status block. |
|
||||
| `me_runtime` | `MinimalMeRuntimeData?` | Runtime ME control snapshot. |
|
||||
| `network_path` | `MinimalDcPathData[]` | Active IP path selection per DC. |
|
||||
|
||||
#### `MinimalMeRuntimeData`
|
||||
| Field | Type | Description |
|
||||
| --- | --- | --- |
|
||||
| `active_generation` | `u64` | Active pool generation. |
|
||||
| `warm_generation` | `u64` | Warm pool generation. |
|
||||
| `pending_hardswap_generation` | `u64` | Pending hardswap generation. |
|
||||
| `pending_hardswap_age_secs` | `u64?` | Pending hardswap age in seconds. |
|
||||
| `hardswap_enabled` | `bool` | Hardswap mode toggle. |
|
||||
| `floor_mode` | `string` | Writer floor mode. |
|
||||
| `adaptive_floor_idle_secs` | `u64` | Idle threshold for adaptive floor. |
|
||||
| `adaptive_floor_min_writers_single_endpoint` | `u8` | Minimum writers for single-endpoint DC in adaptive mode. |
|
||||
| `adaptive_floor_recover_grace_secs` | `u64` | Grace period for floor recovery. |
|
||||
| `me_keepalive_enabled` | `bool` | ME keepalive toggle. |
|
||||
| `me_keepalive_interval_secs` | `u64` | Keepalive period. |
|
||||
| `me_keepalive_jitter_secs` | `u64` | Keepalive jitter. |
|
||||
| `me_keepalive_payload_random` | `bool` | Randomized keepalive payload toggle. |
|
||||
| `rpc_proxy_req_every_secs` | `u64` | Period for RPC proxy request signal. |
|
||||
| `me_reconnect_max_concurrent_per_dc` | `u32` | Reconnect concurrency per DC. |
|
||||
| `me_reconnect_backoff_base_ms` | `u64` | Base reconnect backoff. |
|
||||
| `me_reconnect_backoff_cap_ms` | `u64` | Max reconnect backoff. |
|
||||
| `me_reconnect_fast_retry_count` | `u32` | Fast retry attempts before normal backoff. |
|
||||
| `me_pool_drain_ttl_secs` | `u64` | Pool drain TTL. |
|
||||
| `me_pool_force_close_secs` | `u64` | Hard close timeout for draining writers. |
|
||||
| `me_pool_min_fresh_ratio` | `f32` | Minimum fresh ratio before swap. |
|
||||
| `me_bind_stale_mode` | `string` | Stale writer bind policy. |
|
||||
| `me_bind_stale_ttl_secs` | `u64` | Stale writer TTL. |
|
||||
| `me_single_endpoint_shadow_writers` | `u8` | Shadow writers for single-endpoint DCs. |
|
||||
| `me_single_endpoint_outage_mode_enabled` | `bool` | Outage mode toggle for single-endpoint DCs. |
|
||||
| `me_single_endpoint_outage_disable_quarantine` | `bool` | Quarantine behavior in outage mode. |
|
||||
| `me_single_endpoint_outage_backoff_min_ms` | `u64` | Outage mode min reconnect backoff. |
|
||||
| `me_single_endpoint_outage_backoff_max_ms` | `u64` | Outage mode max reconnect backoff. |
|
||||
| `me_single_endpoint_shadow_rotate_every_secs` | `u64` | Shadow rotation interval. |
|
||||
| `me_deterministic_writer_sort` | `bool` | Deterministic writer ordering toggle. |
|
||||
| `me_socks_kdf_policy` | `string` | Current SOCKS KDF policy mode. |
|
||||
| `quarantined_endpoints_total` | `usize` | Total quarantined endpoints. |
|
||||
| `quarantined_endpoints` | `MinimalQuarantineData[]` | Quarantine details. |
|
||||
|
||||
#### `MinimalQuarantineData`
|
||||
| Field | Type | Description |
|
||||
| --- | --- | --- |
|
||||
| `endpoint` | `string` | Endpoint (`ip:port`). |
|
||||
| `remaining_ms` | `u64` | Remaining quarantine duration. |
|
||||
|
||||
#### `MinimalDcPathData`
|
||||
| Field | Type | Description |
|
||||
| --- | --- | --- |
|
||||
| `dc` | `i16` | Telegram DC identifier. |
|
||||
| `ip_preference` | `string?` | Runtime IP family preference. |
|
||||
| `selected_addr_v4` | `string?` | Selected IPv4 endpoint for this DC. |
|
||||
| `selected_addr_v6` | `string?` | Selected IPv6 endpoint for this DC. |
|
||||
|
||||
### `MeWritersData`
|
||||
| Field | Type | Description |
|
||||
| --- | --- | --- |
|
||||
| `middle_proxy_enabled` | `bool` | `false` when minimal runtime is disabled or source unavailable. |
|
||||
| `reason` | `string?` | `feature_disabled` or `source_unavailable` when not fully available. |
|
||||
| `generated_at_epoch_secs` | `u64` | Snapshot generation time. |
|
||||
| `summary` | `MeWritersSummary` | Coverage/availability summary. |
|
||||
| `writers` | `MeWriterStatus[]` | Per-writer statuses. |
|
||||
|
||||
#### `MeWritersSummary`
|
||||
| Field | Type | Description |
|
||||
| --- | --- | --- |
|
||||
| `configured_dc_groups` | `usize` | Number of configured DC groups. |
|
||||
| `configured_endpoints` | `usize` | Total configured ME endpoints. |
|
||||
| `available_endpoints` | `usize` | Endpoints currently available. |
|
||||
| `available_pct` | `f64` | `available_endpoints / configured_endpoints * 100`. |
|
||||
| `required_writers` | `usize` | Required writers based on current floor policy. |
|
||||
| `alive_writers` | `usize` | Writers currently alive. |
|
||||
| `coverage_pct` | `f64` | `alive_writers / required_writers * 100`. |
|
||||
|
||||
#### `MeWriterStatus`
|
||||
| Field | Type | Description |
|
||||
| --- | --- | --- |
|
||||
| `writer_id` | `u64` | Runtime writer identifier. |
|
||||
| `dc` | `i16?` | DC id if mapped. |
|
||||
| `endpoint` | `string` | Endpoint (`ip:port`). |
|
||||
| `generation` | `u64` | Pool generation owning this writer. |
|
||||
| `state` | `string` | Writer state (`warm`, `active`, `draining`). |
|
||||
| `draining` | `bool` | Draining flag. |
|
||||
| `degraded` | `bool` | Degraded flag. |
|
||||
| `bound_clients` | `usize` | Number of currently bound clients. |
|
||||
| `idle_for_secs` | `u64?` | Idle age in seconds if idle. |
|
||||
| `rtt_ema_ms` | `f64?` | RTT exponential moving average. |
|
||||
|
||||
### `DcStatusData`
|
||||
| Field | Type | Description |
|
||||
| --- | --- | --- |
|
||||
| `middle_proxy_enabled` | `bool` | `false` when minimal runtime is disabled or source unavailable. |
|
||||
| `reason` | `string?` | `feature_disabled` or `source_unavailable` when not fully available. |
|
||||
| `generated_at_epoch_secs` | `u64` | Snapshot generation time. |
|
||||
| `dcs` | `DcStatus[]` | Per-DC status rows. |
|
||||
|
||||
#### `DcStatus`
|
||||
| Field | Type | Description |
|
||||
| --- | --- | --- |
|
||||
| `dc` | `i16` | Telegram DC id. |
|
||||
| `endpoints` | `string[]` | Endpoints in this DC (`ip:port`). |
|
||||
| `available_endpoints` | `usize` | Endpoints currently available in this DC. |
|
||||
| `available_pct` | `f64` | `available_endpoints / endpoints_total * 100`. |
|
||||
| `required_writers` | `usize` | Required writer count for this DC. |
|
||||
| `alive_writers` | `usize` | Alive writers in this DC. |
|
||||
| `coverage_pct` | `f64` | `alive_writers / required_writers * 100`. |
|
||||
| `rtt_ms` | `f64?` | Aggregated RTT for DC. |
|
||||
| `load` | `usize` | Active client sessions bound to this DC. |
|
||||
|
||||
### `UserInfo`
|
||||
| Field | Type | Description |
|
||||
| --- | --- | --- |
|
||||
| `username` | `string` | Username. |
|
||||
| `user_ad_tag` | `string?` | Optional ad tag (32 hex chars). |
|
||||
| `max_tcp_conns` | `usize?` | Optional max concurrent TCP limit. |
|
||||
| `expiration_rfc3339` | `string?` | Optional expiration timestamp. |
|
||||
| `data_quota_bytes` | `u64?` | Optional data quota. |
|
||||
| `max_unique_ips` | `usize?` | Optional unique IP limit. |
|
||||
| `current_connections` | `u64` | Current live connections. |
|
||||
| `active_unique_ips` | `usize` | Current active unique source IPs. |
|
||||
| `total_octets` | `u64` | Total traffic octets for this user. |
|
||||
| `links` | `UserLinks` | Active connection links derived from current config. |
|
||||
|
||||
#### `UserLinks`
|
||||
| Field | Type | Description |
|
||||
| --- | --- | --- |
|
||||
| `classic` | `string[]` | Active `tg://proxy` links for classic mode. |
|
||||
| `secure` | `string[]` | Active `tg://proxy` links for secure/DD mode. |
|
||||
| `tls` | `string[]` | Active `tg://proxy` links for EE-TLS mode (for each host+TLS domain). |
|
||||
|
||||
Link generation uses active config and enabled modes:
|
||||
- `[general.links].public_host/public_port` have priority.
|
||||
- If `public_host` is not set, startup-detected public IPs are used (`IPv4`, `IPv6`, or both when available).
|
||||
- Fallback host sources: listener `announce`, `announce_ip`, explicit listener `ip`.
|
||||
- Legacy fallback: `listen_addr_ipv4` and `listen_addr_ipv6` when routable.
|
||||
- Startup-detected IPs are fixed for process lifetime and refreshed on restart.
|
||||
- User rows are sorted by `username` in ascending lexical order.
|
||||
|
||||
### `CreateUserResponse`
|
||||
| Field | Type | Description |
|
||||
| --- | --- | --- |
|
||||
| `user` | `UserInfo` | Created or updated user view. |
|
||||
| `secret` | `string` | Effective user secret. |
|
||||
|
||||
## Mutation Semantics
|
||||
|
||||
| Endpoint | Notes |
|
||||
| --- | --- |
|
||||
| `POST /v1/users` | Creates user and validates resulting config before atomic save. |
|
||||
| `PATCH /v1/users/{username}` | Partial update of provided fields only. Missing fields remain unchanged. |
|
||||
| `POST /v1/users/{username}/rotate-secret` | Currently returns `404` in runtime route matcher; request schema is reserved for intended behavior. |
|
||||
| `DELETE /v1/users/{username}` | Deletes user and related optional settings. Last user deletion is blocked. |
|
||||
|
||||
All mutating endpoints:
|
||||
- Respect `read_only` mode.
|
||||
- Accept optional `If-Match` for optimistic concurrency.
|
||||
- Return new `revision` after successful write.
|
||||
- Use process-local mutation lock + atomic write (`tmp + rename`) for config persistence.
|
||||
|
||||
## Runtime State Matrix
|
||||
|
||||
| Endpoint | `minimal_runtime_enabled=false` | `minimal_runtime_enabled=true` + source unavailable | `minimal_runtime_enabled=true` + source available |
|
||||
| --- | --- | --- | --- |
|
||||
| `/v1/stats/minimal/all` | `enabled=false`, `reason=feature_disabled`, `data=null` | `enabled=true`, `reason=source_unavailable`, fallback `data` with disabled ME blocks | `enabled=true`, `reason` omitted, full payload |
|
||||
| `/v1/stats/me-writers` | `middle_proxy_enabled=false`, `reason=feature_disabled` | `middle_proxy_enabled=false`, `reason=source_unavailable` | `middle_proxy_enabled=true`, runtime snapshot |
|
||||
| `/v1/stats/dcs` | `middle_proxy_enabled=false`, `reason=feature_disabled` | `middle_proxy_enabled=false`, `reason=source_unavailable` | `middle_proxy_enabled=true`, runtime snapshot |
|
||||
| `/v1/stats/upstreams` | `enabled=false`, `reason=feature_disabled`, `summary/upstreams` omitted, `zero` still present | `enabled=true`, `reason=source_unavailable`, `summary/upstreams` omitted, `zero` present | `enabled=true`, `reason` omitted, `summary/upstreams` present, `zero` present |
|
||||
|
||||
`source_unavailable` conditions:
|
||||
- ME endpoints: ME pool is absent (for example direct-only mode or failed ME initialization).
|
||||
- Upstreams endpoint: non-blocking upstream snapshot lock is unavailable at request time.
|
||||
|
||||
## Serialization Rules
|
||||
|
||||
- Success responses always include `revision`.
|
||||
- Error responses never include `revision`; they include `request_id`.
|
||||
- Optional fields with `skip_serializing_if` are omitted when absent.
|
||||
- Nullable payload fields may still be `null` where contract uses `?` (for example `UserInfo` option fields).
|
||||
- For `/v1/stats/upstreams`, authentication details of SOCKS upstreams are intentionally omitted.
|
||||
|
||||
## Operational Notes
|
||||
|
||||
| Topic | Details |
|
||||
| --- | --- |
|
||||
| API startup | API listener is spawned only when `[server.api].enabled=true`. |
|
||||
| `listen` port `0` | API spawn is skipped when parsed listen port is `0` (treated as disabled bind target). |
|
||||
| Bind failure | Failed API bind logs warning and API task exits (no auto-retry loop). |
|
||||
| ME runtime status endpoints | `/v1/stats/me-writers`, `/v1/stats/dcs`, `/v1/stats/minimal/all` require `[server.api].minimal_runtime_enabled=true`; otherwise they return disabled payload with `reason=feature_disabled`. |
|
||||
| Upstream runtime endpoint | `/v1/stats/upstreams` always returns `zero`, but runtime fields (`summary`, `upstreams`) require `[server.api].minimal_runtime_enabled=true`. |
|
||||
| Restart requirements | `server.api` changes are restart-required for predictable behavior. |
|
||||
| Hot-reload nuance | A pure `server.api`-only config change may not propagate through watcher broadcast; a mixed change (with hot fields) may propagate API flags while still warning that restart is required. |
|
||||
| Runtime apply path | Successful writes are picked up by existing config watcher/hot-reload path. |
|
||||
| Exposure | Built-in TLS/mTLS is not provided. Use loopback bind + reverse proxy if needed. |
|
||||
| Pagination | User list currently has no pagination/filtering. |
|
||||
| Serialization side effect | Config comments/manual formatting are not preserved on write. |
|
||||
|
||||
## Known Limitations (Current Release)
|
||||
|
||||
- `POST /v1/users/{username}/rotate-secret` is currently unreachable in route matcher and returns `404`.
|
||||
- API runtime controls under `server.api` are documented as restart-required; hot-reload behavior for these fields is not strictly uniform in all change combinations.
|
||||
72
docs/FAQ.ru.md
Normal file
72
docs/FAQ.ru.md
Normal file
@@ -0,0 +1,72 @@
|
||||
## Как настроить канал "спонсор прокси"
|
||||
|
||||
1. Зайти в бота @MTProxybot.
|
||||
2. Ввести команду `/newproxy`
|
||||
3. Отправить IP и порт сервера. Например: 1.2.3.4:443
|
||||
4. Открыть конфиг `nano /etc/telemt.toml`.
|
||||
5. Скопировать и отправить боту секрет пользователя из раздела `[access.users]`.
|
||||
6. Скопировать полученный tag у бота. Например 1234567890abcdef1234567890abcdef.
|
||||
> [!WARNING]
|
||||
> Ссылка, которую выдает бот, не будет работать. Не копируйте и не используйте её!
|
||||
7. Раскомментировать параметр ad_tag и вписать tag, полученный у бота.
|
||||
8. Раскомментировать/добавить параметр use_middle_proxy = true.
|
||||
|
||||
Пример конфига:
|
||||
```toml
|
||||
[general]
|
||||
ad_tag = "1234567890abcdef1234567890abcdef"
|
||||
use_middle_proxy = true
|
||||
```
|
||||
9. Сохранить конфиг. Ctrl+S -> Ctrl+X.
|
||||
10. Перезапустить telemt `systemctl restart telemt`.
|
||||
11. В боте отправить команду /myproxies и выбрать добавленный сервер.
|
||||
12. Нажать кнопку "Set promotion".
|
||||
13. Отправить **публичную ссылку** на канал. Приватный канал добавить нельзя!
|
||||
14. Подождать примерно 1 час, пока информация обновится на серверах Telegram.
|
||||
> [!WARNING]
|
||||
> У вас не будет отображаться "спонсор прокси" если вы уже подписаны на канал.
|
||||
|
||||
**Также вы можете настроить разные каналы для разных пользователей.**
|
||||
```toml
|
||||
[access.user_ad_tags]
|
||||
hello = "ad_tag"
|
||||
hello2 = "ad_tag2"
|
||||
```
|
||||
|
||||
## Сколько человек может пользоваться 1 ссылкой
|
||||
|
||||
По умолчанию 1 ссылкой может пользоваться сколько угодно человек.
|
||||
Вы можете ограничить число IP, использующих прокси.
|
||||
```toml
|
||||
[access.user_max_unique_ips]
|
||||
hello = 1
|
||||
```
|
||||
Этот параметр ограничивает, сколько уникальных IP может использовать 1 ссылку одновременно. Если один пользователь отключится, второй сможет подключиться. Также с одного IP может сидеть несколько пользователей.
|
||||
|
||||
## Как сделать несколько разных ссылок
|
||||
|
||||
1. Сгенерируйте нужное число секретов `openssl rand -hex 16`
|
||||
2. Открыть конфиг `nano /etc/telemt.toml`
|
||||
3. Добавить новых пользователей.
|
||||
```toml
|
||||
[access.users]
|
||||
user1 = "00000000000000000000000000000001"
|
||||
user2 = "00000000000000000000000000000002"
|
||||
user3 = "00000000000000000000000000000003"
|
||||
```
|
||||
4. Сохранить конфиг. Ctrl+S -> Ctrl+X. Перезапускать telemt не нужно.
|
||||
5. Получить ссылки через `journalctl -u telemt -n -g "links" --no-pager -o cat | tac`
|
||||
|
||||
## Как посмотреть метрики
|
||||
|
||||
1. Открыть конфиг `nano /etc/telemt.toml`
|
||||
2. Добавить следующие параметры
|
||||
```toml
|
||||
[server]
|
||||
metrics_port = 9090
|
||||
metrics_whitelist = ["127.0.0.1/32", "::1/128", "0.0.0.0/0"]
|
||||
```
|
||||
3. Сохранить конфиг. Ctrl+S -> Ctrl+X.
|
||||
4. Метрики доступны по адресу SERVER_IP:9090/metrics.
|
||||
> [!WARNING]
|
||||
> "0.0.0.0/0" в metrics_whitelist открывает доступ с любого IP. Замените на свой ip. Например "1.2.3.4"
|
||||
40
docs/MIDDLE-END-KDF.de.md
Normal file
40
docs/MIDDLE-END-KDF.de.md
Normal file
@@ -0,0 +1,40 @@
|
||||
# Middle-End Proxy
|
||||
|
||||
## KDF-Adressierung — Implementierungs-FAQ
|
||||
|
||||
### Benötigt die C-Referenzimplementierung sowohl externe IP-Adresse als auch Port für die KDF?
|
||||
|
||||
Ja.
|
||||
|
||||
In der C-Referenzimplementierung werden **sowohl IP-Adresse als auch Port in die KDF einbezogen** — auf beiden Seiten der Verbindung.
|
||||
|
||||
In `aes_create_keys()` enthält der KDF-Input:
|
||||
|
||||
- `server_ip + client_port`
|
||||
- `client_ip + server_port`
|
||||
- sowie Secret / Nonces
|
||||
|
||||
Für IPv6:
|
||||
|
||||
- IPv4-Felder werden auf 0 gesetzt
|
||||
- IPv6-Adressen werden ergänzt
|
||||
|
||||
Die **Ports bleiben weiterhin Bestandteil der KDF**.
|
||||
|
||||
> Wenn sich externe IP oder Port (z. B. durch NAT, SOCKS oder Proxy) von den erwarteten Werten unterscheiden, entstehen unterschiedliche Schlüssel — der Handshake schlägt fehl.
|
||||
|
||||
---
|
||||
|
||||
### Kann der Port aus der KDF ausgeschlossen werden (z. B. durch Port = 0)?
|
||||
|
||||
**Nein!**
|
||||
|
||||
Die C-Referenzimplementierung enthält **keine Möglichkeit, den Port zu ignorieren**:
|
||||
- `client_port` und `server_port` sind fester Bestandteil der KDF
|
||||
- Es werden immer reale Socket-Ports übergeben:
|
||||
- `c->our_port`
|
||||
- `c->remote_port`
|
||||
|
||||
Falls ein Port den Wert `0` hat, wird er dennoch als `0` in die KDF übernommen.
|
||||
|
||||
Eine „Port-Ignore“-Logik existiert nicht.
|
||||
41
docs/MIDDLE-END-KDF.en.md
Normal file
41
docs/MIDDLE-END-KDF.en.md
Normal file
@@ -0,0 +1,41 @@
|
||||
# Middle-End Proxy
|
||||
|
||||
## KDF Addressing — Implementation FAQ
|
||||
|
||||
### Does the C-implementation require both external IP address and port for the KDF?
|
||||
|
||||
**Yes!**
|
||||
|
||||
In the C reference implementation, **both IP address and port are included in the KDF input** from both sides of the connection.
|
||||
|
||||
Inside `aes_create_keys()`, the KDF input explicitly contains:
|
||||
|
||||
- `server_ip + client_port`
|
||||
- `client_ip + server_port`
|
||||
- followed by shared secret / nonces
|
||||
|
||||
For IPv6:
|
||||
|
||||
- IPv4 fields are zeroed
|
||||
- IPv6 addresses are inserted
|
||||
|
||||
However, **client_port and server_port remain part of the KDF regardless of IP version**.
|
||||
|
||||
> If externally observed IP or port (e.g. due to NAT, SOCKS, or proxy traversal) differs from what the peer expects, the derived keys will not match and the handshake will fail.
|
||||
|
||||
---
|
||||
|
||||
### Can port be excluded from KDF (e.g. by using port = 0)?
|
||||
|
||||
**No!**
|
||||
|
||||
The C-implementation provides **no mechanism to ignore the port**:
|
||||
|
||||
- `client_port` and `server_port` are explicitly included in the KDF input
|
||||
- Real socket ports are always passed:
|
||||
- `c->our_port`
|
||||
- `c->remote_port`
|
||||
|
||||
If a port is `0`, it is still incorporated into the KDF as `0`.
|
||||
|
||||
There is **no conditional logic to exclude ports**.
|
||||
41
docs/MIDDLE-END-KDF.ru.md
Normal file
41
docs/MIDDLE-END-KDF.ru.md
Normal file
@@ -0,0 +1,41 @@
|
||||
# Middle-End Proxy
|
||||
|
||||
## KDF Addressing — FAQ по реализации
|
||||
|
||||
### Требует ли C-референсная реализация KDF внешний IP и порт?
|
||||
|
||||
**Да**
|
||||
|
||||
В C-референсе **в KDF участвуют и IP-адрес, и порт** — с обеих сторон соединения.
|
||||
|
||||
В `aes_create_keys()` в строку KDF входят:
|
||||
|
||||
- `server_ip + client_port`
|
||||
- `client_ip + server_port`
|
||||
- далее secret / nonces
|
||||
|
||||
Для IPv6:
|
||||
|
||||
- IPv4-поля заполняются нулями
|
||||
- добавляются IPv6-адреса
|
||||
|
||||
Однако **порты client_port и server_port всё равно участвуют в KDF**.
|
||||
|
||||
> Если внешний IP или порт (например, из-за NAT, SOCKS или прокси) не совпадает с ожидаемым другой стороной — ключи расходятся и handshake ломается.
|
||||
|
||||
---
|
||||
|
||||
### Можно ли исключить порт из KDF (например, установив порт = 0)?
|
||||
|
||||
**Нет.**
|
||||
|
||||
В C-референсе **нет механики отключения порта**.
|
||||
|
||||
- `client_port` и `server_port` явно включены в KDF
|
||||
- Передаются реальные порты сокета:
|
||||
- `c->our_port`
|
||||
- `c->remote_port`
|
||||
|
||||
Если порт равен `0`, он всё равно попадёт в KDF как `0`.
|
||||
|
||||
Отдельной логики «игнорировать порт» не предусмотрено.
|
||||
153
docs/QUICK_START_GUIDE.en.md
Normal file
153
docs/QUICK_START_GUIDE.en.md
Normal file
@@ -0,0 +1,153 @@
|
||||
# Telemt via Systemd
|
||||
|
||||
## Installation
|
||||
|
||||
This software is designed for Debian-based operating systems: in addition to Debian itself, this includes Ubuntu, Mint, Kali, MX and many other Linux distributions.
|
||||
|
||||
**1. Download**
|
||||
```bash
|
||||
wget -qO- "https://github.com/telemt/telemt/releases/latest/download/telemt-$(uname -m)-linux-$(ldd --version 2>&1 | grep -iq musl && echo musl || echo gnu).tar.gz" | tar -xz
|
||||
```
|
||||
**2. Move to the Bin folder**
|
||||
```bash
|
||||
mv telemt /bin
|
||||
```
|
||||
**3. Make the file executable**
|
||||
```bash
|
||||
chmod +x /bin/telemt
|
||||
```
|
||||
|
||||
## How to use?
|
||||
|
||||
**This guide "assumes" that you:**
|
||||
- logged in as root or executed `su -` / `sudo su`
|
||||
- Already have the "telemt" executable file in the /bin folder. Read the **[Installation](#Installation)** section.
|
||||
|
||||
---
|
||||
|
||||
**0. Check port and generate secrets**
|
||||
|
||||
The port you have selected must NOT appear in the output of the following command:
|
||||
```bash
|
||||
netstat -lnp
|
||||
```
|
||||
|
||||
Generate 16 bytes/32 characters HEX with OpenSSL or another way:
|
||||
```bash
|
||||
openssl rand -hex 16
|
||||
```
|
||||
OR
|
||||
```bash
|
||||
xxd -l 16 -p /dev/urandom
|
||||
```
|
||||
OR
|
||||
```bash
|
||||
python3 -c 'import os; print(os.urandom(16).hex())'
|
||||
```
|
||||
Save the obtained result somewhere. You will need it later!
|
||||
|
||||
---
|
||||
|
||||
**1. Place your config to /etc/telemt.toml**
|
||||
|
||||
Open nano
|
||||
```bash
|
||||
nano /etc/telemt.toml
|
||||
```
|
||||
paste your config
|
||||
|
||||
```toml
|
||||
# === General Settings ===
|
||||
[general]
|
||||
# ad_tag = "00000000000000000000000000000000"
|
||||
use_middle_proxy = false
|
||||
|
||||
[general.modes]
|
||||
classic = false
|
||||
secure = false
|
||||
tls = true
|
||||
|
||||
# === Anti-Censorship & Masking ===
|
||||
[censorship]
|
||||
tls_domain = "petrovich.ru"
|
||||
|
||||
[access.users]
|
||||
# format: "username" = "32_hex_chars_secret"
|
||||
hello = "00000000000000000000000000000000"
|
||||
```
|
||||
then Ctrl+S -> Ctrl+X to save
|
||||
|
||||
> [!WARNING]
|
||||
> Replace the value of the hello parameter with the value you obtained in step 0.
|
||||
> Replace the value of the tls_domain parameter with another website.
|
||||
|
||||
---
|
||||
|
||||
**2. Create service on /etc/systemd/system/telemt.service**
|
||||
|
||||
Open nano
|
||||
```bash
|
||||
nano /etc/systemd/system/telemt.service
|
||||
```
|
||||
|
||||
paste this Systemd Module
|
||||
```bash
|
||||
[Unit]
|
||||
Description=Telemt
|
||||
After=network.target
|
||||
|
||||
[Service]
|
||||
Type=simple
|
||||
WorkingDirectory=/bin
|
||||
ExecStart=/bin/telemt /etc/telemt.toml
|
||||
Restart=on-failure
|
||||
LimitNOFILE=65536
|
||||
|
||||
[Install]
|
||||
WantedBy=multi-user.target
|
||||
```
|
||||
then Ctrl+S -> Ctrl+X to save
|
||||
|
||||
|
||||
**3.** To start it, enter the command `systemctl start telemt`
|
||||
|
||||
**4.** To get status information, enter `systemctl status telemt`
|
||||
|
||||
**5.** For automatic startup at system boot, enter `systemctl enable telemt`
|
||||
|
||||
**6.** To get the links, enter `journalctl -u telemt -n -g "links" --no-pager -o cat | tac`
|
||||
|
||||
---
|
||||
|
||||
# Telemt via Docker Compose
|
||||
|
||||
**1. Edit `config.toml` in repo root (at least: port, users secrets, tls_domain)**
|
||||
**2. Start container:**
|
||||
```bash
|
||||
docker compose up -d --build
|
||||
```
|
||||
**3. Check logs:**
|
||||
```bash
|
||||
docker compose logs -f telemt
|
||||
```
|
||||
**4. Stop:**
|
||||
```bash
|
||||
docker compose down
|
||||
```
|
||||
> [!NOTE]
|
||||
> - `docker-compose.yml` maps `./config.toml` to `/app/config.toml` (read-only)
|
||||
> - By default it publishes `443:443` and runs with dropped capabilities (only `NET_BIND_SERVICE` is added)
|
||||
> - If you really need host networking (usually only for some IPv6 setups) uncomment `network_mode: host`
|
||||
|
||||
**Run without Compose**
|
||||
```bash
|
||||
docker build -t telemt:local .
|
||||
docker run --name telemt --restart unless-stopped \
|
||||
-p 443:443 \
|
||||
-e RUST_LOG=info \
|
||||
-v "$PWD/config.toml:/app/config.toml:ro" \
|
||||
--read-only \
|
||||
--cap-drop ALL --cap-add NET_BIND_SERVICE \
|
||||
--ulimit nofile=65536:65536 \
|
||||
telemt:local
|
||||
```
|
||||
155
docs/QUICK_START_GUIDE.ru.md
Normal file
155
docs/QUICK_START_GUIDE.ru.md
Normal file
@@ -0,0 +1,155 @@
|
||||
# Telemt через Systemd
|
||||
|
||||
## Установка
|
||||
|
||||
Это программное обеспечение разработано для ОС на базе Debian: помимо самого Debian, это Ubuntu, Mint, Kali, MX и многие другие дистрибутивы Linux.
|
||||
|
||||
**1. Скачать**
|
||||
```bash
|
||||
wget -qO- "https://github.com/telemt/telemt/releases/latest/download/telemt-$(uname -m)-linux-$(ldd --version 2>&1 | grep -iq musl && echo musl || echo gnu).tar.gz" | tar -xz
|
||||
```
|
||||
**2. Переместить в папку Bin**
|
||||
```bash
|
||||
mv telemt /bin
|
||||
```
|
||||
**3. Сделать файл исполняемым**
|
||||
```bash
|
||||
chmod +x /bin/telemt
|
||||
```
|
||||
|
||||
## Как правильно использовать?
|
||||
|
||||
**Эта инструкция "предполагает", что вы:**
|
||||
- Авторизовались как пользователь root или выполнили `su -` / `sudo su`
|
||||
- У вас уже есть исполняемый файл "telemt" в папке /bin. Читайте раздел **[Установка](#установка)**
|
||||
|
||||
---
|
||||
|
||||
**0. Проверьте порт и сгенерируйте секреты**
|
||||
|
||||
Порт, который вы выбрали для использования, должен отсутствовать в списке:
|
||||
```bash
|
||||
netstat -lnp
|
||||
```
|
||||
|
||||
Сгенерируйте 16 bytes/32 символа в шестнадцатеричном формате с помощью OpenSSL или другим способом:
|
||||
```bash
|
||||
openssl rand -hex 16
|
||||
```
|
||||
ИЛИ
|
||||
```bash
|
||||
xxd -l 16 -p /dev/urandom
|
||||
```
|
||||
ИЛИ
|
||||
```bash
|
||||
python3 -c 'import os; print(os.urandom(16).hex())'
|
||||
```
|
||||
Сохраните полученный результат где-нибудь. Он понадобится вам дальше!
|
||||
|
||||
---
|
||||
|
||||
**1. Поместите свою конфигурацию в файл /etc/telemt.toml**
|
||||
|
||||
Открываем nano
|
||||
```bash
|
||||
nano /etc/telemt.toml
|
||||
```
|
||||
Вставьте свою конфигурацию
|
||||
|
||||
```toml
|
||||
# === General Settings ===
|
||||
[general]
|
||||
# ad_tag = "00000000000000000000000000000000"
|
||||
use_middle_proxy = false
|
||||
|
||||
[general.modes]
|
||||
classic = false
|
||||
secure = false
|
||||
tls = true
|
||||
|
||||
# === Anti-Censorship & Masking ===
|
||||
[censorship]
|
||||
tls_domain = "petrovich.ru"
|
||||
|
||||
[access.users]
|
||||
# format: "username" = "32_hex_chars_secret"
|
||||
hello = "00000000000000000000000000000000"
|
||||
```
|
||||
Затем нажмите Ctrl+S -> Ctrl+X, чтобы сохранить
|
||||
|
||||
> [!WARNING]
|
||||
> Замените значение параметра hello на значение, которое вы получили в пункте 0.
|
||||
> Так же замените значение параметра tls_domain на другой сайт.
|
||||
|
||||
---
|
||||
|
||||
**2. Создайте службу в /etc/systemd/system/telemt.service**
|
||||
|
||||
Открываем nano
|
||||
```bash
|
||||
nano /etc/systemd/system/telemt.service
|
||||
```
|
||||
|
||||
Вставьте этот модуль Systemd
|
||||
```bash
|
||||
[Unit]
|
||||
Description=Telemt
|
||||
After=network.target
|
||||
|
||||
[Service]
|
||||
Type=simple
|
||||
WorkingDirectory=/bin
|
||||
ExecStart=/bin/telemt /etc/telemt.toml
|
||||
Restart=on-failure
|
||||
LimitNOFILE=65536
|
||||
|
||||
[Install]
|
||||
WantedBy=multi-user.target
|
||||
```
|
||||
Затем нажмите Ctrl+S -> Ctrl+X, чтобы сохранить
|
||||
|
||||
|
||||
**3.** Для запуска введите команду `systemctl start telemt`
|
||||
|
||||
**4.** Для получения информации о статусе введите `systemctl status telemt`
|
||||
|
||||
**5.** Для автоматического запуска при загрузке системы введите `systemctl enable telemt`
|
||||
|
||||
**6.** Для получения ссылки введите `journalctl -u telemt -n -g "links" --no-pager -o cat | tac`
|
||||
> [!WARNING]
|
||||
> Рабочую ссылку может выдать только команда из 6 пункта. Не пытайтесь делать ее самостоятельно или копировать откуда-либо!
|
||||
|
||||
---
|
||||
|
||||
# Telemt через Docker Compose
|
||||
|
||||
**1. Отредактируйте `config.toml` в корневом каталоге репозитория (как минимум: порт, пользовательские секреты, tls_domain)**
|
||||
**2. Запустите контейнер:**
|
||||
```bash
|
||||
docker compose up -d --build
|
||||
```
|
||||
**3. Проверьте логи:**
|
||||
```bash
|
||||
docker compose logs -f telemt
|
||||
```
|
||||
**4. Остановите контейнер:**
|
||||
```bash
|
||||
docker compose down
|
||||
```
|
||||
> [!NOTE]
|
||||
> - В `docker-compose.yml` файл `./config.toml` монтируется в `/app/config.toml` (доступно только для чтения)
|
||||
> - По умолчанию публикуется порт `443:443`, а контейнер запускается со сброшенными привилегиями (добавлена только `NET_BIND_SERVICE`)
|
||||
> - Если вам действительно нужна сеть хоста (обычно это требуется только для некоторых конфигураций IPv6), раскомментируйте `network_mode: host`
|
||||
|
||||
**Запуск без Docker Compose**
|
||||
```bash
|
||||
docker build -t telemt:local .
|
||||
docker run --name telemt --restart unless-stopped \
|
||||
-p 443:443 \
|
||||
-e RUST_LOG=info \
|
||||
-v "$PWD/config.toml:/app/config.toml:ro" \
|
||||
--read-only \
|
||||
--cap-drop ALL --cap-add NET_BIND_SERVICE \
|
||||
--ulimit nofile=65536:65536 \
|
||||
telemt:local
|
||||
```
|
||||
219
docs/TUNING.de.md
Normal file
219
docs/TUNING.de.md
Normal file
@@ -0,0 +1,219 @@
|
||||
# Telemt Tuning-Leitfaden: Middle-End und Upstreams
|
||||
|
||||
Dieses Dokument beschreibt das aktuelle Laufzeitverhalten für Middle-End (ME) und Upstream-Routing basierend auf:
|
||||
- `src/config/types.rs`
|
||||
- `src/config/defaults.rs`
|
||||
- `src/config/load.rs`
|
||||
- `src/transport/upstream.rs`
|
||||
|
||||
Die unten angegebenen `Default`-Werte sind Code-Defaults (bei fehlendem Schlüssel), nicht zwingend die Werte aus `config.full.toml`.
|
||||
|
||||
## Middle-End-Parameter
|
||||
|
||||
### 1) ME-Grundmodus, NAT und STUN
|
||||
|
||||
| Parameter | Typ | Default | Einschränkungen / Validierung | Laufzeiteffekt | Beispiel |
|
||||
|---|---|---:|---|---|---|
|
||||
| `general.use_middle_proxy` | `bool` | `true` | keine | Aktiviert den ME-Transportmodus. Bei `false` wird Direct-Modus verwendet. | `use_middle_proxy = true` |
|
||||
| `general.proxy_secret_path` | `Option<String>` | `"proxy-secret"` | Pfad kann `null` sein | Pfad zur Telegram-Infrastrukturdatei `proxy-secret`. | `proxy_secret_path = "proxy-secret"` |
|
||||
| `general.middle_proxy_nat_ip` | `Option<IpAddr>` | `null` | gültige IP bei gesetztem Wert | Manueller Override der öffentlichen NAT-IP für ME-Adressmaterial. | `middle_proxy_nat_ip = "203.0.113.10"` |
|
||||
| `general.middle_proxy_nat_probe` | `bool` | `true` | wird auf `true` erzwungen, wenn `use_middle_proxy=true` | Aktiviert NAT-Probing für ME. | `middle_proxy_nat_probe = true` |
|
||||
| `general.stun_nat_probe_concurrency` | `usize` | `8` | muss `> 0` sein | Maximale parallele STUN-Probes während NAT-Erkennung. | `stun_nat_probe_concurrency = 16` |
|
||||
| `network.stun_use` | `bool` | `true` | keine | Globaler STUN-Schalter. Bei `false` wird STUN deaktiviert. | `stun_use = true` |
|
||||
| `network.stun_servers` | `Vec<String>` | integrierter öffentlicher Pool | Duplikate/leer werden entfernt | Primäre STUN-Serverliste für NAT/Public-Endpoint-Erkennung. | `stun_servers = ["stun1.l.google.com:19302"]` |
|
||||
| `network.stun_tcp_fallback` | `bool` | `true` | keine | Aktiviert TCP-Fallback, wenn UDP-STUN blockiert ist. | `stun_tcp_fallback = true` |
|
||||
| `network.http_ip_detect_urls` | `Vec<String>` | `ifconfig.me` + `api.ipify.org` | keine | HTTP-Fallback zur öffentlichen IPv4-Erkennung, falls STUN ausfällt. | `http_ip_detect_urls = ["https://api.ipify.org"]` |
|
||||
| `general.stun_iface_mismatch_ignore` | `bool` | `false` | keine | Reserviertes Feld in der aktuellen Revision (derzeit kein aktiver Runtime-Verbrauch). | `stun_iface_mismatch_ignore = false` |
|
||||
| `timeouts.me_one_retry` | `u8` | `12` | keine | Anzahl schneller Reconnect-Versuche bei Single-Endpoint-DC-Fällen. | `me_one_retry = 6` |
|
||||
| `timeouts.me_one_timeout_ms` | `u64` | `1200` | keine | Timeout pro schnellem Einzelversuch (ms). | `me_one_timeout_ms = 1500` |
|
||||
|
||||
### 2) Poolgröße, Keepalive und Reconnect-Policy
|
||||
|
||||
| Parameter | Typ | Default | Einschränkungen / Validierung | Laufzeiteffekt | Beispiel |
|
||||
|---|---|---:|---|---|---|
|
||||
| `general.middle_proxy_pool_size` | `usize` | `8` | keine | Zielgröße des aktiven ME-Writer-Pools. | `middle_proxy_pool_size = 12` |
|
||||
| `general.middle_proxy_warm_standby` | `usize` | `16` | keine | Reserviertes Kompatibilitätsfeld in der aktuellen Revision (kein aktiver Runtime-Consumer). | `middle_proxy_warm_standby = 16` |
|
||||
| `general.me_keepalive_enabled` | `bool` | `true` | keine | Aktiviert periodischen ME-Keepalive/Ping-Traffic. | `me_keepalive_enabled = true` |
|
||||
| `general.me_keepalive_interval_secs` | `u64` | `25` | keine | Basisintervall für Keepalive (Sekunden). | `me_keepalive_interval_secs = 20` |
|
||||
| `general.me_keepalive_jitter_secs` | `u64` | `5` | keine | Keepalive-Jitter zur Vermeidung synchroner Peaks. | `me_keepalive_jitter_secs = 3` |
|
||||
| `general.me_keepalive_payload_random` | `bool` | `true` | keine | Randomisiert Keepalive-Payload-Bytes. | `me_keepalive_payload_random = true` |
|
||||
| `general.me_warmup_stagger_enabled` | `bool` | `true` | keine | Aktiviert gestaffeltes Warmup zusätzlicher ME-Verbindungen. | `me_warmup_stagger_enabled = true` |
|
||||
| `general.me_warmup_step_delay_ms` | `u64` | `500` | keine | Basisverzögerung zwischen Warmup-Schritten (ms). | `me_warmup_step_delay_ms = 300` |
|
||||
| `general.me_warmup_step_jitter_ms` | `u64` | `300` | keine | Zusätzlicher zufälliger Warmup-Jitter (ms). | `me_warmup_step_jitter_ms = 200` |
|
||||
| `general.me_reconnect_max_concurrent_per_dc` | `u32` | `8` | keine | Begrenzung paralleler Reconnect-Worker pro DC. | `me_reconnect_max_concurrent_per_dc = 12` |
|
||||
| `general.me_reconnect_backoff_base_ms` | `u64` | `500` | keine | Initiales Reconnect-Backoff (ms). | `me_reconnect_backoff_base_ms = 250` |
|
||||
| `general.me_reconnect_backoff_cap_ms` | `u64` | `30000` | keine | Maximales Reconnect-Backoff (ms). | `me_reconnect_backoff_cap_ms = 10000` |
|
||||
| `general.me_reconnect_fast_retry_count` | `u32` | `16` | keine | Budget für Sofort-Retries vor längerem Backoff. | `me_reconnect_fast_retry_count = 8` |
|
||||
|
||||
### 3) Reinit/Hardswap, Secret-Rotation und Degradation
|
||||
|
||||
| Parameter | Typ | Default | Einschränkungen / Validierung | Laufzeiteffekt | Beispiel |
|
||||
|---|---|---:|---|---|---|
|
||||
| `general.hardswap` | `bool` | `true` | keine | Aktiviert generation-basierte Hardswap-Strategie für den ME-Pool. | `hardswap = true` |
|
||||
| `general.me_reinit_every_secs` | `u64` | `900` | muss `> 0` sein | Intervall für periodische ME-Reinitialisierung. | `me_reinit_every_secs = 600` |
|
||||
| `general.me_hardswap_warmup_delay_min_ms` | `u64` | `1000` | muss `<= me_hardswap_warmup_delay_max_ms` sein | Untere Grenze für Warmup-Dial-Abstände. | `me_hardswap_warmup_delay_min_ms = 500` |
|
||||
| `general.me_hardswap_warmup_delay_max_ms` | `u64` | `2000` | muss `> 0` sein | Obere Grenze für Warmup-Dial-Abstände. | `me_hardswap_warmup_delay_max_ms = 1200` |
|
||||
| `general.me_hardswap_warmup_extra_passes` | `u8` | `3` | Bereich `[0,10]` | Zusätzliche Warmup-Pässe nach dem Basispass. | `me_hardswap_warmup_extra_passes = 2` |
|
||||
| `general.me_hardswap_warmup_pass_backoff_base_ms` | `u64` | `500` | muss `> 0` sein | Basis-Backoff zwischen zusätzlichen Warmup-Pässen. | `me_hardswap_warmup_pass_backoff_base_ms = 400` |
|
||||
| `general.me_config_stable_snapshots` | `u8` | `2` | muss `> 0` sein | Anzahl identischer ME-Config-Snapshots vor Apply. | `me_config_stable_snapshots = 3` |
|
||||
| `general.me_config_apply_cooldown_secs` | `u64` | `300` | keine | Cooldown zwischen angewendeten ME-Map-Updates. | `me_config_apply_cooldown_secs = 120` |
|
||||
| `general.proxy_secret_stable_snapshots` | `u8` | `2` | muss `> 0` sein | Anzahl identischer Secret-Snapshots vor Rotation. | `proxy_secret_stable_snapshots = 3` |
|
||||
| `general.proxy_secret_rotate_runtime` | `bool` | `true` | keine | Aktiviert Runtime-Rotation des Proxy-Secrets. | `proxy_secret_rotate_runtime = true` |
|
||||
| `general.proxy_secret_len_max` | `usize` | `256` | Bereich `[32,4096]` | Obergrenze für akzeptierte Secret-Länge. | `proxy_secret_len_max = 512` |
|
||||
| `general.update_every` | `Option<u64>` | `300` | wenn gesetzt: `> 0`; bei `null`: Legacy-Min-Fallback | Einheitliches Refresh-Intervall für ME-Config + Secret-Updater. | `update_every = 300` |
|
||||
| `general.me_pool_drain_ttl_secs` | `u64` | `90` | keine | Zeitraum, in dem stale Writer noch als Fallback zulässig sind. | `me_pool_drain_ttl_secs = 120` |
|
||||
| `general.me_pool_min_fresh_ratio` | `f32` | `0.8` | Bereich `[0.0,1.0]` | Coverage-Schwelle vor Drain der alten Generation. | `me_pool_min_fresh_ratio = 0.9` |
|
||||
| `general.me_reinit_drain_timeout_secs` | `u64` | `120` | `0` = kein Force-Close; wenn `>0 && < TTL`, dann auf TTL angehoben | Force-Close-Timeout für draining stale Writer. | `me_reinit_drain_timeout_secs = 0` |
|
||||
| `general.auto_degradation_enabled` | `bool` | `true` | keine | Reserviertes Kompatibilitätsfeld in aktueller Revision (kein aktiver Runtime-Consumer). | `auto_degradation_enabled = true` |
|
||||
| `general.degradation_min_unavailable_dc_groups` | `u8` | `2` | keine | Reservierter Kompatibilitäts-Schwellenwert in aktueller Revision (kein aktiver Runtime-Consumer). | `degradation_min_unavailable_dc_groups = 2` |
|
||||
|
||||
## Deprecated / Legacy Parameter
|
||||
|
||||
| Parameter | Status | Ersatz | Aktuelles Verhalten | Migrationshinweis |
|
||||
|---|---|---|---|---|
|
||||
| `general.middle_proxy_nat_stun` | Deprecated | `network.stun_servers` | Wird nur dann in `network.stun_servers` gemerged, wenn `network.stun_servers` nicht explizit gesetzt ist. | Wert nach `network.stun_servers` verschieben, Legacy-Key entfernen. |
|
||||
| `general.middle_proxy_nat_stun_servers` | Deprecated | `network.stun_servers` | Wird nur dann in `network.stun_servers` gemerged, wenn `network.stun_servers` nicht explizit gesetzt ist. | Werte nach `network.stun_servers` verschieben, Legacy-Key entfernen. |
|
||||
| `general.proxy_secret_auto_reload_secs` | Deprecated | `general.update_every` | Nur aktiv, wenn `update_every = null` (Legacy-Fallback). | `general.update_every` explizit setzen, Legacy-Key entfernen. |
|
||||
| `general.proxy_config_auto_reload_secs` | Deprecated | `general.update_every` | Nur aktiv, wenn `update_every = null` (Legacy-Fallback). | `general.update_every` explizit setzen, Legacy-Key entfernen. |
|
||||
|
||||
## Wie Upstreams konfiguriert werden
|
||||
|
||||
### Upstream-Schema
|
||||
|
||||
| Feld | Gilt für | Typ | Pflicht | Default | Bedeutung |
|
||||
|---|---|---|---|---|---|
|
||||
| `[[upstreams]].type` | alle Upstreams | `"direct" \| "socks4" \| "socks5"` | ja | n/a | Upstream-Transporttyp. |
|
||||
| `[[upstreams]].weight` | alle Upstreams | `u16` | nein | `1` | Basisgewicht für weighted-random Auswahl. |
|
||||
| `[[upstreams]].enabled` | alle Upstreams | `bool` | nein | `true` | Deaktivierte Einträge werden beim Start ignoriert. |
|
||||
| `[[upstreams]].scopes` | alle Upstreams | `String` | nein | `""` | Komma-separierte Scope-Tags für Request-Routing. |
|
||||
| `interface` | `direct` | `Option<String>` | nein | `null` | Interface-Name (z. B. `eth0`) oder lokale Literal-IP. |
|
||||
| `bind_addresses` | `direct` | `Option<Vec<IpAddr>>` | nein | `null` | Explizite Source-IP-Kandidaten (strikter Vorrang vor `interface`). |
|
||||
| `address` | `socks4` | `String` | ja | n/a | SOCKS4-Server (`ip:port` oder `host:port`). |
|
||||
| `interface` | `socks4` | `Option<String>` | nein | `null` | Wird nur genutzt, wenn `address` als `ip:port` angegeben ist. |
|
||||
| `user_id` | `socks4` | `Option<String>` | nein | `null` | SOCKS4 User-ID für CONNECT. |
|
||||
| `address` | `socks5` | `String` | ja | n/a | SOCKS5-Server (`ip:port` oder `host:port`). |
|
||||
| `interface` | `socks5` | `Option<String>` | nein | `null` | Wird nur genutzt, wenn `address` als `ip:port` angegeben ist. |
|
||||
| `username` | `socks5` | `Option<String>` | nein | `null` | SOCKS5 Benutzername. |
|
||||
| `password` | `socks5` | `Option<String>` | nein | `null` | SOCKS5 Passwort. |
|
||||
|
||||
### Runtime-Regeln (wichtig)
|
||||
|
||||
1. Wenn `[[upstreams]]` fehlt, injiziert der Loader einen Default-`direct`-Upstream.
|
||||
2. Scope-Filterung basiert auf exaktem Token-Match:
|
||||
- mit Request-Scope -> nur Einträge, deren `scopes` genau dieses Token enthält;
|
||||
- ohne Request-Scope -> nur Einträge mit leerem `scopes`.
|
||||
3. Unter healthy Upstreams erfolgt die Auswahl per weighted random: `weight * latency_factor`.
|
||||
4. Gibt es im gefilterten Set keinen healthy Upstream, wird zufällig aus dem gefilterten Set gewählt.
|
||||
5. `direct`-Bind-Auflösung:
|
||||
- zuerst `bind_addresses` (nur gleiche IP-Familie wie Target);
|
||||
- bei `interface` (Name) + `bind_addresses` wird jede Candidate-IP gegen Interface-Adressen validiert;
|
||||
- ungültige Kandidaten werden mit `WARN` verworfen;
|
||||
- bleiben keine gültigen Kandidaten übrig, erfolgt unbound direct connect (`bind_ip=None`);
|
||||
- wenn `bind_addresses` nicht passt, wird `interface` verwendet (Literal-IP oder Interface-Primäradresse).
|
||||
6. Für `socks4/socks5` mit Hostname-`address` ist Interface-Binding nicht unterstützt und wird mit Warnung ignoriert.
|
||||
7. Runtime DNS Overrides werden für Hostname-Auflösung bei Upstream-Verbindungen genutzt.
|
||||
8. Im ME-Modus wird der gewählte Upstream auch für den ME-TCP-Dial-Pfad verwendet.
|
||||
9. Im ME-Modus ist bei `direct` mit bind/interface die STUN-Reflection bind-aware für KDF-Adressmaterial.
|
||||
10. Im ME-Modus werden bei SOCKS-Upstream `BND.ADDR/BND.PORT` für KDF verwendet, wenn gültig/öffentlich und gleiche IP-Familie.
|
||||
|
||||
## Upstream-Konfigurationsbeispiele
|
||||
|
||||
### Beispiel 1: Minimaler direct Upstream
|
||||
|
||||
```toml
|
||||
[[upstreams]]
|
||||
type = "direct"
|
||||
weight = 1
|
||||
enabled = true
|
||||
```
|
||||
|
||||
### Beispiel 2: direct mit Interface + expliziten bind IPs
|
||||
|
||||
```toml
|
||||
[[upstreams]]
|
||||
type = "direct"
|
||||
interface = "eth0"
|
||||
bind_addresses = ["192.168.1.100", "192.168.1.101"]
|
||||
weight = 3
|
||||
enabled = true
|
||||
```
|
||||
|
||||
### Beispiel 3: SOCKS5 Upstream mit Authentifizierung
|
||||
|
||||
```toml
|
||||
[[upstreams]]
|
||||
type = "socks5"
|
||||
address = "198.51.100.30:1080"
|
||||
username = "proxy-user"
|
||||
password = "proxy-pass"
|
||||
weight = 2
|
||||
enabled = true
|
||||
```
|
||||
|
||||
### Beispiel 4: Gemischte Upstreams mit Scopes
|
||||
|
||||
```toml
|
||||
[[upstreams]]
|
||||
type = "direct"
|
||||
weight = 5
|
||||
enabled = true
|
||||
scopes = ""
|
||||
|
||||
[[upstreams]]
|
||||
type = "socks5"
|
||||
address = "203.0.113.40:1080"
|
||||
username = "edge"
|
||||
password = "edgepass"
|
||||
weight = 3
|
||||
enabled = true
|
||||
scopes = "premium,me"
|
||||
```
|
||||
|
||||
### Beispiel 5: ME-orientiertes Tuning-Profil
|
||||
|
||||
```toml
|
||||
[general]
|
||||
use_middle_proxy = true
|
||||
proxy_secret_path = "proxy-secret"
|
||||
middle_proxy_nat_probe = true
|
||||
stun_nat_probe_concurrency = 16
|
||||
middle_proxy_pool_size = 12
|
||||
me_keepalive_enabled = true
|
||||
me_keepalive_interval_secs = 20
|
||||
me_keepalive_jitter_secs = 4
|
||||
me_reconnect_max_concurrent_per_dc = 12
|
||||
me_reconnect_backoff_base_ms = 300
|
||||
me_reconnect_backoff_cap_ms = 10000
|
||||
me_reconnect_fast_retry_count = 10
|
||||
hardswap = true
|
||||
me_reinit_every_secs = 600
|
||||
me_hardswap_warmup_delay_min_ms = 500
|
||||
me_hardswap_warmup_delay_max_ms = 1200
|
||||
me_hardswap_warmup_extra_passes = 2
|
||||
me_hardswap_warmup_pass_backoff_base_ms = 400
|
||||
me_config_stable_snapshots = 3
|
||||
me_config_apply_cooldown_secs = 120
|
||||
proxy_secret_stable_snapshots = 3
|
||||
proxy_secret_rotate_runtime = true
|
||||
proxy_secret_len_max = 512
|
||||
update_every = 300
|
||||
me_pool_drain_ttl_secs = 120
|
||||
me_pool_min_fresh_ratio = 0.9
|
||||
me_reinit_drain_timeout_secs = 180
|
||||
|
||||
[timeouts]
|
||||
me_one_retry = 8
|
||||
me_one_timeout_ms = 1200
|
||||
|
||||
[network]
|
||||
stun_use = true
|
||||
stun_tcp_fallback = true
|
||||
stun_servers = [
|
||||
"stun1.l.google.com:19302",
|
||||
"stun2.l.google.com:19302"
|
||||
]
|
||||
http_ip_detect_urls = [
|
||||
"https://api.ipify.org",
|
||||
"https://ifconfig.me/ip"
|
||||
]
|
||||
```
|
||||
219
docs/TUNING.en.md
Normal file
219
docs/TUNING.en.md
Normal file
@@ -0,0 +1,219 @@
|
||||
# Telemt Tuning Guide: Middle-End and Upstreams
|
||||
|
||||
This document describes the current runtime behavior for Middle-End (ME) and upstream routing based on:
|
||||
- `src/config/types.rs`
|
||||
- `src/config/defaults.rs`
|
||||
- `src/config/load.rs`
|
||||
- `src/transport/upstream.rs`
|
||||
|
||||
Defaults below are code defaults (used when a key is omitted), not necessarily values from `config.full.toml` examples.
|
||||
|
||||
## Middle-End Parameters
|
||||
|
||||
### 1) Core ME mode, NAT, and STUN
|
||||
|
||||
| Parameter | Type | Default | Constraints / validation | Runtime effect | Example |
|
||||
|---|---|---:|---|---|---|
|
||||
| `general.use_middle_proxy` | `bool` | `true` | none | Enables ME transport mode. If `false`, Direct mode is used. | `use_middle_proxy = true` |
|
||||
| `general.proxy_secret_path` | `Option<String>` | `"proxy-secret"` | path may be `null` | Path to Telegram infrastructure proxy-secret file. | `proxy_secret_path = "proxy-secret"` |
|
||||
| `general.middle_proxy_nat_ip` | `Option<IpAddr>` | `null` | valid IP when set | Manual public NAT IP override for ME address material. | `middle_proxy_nat_ip = "203.0.113.10"` |
|
||||
| `general.middle_proxy_nat_probe` | `bool` | `true` | auto-forced to `true` when `use_middle_proxy=true` | Enables ME NAT probing. | `middle_proxy_nat_probe = true` |
|
||||
| `general.stun_nat_probe_concurrency` | `usize` | `8` | must be `> 0` | Max parallel STUN probes during NAT discovery. | `stun_nat_probe_concurrency = 16` |
|
||||
| `network.stun_use` | `bool` | `true` | none | Global STUN switch. If `false`, STUN probing is disabled. | `stun_use = true` |
|
||||
| `network.stun_servers` | `Vec<String>` | built-in public pool | deduplicated + empty values removed | Primary STUN server list for NAT/public endpoint discovery. | `stun_servers = ["stun1.l.google.com:19302"]` |
|
||||
| `network.stun_tcp_fallback` | `bool` | `true` | none | Enables TCP fallback path when UDP STUN is blocked. | `stun_tcp_fallback = true` |
|
||||
| `network.http_ip_detect_urls` | `Vec<String>` | `ifconfig.me` + `api.ipify.org` | none | HTTP fallback for public IPv4 detection if STUN is unavailable. | `http_ip_detect_urls = ["https://api.ipify.org"]` |
|
||||
| `general.stun_iface_mismatch_ignore` | `bool` | `false` | none | Reserved flag in current revision (not consumed by runtime path). | `stun_iface_mismatch_ignore = false` |
|
||||
| `timeouts.me_one_retry` | `u8` | `12` | none | Fast reconnect attempts for single-endpoint DC cases. | `me_one_retry = 6` |
|
||||
| `timeouts.me_one_timeout_ms` | `u64` | `1200` | none | Timeout per quick single-endpoint attempt (ms). | `me_one_timeout_ms = 1500` |
|
||||
|
||||
### 2) Pool size, keepalive, and reconnect policy
|
||||
|
||||
| Parameter | Type | Default | Constraints / validation | Runtime effect | Example |
|
||||
|---|---|---:|---|---|---|
|
||||
| `general.middle_proxy_pool_size` | `usize` | `8` | none | Target active ME writer pool size. | `middle_proxy_pool_size = 12` |
|
||||
| `general.middle_proxy_warm_standby` | `usize` | `16` | none | Reserved compatibility field in current revision (no active runtime consumer). | `middle_proxy_warm_standby = 16` |
|
||||
| `general.me_keepalive_enabled` | `bool` | `true` | none | Enables periodic ME keepalive/ping traffic. | `me_keepalive_enabled = true` |
|
||||
| `general.me_keepalive_interval_secs` | `u64` | `25` | none | Base keepalive interval (seconds). | `me_keepalive_interval_secs = 20` |
|
||||
| `general.me_keepalive_jitter_secs` | `u64` | `5` | none | Keepalive jitter to avoid synchronization bursts. | `me_keepalive_jitter_secs = 3` |
|
||||
| `general.me_keepalive_payload_random` | `bool` | `true` | none | Randomizes keepalive payload bytes. | `me_keepalive_payload_random = true` |
|
||||
| `general.me_warmup_stagger_enabled` | `bool` | `true` | none | Staggers extra ME warmup dials to avoid spikes. | `me_warmup_stagger_enabled = true` |
|
||||
| `general.me_warmup_step_delay_ms` | `u64` | `500` | none | Base delay between warmup dial steps (ms). | `me_warmup_step_delay_ms = 300` |
|
||||
| `general.me_warmup_step_jitter_ms` | `u64` | `300` | none | Additional random delay for warmup steps (ms). | `me_warmup_step_jitter_ms = 200` |
|
||||
| `general.me_reconnect_max_concurrent_per_dc` | `u32` | `8` | none | Limits concurrent reconnect workers per DC in health recovery. | `me_reconnect_max_concurrent_per_dc = 12` |
|
||||
| `general.me_reconnect_backoff_base_ms` | `u64` | `500` | none | Initial reconnect backoff (ms). | `me_reconnect_backoff_base_ms = 250` |
|
||||
| `general.me_reconnect_backoff_cap_ms` | `u64` | `30000` | none | Maximum reconnect backoff (ms). | `me_reconnect_backoff_cap_ms = 10000` |
|
||||
| `general.me_reconnect_fast_retry_count` | `u32` | `16` | none | Immediate retry budget before long backoff behavior. | `me_reconnect_fast_retry_count = 8` |
|
||||
|
||||
### 3) Reinit/hardswap, secret rotation, and degradation
|
||||
|
||||
| Parameter | Type | Default | Constraints / validation | Runtime effect | Example |
|
||||
|---|---|---:|---|---|---|
|
||||
| `general.hardswap` | `bool` | `true` | none | Enables generation-based ME hardswap strategy. | `hardswap = true` |
|
||||
| `general.me_reinit_every_secs` | `u64` | `900` | must be `> 0` | Periodic ME reinit interval. | `me_reinit_every_secs = 600` |
|
||||
| `general.me_hardswap_warmup_delay_min_ms` | `u64` | `1000` | must be `<= me_hardswap_warmup_delay_max_ms` | Lower bound for hardswap warmup dial spacing. | `me_hardswap_warmup_delay_min_ms = 500` |
|
||||
| `general.me_hardswap_warmup_delay_max_ms` | `u64` | `2000` | must be `> 0` | Upper bound for hardswap warmup dial spacing. | `me_hardswap_warmup_delay_max_ms = 1200` |
|
||||
| `general.me_hardswap_warmup_extra_passes` | `u8` | `3` | must be within `[0,10]` | Additional warmup passes after base pass. | `me_hardswap_warmup_extra_passes = 2` |
|
||||
| `general.me_hardswap_warmup_pass_backoff_base_ms` | `u64` | `500` | must be `> 0` | Base backoff between extra warmup passes. | `me_hardswap_warmup_pass_backoff_base_ms = 400` |
|
||||
| `general.me_config_stable_snapshots` | `u8` | `2` | must be `> 0` | Number of identical ME config snapshots required before apply. | `me_config_stable_snapshots = 3` |
|
||||
| `general.me_config_apply_cooldown_secs` | `u64` | `300` | none | Cooldown between applied ME map updates. | `me_config_apply_cooldown_secs = 120` |
|
||||
| `general.proxy_secret_stable_snapshots` | `u8` | `2` | must be `> 0` | Number of identical proxy-secret snapshots required before rotation. | `proxy_secret_stable_snapshots = 3` |
|
||||
| `general.proxy_secret_rotate_runtime` | `bool` | `true` | none | Enables runtime proxy-secret rotation. | `proxy_secret_rotate_runtime = true` |
|
||||
| `general.proxy_secret_len_max` | `usize` | `256` | must be within `[32,4096]` | Upper limit for accepted proxy-secret length. | `proxy_secret_len_max = 512` |
|
||||
| `general.update_every` | `Option<u64>` | `300` | if set: must be `> 0`; if `null`: legacy min fallback | Unified refresh interval for ME config + secret updater. | `update_every = 300` |
|
||||
| `general.me_pool_drain_ttl_secs` | `u64` | `90` | none | Time window where stale writers remain fallback-eligible. | `me_pool_drain_ttl_secs = 120` |
|
||||
| `general.me_pool_min_fresh_ratio` | `f32` | `0.8` | must be within `[0.0,1.0]` | Coverage threshold before stale generation can be drained. | `me_pool_min_fresh_ratio = 0.9` |
|
||||
| `general.me_reinit_drain_timeout_secs` | `u64` | `120` | `0` means no force-close; if `>0 && < TTL` it is bumped to TTL | Force-close timeout for draining stale writers. | `me_reinit_drain_timeout_secs = 0` |
|
||||
| `general.auto_degradation_enabled` | `bool` | `true` | none | Reserved compatibility flag in current revision (no active runtime consumer). | `auto_degradation_enabled = true` |
|
||||
| `general.degradation_min_unavailable_dc_groups` | `u8` | `2` | none | Reserved compatibility threshold in current revision (no active runtime consumer). | `degradation_min_unavailable_dc_groups = 2` |
|
||||
|
||||
## Deprecated / Legacy Parameters
|
||||
|
||||
| Parameter | Status | Replacement | Current behavior | Migration recommendation |
|
||||
|---|---|---|---|---|
|
||||
| `general.middle_proxy_nat_stun` | Deprecated | `network.stun_servers` | Merged into `network.stun_servers` only when `network.stun_servers` is not explicitly set. | Move value into `network.stun_servers` and remove legacy key. |
|
||||
| `general.middle_proxy_nat_stun_servers` | Deprecated | `network.stun_servers` | Merged into `network.stun_servers` only when `network.stun_servers` is not explicitly set. | Move values into `network.stun_servers` and remove legacy key. |
|
||||
| `general.proxy_secret_auto_reload_secs` | Deprecated | `general.update_every` | Used only when `update_every = null` (legacy fallback path). | Set `general.update_every` explicitly and remove legacy key. |
|
||||
| `general.proxy_config_auto_reload_secs` | Deprecated | `general.update_every` | Used only when `update_every = null` (legacy fallback path). | Set `general.update_every` explicitly and remove legacy key. |
|
||||
|
||||
## How Upstreams Are Configured
|
||||
|
||||
### Upstream schema
|
||||
|
||||
| Field | Applies to | Type | Required | Default | Meaning |
|
||||
|---|---|---|---|---|---|
|
||||
| `[[upstreams]].type` | all upstreams | `"direct" \| "socks4" \| "socks5"` | yes | n/a | Upstream transport type. |
|
||||
| `[[upstreams]].weight` | all upstreams | `u16` | no | `1` | Base weight for weighted-random selection. |
|
||||
| `[[upstreams]].enabled` | all upstreams | `bool` | no | `true` | Disabled entries are ignored at startup. |
|
||||
| `[[upstreams]].scopes` | all upstreams | `String` | no | `""` | Comma-separated scope tags for request-level routing. |
|
||||
| `interface` | `direct` | `Option<String>` | no | `null` | Interface name (e.g. `eth0`) or literal local IP for bind selection. |
|
||||
| `bind_addresses` | `direct` | `Option<Vec<IpAddr>>` | no | `null` | Explicit local source IP candidates (strict priority over `interface`). |
|
||||
| `address` | `socks4` | `String` | yes | n/a | SOCKS4 server endpoint (`ip:port` or `host:port`). |
|
||||
| `interface` | `socks4` | `Option<String>` | no | `null` | Used only for SOCKS server `ip:port` dial path. |
|
||||
| `user_id` | `socks4` | `Option<String>` | no | `null` | SOCKS4 user ID for CONNECT request. |
|
||||
| `address` | `socks5` | `String` | yes | n/a | SOCKS5 server endpoint (`ip:port` or `host:port`). |
|
||||
| `interface` | `socks5` | `Option<String>` | no | `null` | Used only for SOCKS server `ip:port` dial path. |
|
||||
| `username` | `socks5` | `Option<String>` | no | `null` | SOCKS5 username auth. |
|
||||
| `password` | `socks5` | `Option<String>` | no | `null` | SOCKS5 password auth. |
|
||||
|
||||
### Runtime rules (important)
|
||||
|
||||
1. If `[[upstreams]]` is omitted, loader injects one default `direct` upstream.
|
||||
2. Scope filtering is exact-token based:
|
||||
- when request scope is set -> only entries whose `scopes` contains that exact token;
|
||||
- when request scope is not set -> only entries with empty `scopes`.
|
||||
3. Healthy upstreams are selected by weighted random using: `weight * latency_factor`.
|
||||
4. If no healthy upstream exists in filtered set, random selection is used among filtered entries.
|
||||
5. `direct` bind resolution order:
|
||||
- `bind_addresses` candidates (same IP family as target) first;
|
||||
- if `interface` is an interface name and `bind_addresses` is set, each candidate IP is validated against addresses currently assigned to that interface;
|
||||
- invalid candidates are dropped with `WARN`;
|
||||
- if no valid candidate remains, connection falls back to unbound direct connect (`bind_ip=None`);
|
||||
- if no `bind_addresses` candidate, `interface` is used (literal IP or resolved interface primary IP).
|
||||
6. For `socks4/socks5` with `address` as hostname, interface binding is not supported and is ignored with warning.
|
||||
7. Runtime DNS overrides are used for upstream hostname resolution.
|
||||
8. In ME mode, the selected upstream is also used for ME TCP dial path.
|
||||
9. In ME mode for `direct` upstream with bind/interface, STUN reflection logic is bind-aware for KDF source material.
|
||||
10. In ME mode for a SOCKS upstream, the SOCKS `BND.ADDR/BND.PORT` is used for KDF when it is valid, public, and of the same IP family.
|
||||
|
||||
## Upstream Configuration Examples
|
||||
|
||||
### Example 1: Minimal direct upstream
|
||||
|
||||
```toml
|
||||
[[upstreams]]
|
||||
type = "direct"
|
||||
weight = 1
|
||||
enabled = true
|
||||
```
|
||||
|
||||
### Example 2: Direct with interface + explicit bind addresses
|
||||
|
||||
```toml
|
||||
[[upstreams]]
|
||||
type = "direct"
|
||||
interface = "eth0"
|
||||
bind_addresses = ["192.168.1.100", "192.168.1.101"]
|
||||
weight = 3
|
||||
enabled = true
|
||||
```
|
||||
|
||||
### Example 3: SOCKS5 upstream with authentication
|
||||
|
||||
```toml
|
||||
[[upstreams]]
|
||||
type = "socks5"
|
||||
address = "198.51.100.30:1080"
|
||||
username = "proxy-user"
|
||||
password = "proxy-pass"
|
||||
weight = 2
|
||||
enabled = true
|
||||
```
|
||||
|
||||
### Example 4: Mixed upstreams with scopes
|
||||
|
||||
```toml
|
||||
[[upstreams]]
|
||||
type = "direct"
|
||||
weight = 5
|
||||
enabled = true
|
||||
scopes = ""
|
||||
|
||||
[[upstreams]]
|
||||
type = "socks5"
|
||||
address = "203.0.113.40:1080"
|
||||
username = "edge"
|
||||
password = "edgepass"
|
||||
weight = 3
|
||||
enabled = true
|
||||
scopes = "premium,me"
|
||||
```
|
||||
|
||||
### Example 5: ME-focused tuning profile
|
||||
|
||||
```toml
|
||||
[general]
|
||||
use_middle_proxy = true
|
||||
proxy_secret_path = "proxy-secret"
|
||||
middle_proxy_nat_probe = true
|
||||
stun_nat_probe_concurrency = 16
|
||||
middle_proxy_pool_size = 12
|
||||
me_keepalive_enabled = true
|
||||
me_keepalive_interval_secs = 20
|
||||
me_keepalive_jitter_secs = 4
|
||||
me_reconnect_max_concurrent_per_dc = 12
|
||||
me_reconnect_backoff_base_ms = 300
|
||||
me_reconnect_backoff_cap_ms = 10000
|
||||
me_reconnect_fast_retry_count = 10
|
||||
hardswap = true
|
||||
me_reinit_every_secs = 600
|
||||
me_hardswap_warmup_delay_min_ms = 500
|
||||
me_hardswap_warmup_delay_max_ms = 1200
|
||||
me_hardswap_warmup_extra_passes = 2
|
||||
me_hardswap_warmup_pass_backoff_base_ms = 400
|
||||
me_config_stable_snapshots = 3
|
||||
me_config_apply_cooldown_secs = 120
|
||||
proxy_secret_stable_snapshots = 3
|
||||
proxy_secret_rotate_runtime = true
|
||||
proxy_secret_len_max = 512
|
||||
update_every = 300
|
||||
me_pool_drain_ttl_secs = 120
|
||||
me_pool_min_fresh_ratio = 0.9
|
||||
me_reinit_drain_timeout_secs = 180
|
||||
|
||||
[timeouts]
|
||||
me_one_retry = 8
|
||||
me_one_timeout_ms = 1200
|
||||
|
||||
[network]
|
||||
stun_use = true
|
||||
stun_tcp_fallback = true
|
||||
stun_servers = [
|
||||
"stun1.l.google.com:19302",
|
||||
"stun2.l.google.com:19302"
|
||||
]
|
||||
http_ip_detect_urls = [
|
||||
"https://api.ipify.org",
|
||||
"https://ifconfig.me/ip"
|
||||
]
|
||||
```
|
||||
219
docs/TUNING.ru.md
Normal file
219
docs/TUNING.ru.md
Normal file
@@ -0,0 +1,219 @@
|
||||
# Руководство по тюнингу Telemt: Middle-End и Upstreams
|
||||
|
||||
Документ описывает актуальное поведение Middle-End (ME) и маршрутизации через upstream на основе:
|
||||
- `src/config/types.rs`
|
||||
- `src/config/defaults.rs`
|
||||
- `src/config/load.rs`
|
||||
- `src/transport/upstream.rs`
|
||||
|
||||
Значения `Default` ниже — это значения из кода при отсутствии ключа в конфиге, а не обязательно значения из примеров `config.full.toml`.
|
||||
|
||||
## Параметры Middle-End
|
||||
|
||||
### 1) Базовый режим ME, NAT и STUN
|
||||
|
||||
| Параметр | Тип | Default | Ограничения / валидация | Влияние на runtime | Пример |
|
||||
|---|---|---:|---|---|---|
|
||||
| `general.use_middle_proxy` | `bool` | `true` | нет | Включает транспорт ME. При `false` используется Direct-режим. | `use_middle_proxy = true` |
|
||||
| `general.proxy_secret_path` | `Option<String>` | `"proxy-secret"` | путь может быть `null` | Путь к инфраструктурному proxy-secret Telegram. | `proxy_secret_path = "proxy-secret"` |
|
||||
| `general.middle_proxy_nat_ip` | `Option<IpAddr>` | `null` | валидный IP при задании | Ручной override публичного NAT IP для адресного материала ME. | `middle_proxy_nat_ip = "203.0.113.10"` |
|
||||
| `general.middle_proxy_nat_probe` | `bool` | `true` | авто-принудительно `true`, если `use_middle_proxy=true` | Включает NAT probing для ME. | `middle_proxy_nat_probe = true` |
|
||||
| `general.stun_nat_probe_concurrency` | `usize` | `8` | должно быть `> 0` | Максимум параллельных STUN-проб при NAT-детекте. | `stun_nat_probe_concurrency = 16` |
|
||||
| `network.stun_use` | `bool` | `true` | нет | Глобальный переключатель STUN. При `false` STUN отключен. | `stun_use = true` |
|
||||
| `network.stun_servers` | `Vec<String>` | встроенный публичный пул | удаляются дубликаты и пустые значения | Основной список STUN-серверов для NAT/public endpoint discovery. | `stun_servers = ["stun1.l.google.com:19302"]` |
|
||||
| `network.stun_tcp_fallback` | `bool` | `true` | нет | Включает TCP fallback, если UDP STUN недоступен. | `stun_tcp_fallback = true` |
|
||||
| `network.http_ip_detect_urls` | `Vec<String>` | `ifconfig.me` + `api.ipify.org` | нет | HTTP fallback для определения публичного IPv4 при недоступности STUN. | `http_ip_detect_urls = ["https://api.ipify.org"]` |
|
||||
| `general.stun_iface_mismatch_ignore` | `bool` | `false` | нет | Зарезервированный флаг в текущей ревизии (runtime его не использует). | `stun_iface_mismatch_ignore = false` |
|
||||
| `timeouts.me_one_retry` | `u8` | `12` | нет | Количество быстрых reconnect-попыток для DC с одним endpoint. | `me_one_retry = 6` |
|
||||
| `timeouts.me_one_timeout_ms` | `u64` | `1200` | нет | Таймаут одной быстрой попытки (мс). | `me_one_timeout_ms = 1500` |
|
||||
|
||||
### 2) Размер пула, keepalive и reconnect-политика
|
||||
|
||||
| Параметр | Тип | Default | Ограничения / валидация | Влияние на runtime | Пример |
|
||||
|---|---|---:|---|---|---|
|
||||
| `general.middle_proxy_pool_size` | `usize` | `8` | нет | Целевой размер активного пула ME-writer соединений. | `middle_proxy_pool_size = 12` |
|
||||
| `general.middle_proxy_warm_standby` | `usize` | `16` | нет | Зарезервированное поле совместимости в текущей ревизии (активного runtime-consumer нет). | `middle_proxy_warm_standby = 16` |
|
||||
| `general.me_keepalive_enabled` | `bool` | `true` | нет | Включает периодические keepalive/ping кадры ME. | `me_keepalive_enabled = true` |
|
||||
| `general.me_keepalive_interval_secs` | `u64` | `25` | нет | Базовый интервал keepalive (сек). | `me_keepalive_interval_secs = 20` |
|
||||
| `general.me_keepalive_jitter_secs` | `u64` | `5` | нет | Джиттер keepalive для предотвращения синхронных всплесков. | `me_keepalive_jitter_secs = 3` |
|
||||
| `general.me_keepalive_payload_random` | `bool` | `true` | нет | Рандомизирует payload keepalive-кадров. | `me_keepalive_payload_random = true` |
|
||||
| `general.me_warmup_stagger_enabled` | `bool` | `true` | нет | Включает staggered warmup дополнительных ME-коннектов. | `me_warmup_stagger_enabled = true` |
|
||||
| `general.me_warmup_step_delay_ms` | `u64` | `500` | нет | Базовая задержка между шагами warmup (мс). | `me_warmup_step_delay_ms = 300` |
|
||||
| `general.me_warmup_step_jitter_ms` | `u64` | `300` | нет | Дополнительный случайный warmup-джиттер (мс). | `me_warmup_step_jitter_ms = 200` |
|
||||
| `general.me_reconnect_max_concurrent_per_dc` | `u32` | `8` | нет | Ограничивает параллельные reconnect worker'ы на один DC. | `me_reconnect_max_concurrent_per_dc = 12` |
|
||||
| `general.me_reconnect_backoff_base_ms` | `u64` | `500` | нет | Начальный backoff reconnect (мс). | `me_reconnect_backoff_base_ms = 250` |
|
||||
| `general.me_reconnect_backoff_cap_ms` | `u64` | `30000` | нет | Верхняя граница backoff reconnect (мс). | `me_reconnect_backoff_cap_ms = 10000` |
|
||||
| `general.me_reconnect_fast_retry_count` | `u32` | `16` | нет | Бюджет быстрых retry до длинного backoff. | `me_reconnect_fast_retry_count = 8` |
|
||||
|
||||
### 3) Reinit/hardswap, ротация секрета и деградация
|
||||
|
||||
| Параметр | Тип | Default | Ограничения / валидация | Влияние на runtime | Пример |
|
||||
|---|---|---:|---|---|---|
|
||||
| `general.hardswap` | `bool` | `true` | нет | Включает generation-based стратегию hardswap для ME-пула. | `hardswap = true` |
|
||||
| `general.me_reinit_every_secs` | `u64` | `900` | должно быть `> 0` | Интервал периодического reinit ME-пула. | `me_reinit_every_secs = 600` |
|
||||
| `general.me_hardswap_warmup_delay_min_ms` | `u64` | `1000` | должно быть `<= me_hardswap_warmup_delay_max_ms` | Нижняя граница пауз между warmup dial попытками. | `me_hardswap_warmup_delay_min_ms = 500` |
|
||||
| `general.me_hardswap_warmup_delay_max_ms` | `u64` | `2000` | должно быть `> 0` | Верхняя граница пауз между warmup dial попытками. | `me_hardswap_warmup_delay_max_ms = 1200` |
|
||||
| `general.me_hardswap_warmup_extra_passes` | `u8` | `3` | диапазон `[0,10]` | Дополнительные warmup-проходы после базового. | `me_hardswap_warmup_extra_passes = 2` |
|
||||
| `general.me_hardswap_warmup_pass_backoff_base_ms` | `u64` | `500` | должно быть `> 0` | Базовый backoff между extra-pass в warmup. | `me_hardswap_warmup_pass_backoff_base_ms = 400` |
|
||||
| `general.me_config_stable_snapshots` | `u8` | `2` | должно быть `> 0` | Количество одинаковых snapshot перед применением ME map update. | `me_config_stable_snapshots = 3` |
|
||||
| `general.me_config_apply_cooldown_secs` | `u64` | `300` | нет | Cooldown между применёнными обновлениями ME map. | `me_config_apply_cooldown_secs = 120` |
|
||||
| `general.proxy_secret_stable_snapshots` | `u8` | `2` | должно быть `> 0` | Количество одинаковых snapshot перед runtime-rotation proxy-secret. | `proxy_secret_stable_snapshots = 3` |
|
||||
| `general.proxy_secret_rotate_runtime` | `bool` | `true` | нет | Включает runtime-ротацию proxy-secret. | `proxy_secret_rotate_runtime = true` |
|
||||
| `general.proxy_secret_len_max` | `usize` | `256` | диапазон `[32,4096]` | Верхний лимит длины принимаемого proxy-secret. | `proxy_secret_len_max = 512` |
|
||||
| `general.update_every` | `Option<u64>` | `300` | если задано: `> 0`; если `null`: fallback на legacy минимум | Единый интервал refresh для ME config + secret updater. | `update_every = 300` |
|
||||
| `general.me_pool_drain_ttl_secs` | `u64` | `90` | нет | Время, когда stale writer ещё может использоваться как fallback. | `me_pool_drain_ttl_secs = 120` |
|
||||
| `general.me_pool_min_fresh_ratio` | `f32` | `0.8` | диапазон `[0.0,1.0]` | Порог покрытия fresh-поколения перед drain старого поколения. | `me_pool_min_fresh_ratio = 0.9` |
|
||||
| `general.me_reinit_drain_timeout_secs` | `u64` | `120` | `0` = без force-close; если `>0 && < TTL`, поднимается до TTL | Таймаут force-close для draining stale writer. | `me_reinit_drain_timeout_secs = 0` |
|
||||
| `general.auto_degradation_enabled` | `bool` | `true` | нет | Зарезервированный флаг совместимости в текущей ревизии (активного runtime-consumer нет). | `auto_degradation_enabled = true` |
|
||||
| `general.degradation_min_unavailable_dc_groups` | `u8` | `2` | нет | Зарезервированный порог совместимости в текущей ревизии (активного runtime-consumer нет). | `degradation_min_unavailable_dc_groups = 2` |
|
||||
|
||||
## Устаревшие / legacy параметры
|
||||
|
||||
| Параметр | Статус | Замена | Текущее поведение | Рекомендация миграции |
|
||||
|---|---|---|---|---|
|
||||
| `general.middle_proxy_nat_stun` | Deprecated | `network.stun_servers` | Добавляется в `network.stun_servers`, только если `network.stun_servers` не задан явно. | Перенести значение в `network.stun_servers`, legacy-ключ удалить. |
|
||||
| `general.middle_proxy_nat_stun_servers` | Deprecated | `network.stun_servers` | Добавляется в `network.stun_servers`, только если `network.stun_servers` не задан явно. | Перенести значения в `network.stun_servers`, legacy-ключ удалить. |
|
||||
| `general.proxy_secret_auto_reload_secs` | Deprecated | `general.update_every` | Используется только если `update_every = null` (legacy fallback). | Явно задать `general.update_every`, legacy-ключ удалить. |
|
||||
| `general.proxy_config_auto_reload_secs` | Deprecated | `general.update_every` | Используется только если `update_every = null` (legacy fallback). | Явно задать `general.update_every`, legacy-ключ удалить. |
|
||||
|
||||
## Как конфигурируются Upstreams
|
||||
|
||||
### Схема upstream
|
||||
|
||||
| Поле | Применимость | Тип | Обязательно | Default | Назначение |
|
||||
|---|---|---|---|---|---|
|
||||
| `[[upstreams]].type` | все upstream | `"direct" \| "socks4" \| "socks5"` | да | n/a | Тип upstream транспорта. |
|
||||
| `[[upstreams]].weight` | все upstream | `u16` | нет | `1` | Базовый вес в weighted-random выборе. |
|
||||
| `[[upstreams]].enabled` | все upstream | `bool` | нет | `true` | Выключенные записи игнорируются на старте. |
|
||||
| `[[upstreams]].scopes` | все upstream | `String` | нет | `""` | Список scope-токенов через запятую для маршрутизации. |
|
||||
| `interface` | `direct` | `Option<String>` | нет | `null` | Имя интерфейса (например `eth0`) или literal локальный IP. |
|
||||
| `bind_addresses` | `direct` | `Option<Vec<IpAddr>>` | нет | `null` | Явные кандидаты source IP (имеют приоритет над `interface`). |
|
||||
| `address` | `socks4` | `String` | да | n/a | Адрес SOCKS4 сервера (`ip:port` или `host:port`). |
|
||||
| `interface` | `socks4` | `Option<String>` | нет | `null` | Используется только если `address` задан как `ip:port`. |
|
||||
| `user_id` | `socks4` | `Option<String>` | нет | `null` | SOCKS4 user ID в CONNECT-запросе. |
|
||||
| `address` | `socks5` | `String` | да | n/a | Адрес SOCKS5 сервера (`ip:port` или `host:port`). |
|
||||
| `interface` | `socks5` | `Option<String>` | нет | `null` | Используется только если `address` задан как `ip:port`. |
|
||||
| `username` | `socks5` | `Option<String>` | нет | `null` | Логин SOCKS5 auth. |
|
||||
| `password` | `socks5` | `Option<String>` | нет | `null` | Пароль SOCKS5 auth. |
|
||||
|
||||
### Runtime-правила
|
||||
|
||||
1. Если `[[upstreams]]` отсутствует, loader добавляет один upstream `direct` по умолчанию.
|
||||
2. Scope-фильтрация — по точному совпадению токена:
|
||||
- если scope запроса задан -> используются только записи, где `scopes` содержит такой же токен;
|
||||
- если scope запроса не задан -> используются только записи с пустым `scopes`.
|
||||
3. Среди healthy upstream используется weighted-random выбор: `weight * latency_factor`.
|
||||
4. Если в отфильтрованном наборе нет healthy upstream, выбирается случайный из отфильтрованных.
|
||||
5. Порядок выбора bind для `direct`:
|
||||
- сначала `bind_addresses` (только IP нужного семейства);
|
||||
- если одновременно заданы `interface` (имя) и `bind_addresses`, каждый IP проверяется на принадлежность интерфейсу;
|
||||
- несовпадающие IP отбрасываются с `WARN`;
|
||||
- если валидных IP не осталось, используется unbound direct connect (`bind_ip=None`);
|
||||
- если `bind_addresses` не подходит, применяется `interface` (literal IP или адрес интерфейса).
|
||||
6. Для `socks4/socks5` с `address` в виде hostname интерфейсный bind не поддерживается и игнорируется с предупреждением.
|
||||
7. Runtime DNS overrides применяются к резолвингу hostname в upstream-подключениях.
|
||||
8. В ME-режиме выбранный upstream также используется для ME TCP dial path.
|
||||
9. В ME-режиме для `direct` upstream с bind/interface STUN-рефлексия выполняется bind-aware для KDF материала.
|
||||
10. В ME-режиме для SOCKS upstream используются `BND.ADDR/BND.PORT` для KDF, если адрес валиден/публичен и соответствует IP family.
|
||||
|
||||
## Примеры конфигурации Upstreams
|
||||
|
||||
### Пример 1: минимальный direct upstream
|
||||
|
||||
```toml
|
||||
[[upstreams]]
|
||||
type = "direct"
|
||||
weight = 1
|
||||
enabled = true
|
||||
```
|
||||
|
||||
### Пример 2: direct с interface + явными bind IP
|
||||
|
||||
```toml
|
||||
[[upstreams]]
|
||||
type = "direct"
|
||||
interface = "eth0"
|
||||
bind_addresses = ["192.168.1.100", "192.168.1.101"]
|
||||
weight = 3
|
||||
enabled = true
|
||||
```
|
||||
|
||||
### Пример 3: SOCKS5 upstream с аутентификацией
|
||||
|
||||
```toml
|
||||
[[upstreams]]
|
||||
type = "socks5"
|
||||
address = "198.51.100.30:1080"
|
||||
username = "proxy-user"
|
||||
password = "proxy-pass"
|
||||
weight = 2
|
||||
enabled = true
|
||||
```
|
||||
|
||||
### Пример 4: смешанные upstream с scopes
|
||||
|
||||
```toml
|
||||
[[upstreams]]
|
||||
type = "direct"
|
||||
weight = 5
|
||||
enabled = true
|
||||
scopes = ""
|
||||
|
||||
[[upstreams]]
|
||||
type = "socks5"
|
||||
address = "203.0.113.40:1080"
|
||||
username = "edge"
|
||||
password = "edgepass"
|
||||
weight = 3
|
||||
enabled = true
|
||||
scopes = "premium,me"
|
||||
```
|
||||
|
||||
### Пример 5: профиль тюнинга под ME
|
||||
|
||||
```toml
|
||||
[general]
|
||||
use_middle_proxy = true
|
||||
proxy_secret_path = "proxy-secret"
|
||||
middle_proxy_nat_probe = true
|
||||
stun_nat_probe_concurrency = 16
|
||||
middle_proxy_pool_size = 12
|
||||
me_keepalive_enabled = true
|
||||
me_keepalive_interval_secs = 20
|
||||
me_keepalive_jitter_secs = 4
|
||||
me_reconnect_max_concurrent_per_dc = 12
|
||||
me_reconnect_backoff_base_ms = 300
|
||||
me_reconnect_backoff_cap_ms = 10000
|
||||
me_reconnect_fast_retry_count = 10
|
||||
hardswap = true
|
||||
me_reinit_every_secs = 600
|
||||
me_hardswap_warmup_delay_min_ms = 500
|
||||
me_hardswap_warmup_delay_max_ms = 1200
|
||||
me_hardswap_warmup_extra_passes = 2
|
||||
me_hardswap_warmup_pass_backoff_base_ms = 400
|
||||
me_config_stable_snapshots = 3
|
||||
me_config_apply_cooldown_secs = 120
|
||||
proxy_secret_stable_snapshots = 3
|
||||
proxy_secret_rotate_runtime = true
|
||||
proxy_secret_len_max = 512
|
||||
update_every = 300
|
||||
me_pool_drain_ttl_secs = 120
|
||||
me_pool_min_fresh_ratio = 0.9
|
||||
me_reinit_drain_timeout_secs = 180
|
||||
|
||||
[timeouts]
|
||||
me_one_retry = 8
|
||||
me_one_timeout_ms = 1200
|
||||
|
||||
[network]
|
||||
stun_use = true
|
||||
stun_tcp_fallback = true
|
||||
stun_servers = [
|
||||
"stun1.l.google.com:19302",
|
||||
"stun2.l.google.com:19302"
|
||||
]
|
||||
http_ip_detect_urls = [
|
||||
"https://api.ipify.org",
|
||||
"https://ifconfig.me/ip"
|
||||
]
|
||||
```
|
||||
321
docs/XRAY-SINGBOX-ROUTING.ru.md
Normal file
321
docs/XRAY-SINGBOX-ROUTING.ru.md
Normal file
@@ -0,0 +1,321 @@
|
||||
# SNI-маршрутизация в xray-core / sing-box + TLS-fronting
|
||||
|
||||
## Термины (в контексте этого кейса)
|
||||
|
||||
- **TLS-fronting домен** — домен, который фигурирует в TLS ClientHello как **SNI** (например, `petrovich.ru`): он используется как "маска" на L7 и как ключ маршрутизации в прокси-роутере.
|
||||
- **xray-core / sing-box** — локальный или удалённый L7/TLS-роутер (прокси), который:
|
||||
1) принимает входящее TCP/TLS-соединение,
|
||||
2) читает TLS ClientHello,
|
||||
3) извлекает SNI,
|
||||
4) по SNI выбирает outbound/апстрим,
|
||||
5) устанавливает новое TCP-соединение к целевому хосту уже **от себя**.
|
||||
- **SNI (Server Name Indication)** — поле в TLS ClientHello, где клиент Telegram сообщает доменное имя для "маскировки"
|
||||
- **DNS-resolve на стороне L7-роутера** — если выходной адрес задан доменом (или роутер решил "всё равно идти по SNI"), то DNS резолвится **на стороне xray/sing-box**, а не на стороне Telegram-клиента
|
||||
|
||||
---
|
||||
|
||||
## Ключевая идея: куда на самом деле идёт соединение, решает не то, что вы указали клиенту, а то, как L7-роутер трактует SNI
|
||||
|
||||
Механика:
|
||||
|
||||
1) Telegram-клиенту вы можете указать **IP/домен telemt**, как "сервер".
|
||||
2) Между клиентом и telemt стоит xray-core/sing-box, который принимает TCP, читает TLS ClientHello и видит **SNI=petrovich.ru**
|
||||
3) Дальше роутер говорит: "Вижу SNI - направить на апстрим/маршрут N"
|
||||
4) И устанавливает исходящее соединение не "по тому IP, который пользователь подразумевал", а **по домену из SNI** (или по сопоставлению SNI→outbound), используя для определения его IP собственный DNS-кеш или резолвер
|
||||
5) `petrovich.ru` по A-записи указывает **не на IP telemt**, а значит при L7-маршрутизации трафик уйдёт на "оригинальный" сайт за этим доменом, а не в telemt: Telegram-клиент, естественно, не сможет получить ожидаемое поведение, потому что ответить с handshake на той стороне некому
|
||||
|
||||
---
|
||||
|
||||
## Схема №1 "Как это НЕ работает"
|
||||
|
||||
```text
|
||||
Telegram Client
|
||||
|
|
||||
| (указан IP/домен telemt)
|
||||
v
|
||||
telemt instance
|
||||
```
|
||||
|
||||
Ожидание: "я указал telemt -> значит трафик попадёт в telemt" - **нет!**
|
||||
|
||||
---
|
||||
|
||||
## Схема №2. "Как это реально работает с TLS/L7-роутером и SNI"
|
||||
|
||||
```text
|
||||
Telegram Client
|
||||
|
|
||||
| 1) TCP/TLS connection:
|
||||
| - ClientHello:
|
||||
| - SNI=petrovich.ru
|
||||
v
|
||||
xray-core / sing-box / любой L7 router
|
||||
|
|
||||
| 2) читает ClientHello -> вытаскивает SNI
|
||||
| 3) выбирает маршрут по SNI
|
||||
| 4) делает DNS для petrovich.ru
|
||||
| 5) подключается к полученному IP по TLS с этим SNI
|
||||
v
|
||||
"Оригинальный" сайт, A-запись которого не на telemt
|
||||
|
|
||||
X не telemt -> Telegram-клиент не коннектится как ожидалось
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Почему указанный в клиенте IP/домен telemt "не спасает"
|
||||
|
||||
Потому что в таком режиме xray/sing-box выступает как **точка терминации TCP/TLS**, можно сказать - TLS-инспектор на уровне ClientHello, это означает:
|
||||
|
||||
* TCP-сессия от Telegram-клиента заканчивается на xray/sing-box
|
||||
* Дальше создаётся **новая** TCP-сессия "от имени" xray/sing-box к апстриму
|
||||
* Выбор апстрима делается правилами роутинга, а в TLS-сценариях самый удобный и распространённый ключ — **SNI**
|
||||
|
||||
То есть, "куда идти дальше" определяется логикой L7-роутера:
|
||||
|
||||
* либо правилами вида `if SNI == petrovich.ru -> outbound X`,
|
||||
* либо более "автоматическим" поведением: `подключаться к тому хосту, который указан в SNI`,
|
||||
* плюс кэш DNS и собственные резолверы роутера
|
||||
|
||||
---
|
||||
|
||||
## Что именно извлекается из TLS ClientHello и почему этого достаточно
|
||||
|
||||
TLS ClientHello отправляется **в начале** TLS-сессии и, в классическом TLS без ECH, содержит SNI в открытом виде.
|
||||
|
||||
Упрощённо:
|
||||
|
||||
```text
|
||||
ClientHello:
|
||||
- supported_versions
|
||||
- cipher_suites
|
||||
- extensions:
|
||||
- server_name: petrovich.ru <-- SNI
|
||||
- alpn: h2/http1.1/...
|
||||
- ...
|
||||
```
|
||||
|
||||
Роутеру не нужно расшифровывать трафик и завершать TLS "как сервер" — часто достаточно просто прочитать первые пакеты и распарсить ClientHello, чтобы получить SNI и принять решение
|
||||
|
||||
---
|
||||
|
||||
## Типовой алгоритм SNI-роутинга
|
||||
|
||||
1. Принять входящий TCP.
|
||||
2. Подождать первые байты.
|
||||
3. Определить протокол:
|
||||
|
||||
* если видим TLS ClientHello → парсим SNI/ALPN
|
||||
4. Применить route rules:
|
||||
|
||||
* match по `server_name` / `domain` / `tls.sni`
|
||||
5. Выбрать outbound:
|
||||
|
||||
* direct / proxy / specific upstream / detour
|
||||
6. Установить исходящее соединение:
|
||||
|
||||
* либо на фиксированный IP:порт,
|
||||
* либо на домен через DNS-resolve на стороне роутера
|
||||
7. Начать проксирование данных между входом и выходом
|
||||
|
||||
---
|
||||
|
||||
## Почему "A-запись фронтинг-домена не на telemt" ломает кейс
|
||||
|
||||
### Ситуация
|
||||
|
||||
* В ClientHello: `SNI = petrovich.ru`
|
||||
* DNS: `petrovich.ru -> 203.0.113.77` - "оригинальный" сайт
|
||||
* telemt живёт на: `198.51.100.10`
|
||||
|
||||
### Что делает роутер
|
||||
|
||||
* Видит SNI `petrovich.ru`
|
||||
* Либо:
|
||||
|
||||
* (а) напрямую коннектится к `petrovich.ru:443`, резолвя A-запись в `203.0.113.77`,
|
||||
* либо:
|
||||
* (б) выбирает outbound, который указывает на `petrovich.ru` как destination,
|
||||
* либо:
|
||||
* (в) делает sniffing/override destination по SNI
|
||||
|
||||
В итоге исходящий коннект идёт на `203.0.113.77:443`, а не на telemt!
|
||||
Другой сервер, другой протокол, другая логика, где telemt не участвует
|
||||
|
||||
---
|
||||
|
||||
## "Где именно происходит подмена destination на SNI"
|
||||
|
||||
Это зависит от конфигурации, но типовые варианты:
|
||||
|
||||
### Вариант A: outbound задан доменом (и он совпадает с SNI)
|
||||
|
||||
Правило по SNI выбирает outbound, у которого destination задан доменом фронтинга,
|
||||
тогда DNS резолвится на стороне роутера и вы уходите на "оригинальный" хост
|
||||
|
||||
### Вариант B: destination override / sniffing
|
||||
|
||||
Роутер "снифает" SNI и **перезаписывает** destination на домен из SNI (даже если вход изначально был на IP telemt),
|
||||
это особенно коварно: пользователь видит "я подключаюсь к IP telemt", но роутер после sniffing решает иначе
|
||||
|
||||
### Вариант C: split DNS / кеш / независимый резолвер
|
||||
|
||||
Даже если клиент "где-то" резолвит иначе, это не важно: конечный DNS для исходящего коннекта — на стороне xray/sing-box,
|
||||
который может иметь:
|
||||
|
||||
* свой DoH/DoT,
|
||||
* свой кеш,
|
||||
* свои правила fake-ip / system resolver,
|
||||
* и, как следствие, свою "карту" **домен/SNI -> IP**
|
||||
|
||||
---
|
||||
|
||||
## Признаки того, что трафик "утёк на оригинал", а не попал в telemt
|
||||
|
||||
* На стороне telemt отсутствуют входящие соединения/логи
|
||||
* На стороне роутера видно, что destination — домен фронтинга, а IP соответствует публичному сайту
|
||||
* TLS-метрики/сертификат на выходе соответствуют "оригинальному" сайту в записях трафика
|
||||
* Telegram-клиент получает неожиданный тип ответов/ошибку handshaking/timeout в debug-режиме
|
||||
|
||||
---
|
||||
|
||||
## Best-practice решение для этого кейса: свой домен фронтинга + заглушка на telemt + Let's Encrypt
|
||||
|
||||
### Цель
|
||||
|
||||
Сделать так, чтобы:
|
||||
|
||||
* SNI (фронтинг-домен) **резолвился в IP telemt**,
|
||||
* на IP telemt реально был TLS-сервис с валидным сертификатом под этот домен,
|
||||
* даже если кто-то "попробует открыть домен как сайт", он увидит нормальную заглушку, а не "пустоту"
|
||||
|
||||
### Что это даёт
|
||||
|
||||
* xray/sing-box, маршрутизируя по SNI, будет неизбежно приходить на telemt, потому что DNS(SNI-домен) → IP telemt
|
||||
* Внешний вид будет правдоподобным: обычный домен с обычным сертификатом
|
||||
* Устойчивость: меньше сюрпризов от DNS-кеша/перерезолва/"умных" правил роутера
|
||||
|
||||
---
|
||||
|
||||
## Рекомендуемая схема (целевое состояние)
|
||||
|
||||
```text
|
||||
Telegram Client
|
||||
|
|
||||
| TLS ClientHello: SNI = hello.example.com
|
||||
v
|
||||
xray-core / sing-box
|
||||
|
|
||||
| Route by SNI -> outbound -> connect to hello.example.com:443
|
||||
| DNS(hello.example.com) = IP telemt
|
||||
v
|
||||
telemt instance (IP telemt)
|
||||
|
|
||||
| TLS cert for hello.example.com (Let's Encrypt)
|
||||
| + сайт-заглушка / health endpoint
|
||||
v
|
||||
OK
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Практический чеклист (минимальный)
|
||||
|
||||
1. Купить/иметь домен: `hello.example.com`
|
||||
2. В DNS:
|
||||
|
||||
* `A hello.example.com -> <IP telemt>`
|
||||
* (опционально) AAAA, если используете IPv6 и он стабилен
|
||||
3. На telemt-хосте:
|
||||
|
||||
* поднять TLS endpoint на 443 с валидным сертификатом LE под `hello.example.com`
|
||||
* отдать "заглушку" (например, статический сайт), чтобы домен выглядел как обычный веб-сервис
|
||||
4. В xray/sing-box правилах:
|
||||
|
||||
* маршрутизировать нужный трафик по SNI = `hello.example.com` в "правильный" outbound (к telemt)
|
||||
* избегать конфигураций, где destination override уводит на чужой домен
|
||||
5. Важно:
|
||||
|
||||
* если вы используете кеш DNS на роутере — сбросить/обновить его после смены A-записи
|
||||
|
||||
---
|
||||
|
||||
## Пояснение про сайт-заглушку
|
||||
|
||||
Для эмуляции TLS, telemt имеет подсистему TLS-F в `src/tls_front`:
|
||||
- её модуль - fetcher, собирает TLS-профили, чтобы максимально поведенчески корректно повторять TLS конкретно указанного сайта
|
||||
|
||||
Когда вы указываете сайт, который не отвечает по TLS:
|
||||
- fetcher не может собрать TLS-профиль и происходит fallback на `fake_cert_len` - примитивный алгоритм,
|
||||
- он забивает служебную информацию TLS рандомными байтами,
|
||||
- простые системы DPI не распознают это
|
||||
- однако, продвинутые системы, такие как nEdge или Fraud Control в сетях мобильной связи легко заблокируют или замедлят такой трафик
|
||||
|
||||
Создав сайт-заглушку с Let's Encrypt сертификатом, вы даёте TLS-F возможность получить данные сертификата и корректно его "повторять" в дальнейшем
|
||||
|
||||
---
|
||||
|
||||
## Вариант конфиг-подхода: "SNI строго привязываем к telemt - фиксированный IP"
|
||||
|
||||
Чтобы полностью исключить зависимость от DNS, если вам это нужно, можно сделать outbound, который ходит на **фиксированный IP telemt**, но при этом выставляет SNI/Host как `hello.example.com`.
|
||||
|
||||
Идея:
|
||||
|
||||
* destination: `IP:443`
|
||||
* SNI: `hello.example.com`
|
||||
* сертификат на telemt именно под `hello.example.com`
|
||||
|
||||
Так вы получаете:
|
||||
|
||||
* TLS выглядит корректно, ведь SNI совпадает с сертификатом,
|
||||
* а routing никогда не уйдёт на "оригинал", потому что A-запись указывает на telemt и контролируется вами!
|
||||
|
||||
Но в вашем описании проблема как раз в том, что роутер "сам решает по SNI и резолвит домен", поэтому самый универсальный вариант — сделать так, чтобы DNS всегда приводил в telemt
|
||||
|
||||
---
|
||||
|
||||
## Пример логики правил на псевдоконфиге L7-роутера
|
||||
|
||||
```text
|
||||
if inbound is TLS and sni == "hello.example.com":
|
||||
route -> outbound "telemt"
|
||||
else:
|
||||
route -> outbound "default"
|
||||
```
|
||||
|
||||
Outbound `telemt`:
|
||||
|
||||
* destination: `hello.example.com:443`
|
||||
* TLS enabled
|
||||
* SNI: `hello.example.com`
|
||||
|
||||
---
|
||||
|
||||
## Отдельно: что может неожиданно сломать даже "правильный" DNS
|
||||
|
||||
* **Кеширование DNS** на xray/sing-box или на системном резолвере, особенно при смене A-записи
|
||||
* **Split-horizon DNS**: разные ответы внутри/снаружи, попытки подмены/терминирования в других точках
|
||||
* **IPv6**: если есть AAAA и он указывает не туда, роутер может предпочесть IPv6: помните, что поддержка v6 нестабильна и не рекомендуется в prod
|
||||
* **DoH/DoT** на роутере: он может резолвить не тем резолвером, которым вы проверяли
|
||||
|
||||
Минимальная гигиена:
|
||||
|
||||
* контролировать A/AAAA,
|
||||
* держать TTL разумным,
|
||||
* проверять, каким резолвером пользуется именно роутер,
|
||||
* при необходимости отключить/ограничить destination override
|
||||
|
||||
---
|
||||
|
||||
## Итог
|
||||
|
||||
В режиме TLS-fronting с xray-core/sing-box как L7/TLS-роутером **SNI становится приоритетным "source-of-truth" для маршрутизации**
|
||||
|
||||
Если фронтинг-домен по DNS указывает не на IP telemt, роутер честно уводит трафик на "оригинальный" сайт, потому что он строит исходящее соединение "по SNI"
|
||||
|
||||
Надёжное решение для этого кейса:
|
||||
|
||||
* использовать **свой домен** для фронтинга,
|
||||
* направить его **A/AAAA** на IP telemt,
|
||||
* поднять на telemt **TLS-сервис с Let’s Encrypt сертификатом** под этот домен,
|
||||
* (желательно) держать **сайт-заглушку**, чтобы 443 выглядел как обычный HTTPS
|
||||
285
docs/model/MODEL.en.md
Normal file
285
docs/model/MODEL.en.md
Normal file
@@ -0,0 +1,285 @@
|
||||
# Telemt Runtime Model
|
||||
|
||||
## Scope
|
||||
This document defines runtime concepts used by the Middle-End (ME) transport pipeline and the orchestration logic around it.
|
||||
|
||||
It focuses on:
|
||||
- `ME Pool / Reader / Writer / Refill / Registry`
|
||||
- `Adaptive Floor`
|
||||
- `Trio-State`
|
||||
- `Generation Lifecycle`
|
||||
|
||||
## Core Entities
|
||||
|
||||
### ME Pool
|
||||
`ME Pool` is the runtime orchestrator for all Middle-End writers.
|
||||
|
||||
Responsibilities:
|
||||
- Holds writer inventory by DC/family/endpoint.
|
||||
- Maintains routing primitives and writer selection policy.
|
||||
- Tracks generation state (`active`, `warm`, `draining` context).
|
||||
- Applies runtime policies (floor mode, refill, reconnect, reinit, fallback behavior).
|
||||
- Exposes readiness gates used by admission logic (for conditional accept/cast behavior).
|
||||
|
||||
Non-goals:
|
||||
- It does not own client protocol decoding.
|
||||
- It does not own per-client business policy (quotas/limits).
|
||||
|
||||
### ME Writer
|
||||
`ME Writer` is a long-lived ME RPC tunnel bound to one concrete ME endpoint (`ip:port`), with:
|
||||
- Outbound command channel (send path).
|
||||
- Associated reader loop (inbound path).
|
||||
- Health/degraded flags.
|
||||
- Contour/state and generation metadata.
|
||||
|
||||
A writer is the actual data plane carrier for client sessions once bound.
|
||||
|
||||
### ME Reader
|
||||
`ME Reader` is the inbound parser/dispatcher for one writer:
|
||||
- Reads/decrypts ME RPC frames.
|
||||
- Validates sequence/checksum.
|
||||
- Routes payloads to client-connection channels via `Registry`.
|
||||
- Emits close/ack/data events and updates telemetry.
|
||||
|
||||
Design intent:
|
||||
- Reader must stay non-blocking as much as possible.
|
||||
- Backpressure on a single client route must not stall the whole writer stream.
|
||||
|
||||
### Refill
|
||||
`Refill` is the recovery mechanism that restores writer coverage when capacity drops:
|
||||
- Per-endpoint restore (same endpoint first).
|
||||
- Per-DC restore to satisfy required floor.
|
||||
- Optional outage-mode/shadow behavior for fragile single-endpoint DCs.
|
||||
|
||||
Refill works asynchronously and should not block hot routing paths.
|
||||
|
||||
### Registry
|
||||
`Registry` is the routing index between ME and client sessions:
|
||||
- `conn_id -> client response channel`
|
||||
- `conn_id <-> writer_id` binding map
|
||||
- writer activity snapshots and idle tracking
|
||||
|
||||
Main invariants:
|
||||
- A `conn_id` routes to at most one active response channel.
|
||||
- Writer loss triggers safe unbind/cleanup and close propagation.
|
||||
- Registry state is the source of truth for active ME-bound session mapping.
|
||||
|
||||
## Adaptive Floor
|
||||
|
||||
### What it is
|
||||
`Adaptive Floor` is a runtime policy that changes target writer count per DC based on observed activity, instead of always holding static peak floor.
|
||||
|
||||
### Why it exists
|
||||
Goals:
|
||||
- Reduce idle writer churn under low traffic.
|
||||
- Keep enough warm capacity to avoid client-visible stalls on burst recovery.
|
||||
- Limit needless reconnect storms on unstable endpoints.
|
||||
|
||||
### Behavioral model
|
||||
- Under activity: floor converges toward configured static requirement.
|
||||
- Under prolonged idle: floor can shrink to a safe minimum.
|
||||
- Recovery/grace windows prevent aggressive oscillation.
|
||||
|
||||
### Safety constraints
|
||||
- Never violate minimal survivability floor for a DC group.
|
||||
- Refill must still restore quickly on demand.
|
||||
- Floor adaptation must not force-drop already bound healthy sessions.
|
||||
|
||||
## Trio-State
|
||||
|
||||
`Trio-State` is writer contouring:
|
||||
- `Warm`
|
||||
- `Active`
|
||||
- `Draining`
|
||||
|
||||
### State semantics
|
||||
- `Warm`: connected and validated, not primary for new binds.
|
||||
- `Active`: preferred for new binds and normal traffic.
|
||||
- `Draining`: no new regular binds; existing sessions continue until graceful retirement rules apply.
|
||||
|
||||
### Transition intent
|
||||
- `Warm -> Active`: when coverage/readiness conditions are satisfied.
|
||||
- `Active -> Draining`: on generation swap, endpoint replacement, or controlled retirement.
|
||||
- `Draining -> removed`: after drain TTL/force-close policy (or when naturally empty).
|
||||
|
||||
This separation reduces SPOF and keeps cutovers predictable.
|
||||
|
||||
## Generation Lifecycle
|
||||
|
||||
Generation isolates pool epochs during reinit/reconfiguration.
|
||||
|
||||
### Lifecycle phases
|
||||
1. `Bootstrap`: initial writers are established.
|
||||
2. `Warmup`: next generation writers are created and validated.
|
||||
3. `Activation`: generation promoted to active when coverage gate passes.
|
||||
4. `Drain`: previous generation becomes draining, existing sessions are allowed to finish.
|
||||
5. `Retire`: old generation writers are removed after graceful rules.
|
||||
|
||||
### Operational guarantees
|
||||
- No partial generation activation without minimum coverage.
|
||||
- Existing healthy client sessions should not be dropped just because a new generation appears.
|
||||
- Draining generation exists to absorb in-flight traffic during swap.
|
||||
|
||||
### Readiness and admission
|
||||
Pool readiness is not equivalent to “all endpoints fully saturated”.
|
||||
Typical gating strategy:
|
||||
- Open admission when per-DC minimal alive coverage exists.
|
||||
- Continue background saturation for multi-endpoint DCs.
|
||||
|
||||
This keeps startup latency low while preserving eventual full capacity.
|
||||
|
||||
## Interactions Between Concepts
|
||||
|
||||
- `Generation` defines pool epochs.
|
||||
- `Trio-State` defines per-writer role inside/around those epochs.
|
||||
- `Adaptive Floor` defines how much capacity should be maintained right now.
|
||||
- `Refill` is the actuator that closes the gap between desired and current capacity.
|
||||
- `Registry` keeps per-session routing correctness while all of the above changes over time.
|
||||
|
||||
## Architectural Approach
|
||||
|
||||
### Layered Design
|
||||
The runtime is intentionally split into two planes:
|
||||
- `Control Plane`: decides desired topology and policy (`floor`, `generation swap`, `refill`, `fallback`).
|
||||
- `Data Plane`: executes packet/session transport (`reader`, `writer`, routing, acks, close propagation).
|
||||
|
||||
Architectural rule:
|
||||
- Control Plane may change writer inventory and policy.
|
||||
- Data Plane must remain stable and low-latency while those changes happen.
|
||||
|
||||
### Ownership Model
|
||||
Ownership is centered around explicit state domains:
|
||||
- `MePool` owns writer lifecycle and policy state.
|
||||
- `Registry` owns per-connection routing bindings.
|
||||
- `Writer task` owns outbound ME socket send progression.
|
||||
- `Reader task` owns inbound ME socket parsing and event dispatch.
|
||||
|
||||
This prevents accidental cross-layer mutation and keeps invariants local.
|
||||
|
||||
### Control Plane Responsibilities
|
||||
Control Plane is event-driven and policy-driven:
|
||||
- Startup initialization and readiness gates.
|
||||
- Runtime reinit (periodic or config-triggered).
|
||||
- Coverage checks per DC/family/endpoint group.
|
||||
- Floor enforcement (static/adaptive).
|
||||
- Refill scheduling and retry orchestration.
|
||||
- Generation transition (`warm -> active`, previous `active -> draining`).
|
||||
|
||||
Control Plane must prioritize determinism over short-term aggressiveness.
|
||||
|
||||
### Data Plane Responsibilities
|
||||
Data Plane is throughput-first and allocation-sensitive:
|
||||
- Session bind to writer.
|
||||
- Per-frame parsing/validation and dispatch.
|
||||
- Ack and close signal propagation.
|
||||
- Route drop behavior under missing connection or closed channel.
|
||||
- Minimal critical logging in hot path.
|
||||
|
||||
Data Plane should avoid waiting on operations that are not strictly required for frame correctness.
|
||||
|
||||
## Concurrency and Synchronization
|
||||
|
||||
### Concurrency Principles
|
||||
- Per-writer isolation: each writer has independent send/read task loops.
|
||||
- Per-connection isolation: client channel state is scoped by `conn_id`.
|
||||
- Asynchronous recovery: refill/reconnect runs outside the packet hot path.
|
||||
|
||||
### Synchronization Strategy
|
||||
- Shared maps use fine-grained, short-lived locking.
|
||||
- Read-mostly paths avoid broad write-lock windows.
|
||||
- Backpressure decisions are localized at route/channel boundary.
|
||||
|
||||
Design target:
|
||||
- A slow consumer should degrade only itself (or its route), not global writer progress.
|
||||
|
||||
### Cancellation and Shutdown
|
||||
Writer and reader loops are cancellation-aware:
|
||||
- explicit cancel token / close command support;
|
||||
- safe unbind and cleanup via registry;
|
||||
- deterministic order: stop admission -> drain/close -> release resources.
|
||||
|
||||
## Consistency Model
|
||||
|
||||
### Session Consistency
|
||||
For one `conn_id`:
|
||||
- exactly one active route target at a time;
|
||||
- close and unbind must be idempotent;
|
||||
- writer loss must not leave dangling bindings.
|
||||
|
||||
### Generation Consistency
|
||||
Generational consistency guarantees:
|
||||
- New generation is not promoted before minimum coverage gate.
|
||||
- Previous generation remains available in `draining` state during handover.
|
||||
- Forced retirement is policy-bound (`drain ttl`, optional force-close), not immediate.
|
||||
|
||||
### Policy Consistency
|
||||
Policy changes (`adaptive/static floor`, fallback mode, retries) should apply without violating established active-session routing invariants.
|
||||
|
||||
## Backpressure and Flow Control
|
||||
|
||||
### Route-Level Backpressure
|
||||
Route channels are bounded by design.
|
||||
When pressure increases:
|
||||
- short burst absorption is allowed;
|
||||
- prolonged congestion triggers controlled drop semantics;
|
||||
- drop accounting is explicit via metrics/counters.
|
||||
|
||||
### Reader Non-Blocking Priority
|
||||
Inbound ME reader path should never be serialized behind one congested client route.
|
||||
Practical implication:
|
||||
- prefer non-blocking route attempt in the parser loop;
|
||||
- move heavy recovery to async side paths.
|
||||
|
||||
## Failure Domain Strategy
|
||||
|
||||
### Endpoint-Level Failure
|
||||
Failure of one endpoint should trigger endpoint-scoped recovery first:
|
||||
- same endpoint reconnect;
|
||||
- endpoint replacement within same DC group if applicable.
|
||||
|
||||
### DC-Level Degradation
|
||||
If a DC group cannot satisfy floor:
|
||||
- keep service via remaining coverage if policy allows;
|
||||
- continue asynchronous refill saturation in background.
|
||||
|
||||
### Whole-Pool Readiness Loss
|
||||
If no sufficient ME coverage exists:
|
||||
- admission gate can hold new accepts (conditional policy);
|
||||
- existing sessions should continue when their path remains healthy.
|
||||
|
||||
## Performance Architecture Notes
|
||||
|
||||
### Hotpath Discipline
|
||||
Allowed in hotpath:
|
||||
- fixed-size parsing and cheap validation;
|
||||
- bounded channel operations;
|
||||
- precomputed or low-allocation access patterns.
|
||||
|
||||
Avoid in hotpath:
|
||||
- repeated expensive decoding;
|
||||
- broad locks with awaits inside critical sections;
|
||||
- verbose high-frequency logging.
|
||||
|
||||
### Throughput Stability Over Peak Spikes
|
||||
Architecture prefers stable throughput and predictable latency over short peak gains that increase churn or long-tail reconnect times.
|
||||
|
||||
## Evolution and Extension Rules
|
||||
|
||||
To evolve this model safely:
|
||||
- Add new policy knobs in Control Plane first.
|
||||
- Keep Data Plane contracts stable (`conn_id`, route semantics, close semantics).
|
||||
- Validate generation and registry invariants before enabling by default.
|
||||
- Introduce new retry/recovery strategies behind explicit config.
|
||||
|
||||
## Failure and Recovery Notes
|
||||
|
||||
- Single-endpoint DC failure is a normal degraded mode case; policy should prioritize fast reconnect and optional shadow/probing strategies.
|
||||
- Idle close by peer should be treated as expected when upstream enforces idle timeout.
|
||||
- Reconnect backoff must protect against synchronized churn while still allowing fast first retries.
|
||||
- Fallback (`ME -> direct DC`) is a policy switch, not a transport bug by itself.
|
||||
|
||||
## Terminology Summary
|
||||
- `Coverage`: enough live writers to satisfy per-DC acceptance policy.
|
||||
- `Floor`: target minimum writer count policy.
|
||||
- `Churn`: frequent writer reconnect/remove cycles.
|
||||
- `Hotpath`: per-packet/per-connection data path where extra waits/allocations are expensive.
|
||||
285
docs/model/MODEL.ru.md
Normal file
285
docs/model/MODEL.ru.md
Normal file
@@ -0,0 +1,285 @@
|
||||
# Runtime-модель Telemt
|
||||
|
||||
## Область описания
|
||||
Документ фиксирует ключевые runtime-понятия пайплайна Middle-End (ME) и оркестрации вокруг него.
|
||||
|
||||
Фокус:
|
||||
- `ME Pool / Reader / Writer / Refill / Registry`
|
||||
- `Adaptive Floor`
|
||||
- `Trio-State`
|
||||
- `Generation Lifecycle`
|
||||
|
||||
## Базовые сущности
|
||||
|
||||
### ME Pool
|
||||
`ME Pool` — центральный оркестратор всех Middle-End writer-ов.
|
||||
|
||||
Зона ответственности:
|
||||
- хранит инвентарь writer-ов по DC/family/endpoint;
|
||||
- управляет выбором writer-а и маршрутизацией;
|
||||
- ведёт состояние поколений (`active`, `warm`, `draining` контекст);
|
||||
- применяет runtime-политики (floor, refill, reconnect, reinit, fallback);
|
||||
- отдаёт сигналы готовности для admission-логики (conditional accept/cast).
|
||||
|
||||
Что не делает:
|
||||
- не декодирует клиентский протокол;
|
||||
- не реализует бизнес-политику пользователя (квоты/лимиты).
|
||||
|
||||
### ME Writer
|
||||
`ME Writer` — долгоживущий ME RPC-канал к конкретному endpoint (`ip:port`), у которого есть:
|
||||
- канал команд на отправку;
|
||||
- связанный reader loop для входящего потока;
|
||||
- флаги состояния/деградации;
|
||||
- метаданные contour/state и generation.
|
||||
|
||||
Writer — это фактический data-plane носитель клиентских сессий после бинда.
|
||||
|
||||
### ME Reader
|
||||
`ME Reader` — входной parser/dispatcher одного writer-а:
|
||||
- читает и расшифровывает ME RPC-фреймы;
|
||||
- проверяет sequence/checksum;
|
||||
- маршрутизирует payload в client-каналы через `Registry`;
|
||||
- обрабатывает close/ack/data и обновляет телеметрию.
|
||||
|
||||
Инженерный принцип:
|
||||
- Reader должен оставаться неблокирующим.
|
||||
- Backpressure одной клиентской сессии не должен останавливать весь поток writer-а.
|
||||
|
||||
### Refill
|
||||
`Refill` — механизм восстановления покрытия writer-ов при просадке:
|
||||
- восстановление на том же endpoint в первую очередь;
|
||||
- восстановление по DC до требуемого floor;
|
||||
- опциональные outage/shadow-режимы для хрупких single-endpoint DC.
|
||||
|
||||
Refill работает асинхронно и не должен блокировать hotpath.
|
||||
|
||||
### Registry
|
||||
`Registry` — маршрутизационный индекс между ME и клиентскими сессиями:
|
||||
- `conn_id -> канал ответа клиенту`;
|
||||
- map биндов `conn_id <-> writer_id`;
|
||||
- снимки активности writer-ов и idle-трекинг.
|
||||
|
||||
Ключевые инварианты:
|
||||
- один `conn_id` маршрутизируется максимум в один активный канал ответа;
|
||||
- потеря writer-а приводит к безопасному unbind/cleanup и отправке close;
|
||||
- именно `Registry` является источником истины по активным ME-биндам.
|
||||
|
||||
## Adaptive Floor
|
||||
|
||||
### Что это
|
||||
`Adaptive Floor` — runtime-политика, которая динамически меняет целевое число writer-ов на DC в зависимости от активности, а не держит всегда фиксированный статический floor.
|
||||
|
||||
### Зачем
|
||||
Цели:
|
||||
- уменьшить churn на idle-трафике;
|
||||
- сохранить достаточную прогретую ёмкость для быстрых всплесков;
|
||||
- снизить лишние reconnect-штормы на нестабильных endpoint.
|
||||
|
||||
### Модель поведения
|
||||
- при активности floor стремится к статическому требованию;
|
||||
- при длительном idle floor может снижаться до безопасного минимума;
|
||||
- grace/recovery окна не дают системе "флапать" слишком резко.
|
||||
|
||||
### Ограничения безопасности
|
||||
- нельзя нарушать минимальный floor выживаемости DC-группы;
|
||||
- refill обязан быстро нарастить покрытие по запросу;
|
||||
- адаптация не должна принудительно ронять уже привязанные healthy-сессии.
|
||||
|
||||
## Trio-State
|
||||
|
||||
`Trio-State` — контурная роль writer-а:
|
||||
- `Warm`
|
||||
- `Active`
|
||||
- `Draining`
|
||||
|
||||
### Семантика состояний
|
||||
- `Warm`: writer подключён и валиден, но не основной для новых биндов.
|
||||
- `Active`: приоритетный для новых биндов и обычного трафика.
|
||||
- `Draining`: новые обычные бинды не назначаются; текущие сессии живут до правил graceful-вывода.
|
||||
|
||||
### Логика переходов
|
||||
- `Warm -> Active`: когда достигнуты условия покрытия/готовности.
|
||||
- `Active -> Draining`: при swap поколения, замене endpoint или контролируемом выводе.
|
||||
- `Draining -> removed`: после drain TTL/force-close политики (или естественного опустошения).
|
||||
|
||||
Такое разделение снижает SPOF-риски и делает cutover предсказуемым.
|
||||
|
||||
## Generation Lifecycle
|
||||
|
||||
Generation изолирует эпохи пула при reinit/reconfiguration.
|
||||
|
||||
### Фазы жизненного цикла
|
||||
1. `Bootstrap`: поднимается начальный набор writer-ов.
|
||||
2. `Warmup`: создаётся и валидируется новое поколение.
|
||||
3. `Activation`: новое поколение становится active после прохождения coverage-gate.
|
||||
4. `Drain`: предыдущее поколение переводится в draining, текущим сессиям дают завершиться.
|
||||
5. `Retire`: старое поколение удаляется по graceful-правилам.
|
||||
|
||||
### Операционные гарантии
|
||||
- нельзя активировать поколение частично без минимального покрытия;
|
||||
- healthy-клиенты не должны теряться только из-за появления нового поколения;
|
||||
- draining-поколение служит буфером для in-flight трафика во время swap.
|
||||
|
||||
### Готовность и приём клиентов
|
||||
Готовность пула не равна "все endpoint полностью насыщены".
|
||||
Типичная стратегия:
|
||||
- открыть admission при минимально достаточном alive-покрытии по DC;
|
||||
- параллельно продолжать saturation для multi-endpoint DC.
|
||||
|
||||
Это уменьшает startup latency и сохраняет выход на полную ёмкость.
|
||||
|
||||
## Как понятия связаны между собой
|
||||
|
||||
- `Generation` задаёт эпохи пула.
|
||||
- `Trio-State` задаёт роль каждого writer-а внутри/между эпохами.
|
||||
- `Adaptive Floor` задаёт, сколько ёмкости нужно сейчас.
|
||||
- `Refill` — исполнитель, который закрывает разницу между desired и current capacity.
|
||||
- `Registry` гарантирует корректную маршрутизацию сессий, пока всё выше меняется.
|
||||
|
||||
## Архитектурный подход
|
||||
|
||||
### Слоистая модель
|
||||
Runtime специально разделён на две плоскости:
|
||||
- `Control Plane`: принимает решения о целевой топологии и политиках (`floor`, `generation swap`, `refill`, `fallback`).
|
||||
- `Data Plane`: исполняет транспорт сессий и пакетов (`reader`, `writer`, маршрутизация, ack, close).
|
||||
|
||||
Ключевое правило:
|
||||
- Control Plane может менять состав writer-ов и policy.
|
||||
- Data Plane должен оставаться стабильным и низколатентным в момент этих изменений.
|
||||
|
||||
### Модель владения состоянием
|
||||
Владение разделено по доменам:
|
||||
- `MePool` владеет жизненным циклом writer-ов и policy-state.
|
||||
- `Registry` владеет routing-биндами клиентских сессий.
|
||||
- `Writer task` владеет исходящей прогрессией ME-сокета.
|
||||
- `Reader task` владеет входящим парсингом и dispatch-событиями.
|
||||
|
||||
Это ограничивает побочные мутации и локализует инварианты.
|
||||
|
||||
### Обязанности Control Plane
|
||||
Control Plane работает событийно и policy-ориентированно:
|
||||
- стартовая инициализация и readiness-gate;
|
||||
- runtime reinit (периодический и/или по изменению конфигурации);
|
||||
- проверки покрытия по DC/family/endpoint group;
|
||||
- применение floor-политики (static/adaptive);
|
||||
- планирование refill и orchestration retry;
|
||||
- переходы поколений (`warm -> active`, прежний `active -> draining`).
|
||||
|
||||
Для него важнее детерминизм, чем агрессивная краткосрочная реакция.
|
||||
|
||||
### Обязанности Data Plane
|
||||
Data Plane ориентирован на пропускную способность и предсказуемую задержку:
|
||||
- bind клиентской сессии к writer-у;
|
||||
- per-frame parsing/validation/dispatch;
|
||||
- распространение ack/close;
|
||||
- корректная реакция на missing conn/closed channel;
|
||||
- минимальный лог-шум в hotpath.
|
||||
|
||||
Data Plane не должен ждать операций, не критичных для корректности текущего фрейма.
|
||||
|
||||
## Конкурентность и синхронизация
|
||||
|
||||
### Принципы конкурентности
|
||||
- Изоляция по writer-у: у каждого writer-а независимые send/read loop.
|
||||
- Изоляция по сессии: состояние канала локально для `conn_id`.
|
||||
- Асинхронное восстановление: refill/reconnect выполняются вне пакетного hotpath.
|
||||
|
||||
### Стратегия синхронизации
|
||||
- Для shared map используются короткие и узкие lock-секции.
|
||||
- Read-heavy пути избегают длительных write-lock окон.
|
||||
- Решения по backpressure локализованы на границе route/channel.
|
||||
|
||||
Цель:
|
||||
- медленный consumer должен деградировать локально, не останавливая глобальный прогресс writer-а.
|
||||
|
||||
### Cancellation и shutdown
|
||||
Reader/Writer loop должны быть cancellation-aware:
|
||||
- явные cancel token / close command;
|
||||
- безопасный unbind/cleanup через registry;
|
||||
- детерминированный порядок: stop admission -> drain/close -> release resources.
|
||||
|
||||
## Модель согласованности
|
||||
|
||||
### Согласованность сессии
|
||||
Для одного `conn_id`:
|
||||
- одновременно ровно один активный route-target;
|
||||
- close/unbind операции идемпотентны;
|
||||
- потеря writer-а не оставляет dangling-бинды.
|
||||
|
||||
### Согласованность поколения
|
||||
Гарантии generation:
|
||||
- новое поколение не активируется до прохождения минимального coverage-gate;
|
||||
- предыдущее поколение остаётся в `draining` на время handover;
|
||||
- принудительный вывод writer-ов ограничен policy (`drain ttl`, optional force-close), а не мгновенный.
|
||||
|
||||
### Согласованность политик
|
||||
Изменение policy (`adaptive/static floor`, fallback mode, retries) не должно ломать инварианты маршрутизации уже активных сессий.
|
||||
|
||||
## Backpressure и управление потоком
|
||||
|
||||
### Route-level backpressure
|
||||
Route-каналы намеренно bounded.
|
||||
При росте нагрузки:
|
||||
- кратковременный burst поглощается;
|
||||
- длительная перегрузка переходит в контролируемую drop-семантику;
|
||||
- все drop-сценарии должны быть прозрачно видны в метриках.
|
||||
|
||||
### Приоритет неблокирующего Reader
|
||||
Входящий ME-reader path не должен сериализоваться из-за одной перегруженной клиентской сессии.
|
||||
Практически это означает:
|
||||
- использовать неблокирующую попытку route в parser loop;
|
||||
- выносить тяжёлое восстановление в асинхронные side-path.
|
||||
|
||||
## Стратегия доменов отказа
|
||||
|
||||
### Отказ отдельного endpoint
|
||||
Сначала применяется endpoint-local recovery:
|
||||
- reconnect в тот же endpoint;
|
||||
- затем замена endpoint внутри той же DC-группы (если доступно).
|
||||
|
||||
### Деградация уровня DC
|
||||
Если DC-группа не набирает floor:
|
||||
- сервис сохраняется на остаточном покрытии (если policy разрешает);
|
||||
- saturation refill продолжается асинхронно в фоне.
|
||||
|
||||
### Потеря готовности всего пула
|
||||
Если достаточного ME-покрытия нет:
|
||||
- admission gate может временно закрыть приём новых подключений (conditional policy);
|
||||
- уже активные сессии продолжают работать, пока их маршрут остаётся healthy.
|
||||
|
||||
## Архитектурные заметки по производительности
|
||||
|
||||
### Дисциплина hotpath
|
||||
Допустимо в hotpath:
|
||||
- фиксированный и дешёвый parsing/validation;
|
||||
- bounded channel operations;
|
||||
- precomputed/low-allocation доступ к данным.
|
||||
|
||||
Нежелательно в hotpath:
|
||||
- повторные дорогие decode;
|
||||
- широкие lock-секции с `await` внутри;
|
||||
- высокочастотный подробный logging.
|
||||
|
||||
### Стабильность важнее пиков
|
||||
Архитектура приоритетно выбирает стабильную пропускную способность и предсказуемую latency, а не краткосрочные пики ценой churn и long-tail reconnect.
|
||||
|
||||
## Правила эволюции модели
|
||||
|
||||
Чтобы расширять модель безопасно:
|
||||
- новые policy knobs сначала внедрять в Control Plane;
|
||||
- контракты Data Plane (`conn_id`, route/close семантика) держать стабильными;
|
||||
- перед дефолтным включением проверять generation/registry инварианты;
|
||||
- новые recovery/retry стратегии вводить через явный config-флаг.
|
||||
|
||||
## Нюансы отказов и восстановления
|
||||
|
||||
- падение single-endpoint DC — штатный деградированный сценарий; приоритет: быстрый reconnect и, при необходимости, shadow/probing;
|
||||
- idle-close со стороны peer должен считаться нормальным событием при upstream idle-timeout;
|
||||
- backoff reconnect-логики должен ограничивать синхронный churn, но сохранять быстрые первые попытки;
|
||||
- fallback (`ME -> direct DC`) — это переключаемая policy-ветка, а не автоматический признак бага транспорта.
|
||||
|
||||
## Краткий словарь
|
||||
- `Coverage`: достаточное число живых writer-ов для политики приёма по DC.
|
||||
- `Floor`: целевая минимальная ёмкость writer-ов.
|
||||
- `Churn`: частые циклы reconnect/remove writer-ов.
|
||||
- `Hotpath`: пер-пакетный/пер-коннектный путь, где любые лишние ожидания и аллокации особенно дороги.
|
||||
73
install.sh
Normal file
73
install.sh
Normal file
@@ -0,0 +1,73 @@
|
||||
sudo bash -c '
|
||||
set -e
|
||||
|
||||
# --- Проверка на существующую установку ---
|
||||
if systemctl list-unit-files | grep -q telemt.service; then
|
||||
# --- РЕЖИМ ОБНОВЛЕНИЯ ---
|
||||
echo "--- Обнаружена существующая установка Telemt. Запускаю обновление... ---"
|
||||
|
||||
echo "[*] Остановка службы telemt..."
|
||||
systemctl stop telemt || true # Игнорируем ошибку, если служба уже остановлена
|
||||
|
||||
echo "[1/2] Скачивание последней версии Telemt..."
|
||||
wget -qO- "https://github.com/telemt/telemt/releases/latest/download/telemt-$(uname -m)-linux-$(ldd --version 2>&1 | grep -iq musl && echo musl || echo gnu).tar.gz" | tar -xz
|
||||
|
||||
echo "[1/2] Замена исполняемого файла в /usr/local/bin..."
|
||||
mv telemt /usr/local/bin/telemt
|
||||
chmod +x /usr/local/bin/telemt
|
||||
|
||||
echo "[2/2] Запуск службы..."
|
||||
systemctl start telemt
|
||||
|
||||
echo "--- Обновление Telemt успешно завершено! ---"
|
||||
echo
|
||||
echo "Для проверки статуса службы выполните:"
|
||||
echo " systemctl status telemt"
|
||||
|
||||
else
|
||||
# --- РЕЖИМ НОВОЙ УСТАНОВКИ ---
|
||||
echo "--- Начало автоматической установки Telemt ---"
|
||||
|
||||
# Шаг 1: Скачивание и установка бинарного файла
|
||||
echo "[1/5] Скачивание последней версии Telemt..."
|
||||
wget -qO- "https://github.com/telemt/telemt/releases/latest/download/telemt-$(uname -m)-linux-$(ldd --version 2>&1 | grep -iq musl && echo musl || echo gnu).tar.gz" | tar -xz
|
||||
|
||||
echo "[1/5] Перемещение исполняемого файла в /usr/local/bin и установка прав..."
|
||||
mv telemt /usr/local/bin/telemt
|
||||
chmod +x /usr/local/bin/telemt
|
||||
|
||||
# Шаг 2: Генерация секрета
|
||||
echo "[2/5] Генерация секретного ключа..."
|
||||
SECRET=$(openssl rand -hex 16)
|
||||
|
||||
# Шаг 3: Создание файла конфигурации
|
||||
echo "[3/5] Создание файла конфигурации /etc/telemt.toml..."
|
||||
printf "# === General Settings ===\n[general]\n[general.modes]\nclassic = false\nsecure = false\ntls = true\n\n# === Anti-Censorship & Masking ===\n[censorship]\n# !!! ВАЖНО: Замените на ваш домен или домен, который вы хотите использовать для маскировки !!!\ntls_domain = \"petrovich.ru\"\n\n[access.users]\nhello = \"%s\"\n" "$SECRET" > /etc/telemt.toml
|
||||
|
||||
# Шаг 4: Создание службы Systemd
|
||||
echo "[4/5] Создание службы systemd..."
|
||||
printf "[Unit]\nDescription=Telemt Proxy\nAfter=network.target\n\n[Service]\nType=simple\nExecStart=/usr/local/bin/telemt /etc/telemt.toml\nRestart=on-failure\nRestartSec=5\nLimitNOFILE=65536\n\n[Install]\nWantedBy=multi-user.target\n" > /etc/systemd/system/telemt.service
|
||||
|
||||
# Шаг 5: Запуск службы
|
||||
echo "[5/5] Перезагрузка systemd, запуск и включение службы telemt..."
|
||||
systemctl daemon-reload
|
||||
systemctl start telemt
|
||||
systemctl enable telemt
|
||||
|
||||
echo "--- Установка и запуск Telemt успешно завершены! ---"
|
||||
echo
|
||||
echo "ВАЖНАЯ ИНФОРМАЦИЯ:"
|
||||
echo "==================="
|
||||
echo "1. Вам НЕОБХОДИМО отредактировать файл /etc/telemt.toml и заменить '\''petrovich.ru'\'' на другой домен"
|
||||
echo " с помощью команды:"
|
||||
echo " nano /etc/telemt.toml"
|
||||
echo " После редактирования файла перезапустите службу командой:"
|
||||
echo " sudo systemctl restart telemt"
|
||||
echo
|
||||
echo "2. Для проверки статуса службы выполните команду:"
|
||||
echo " systemctl status telemt"
|
||||
echo
|
||||
echo "3. Для получения ссылок на подключение выполните команду:"
|
||||
echo " journalctl -u telemt -n -g '\''links'\'' --no-pager -o cat | tac"
|
||||
fi
|
||||
'
|
||||
107
src/api/config_store.rs
Normal file
107
src/api/config_store.rs
Normal file
@@ -0,0 +1,107 @@
|
||||
use std::io::Write;
|
||||
use std::path::{Path, PathBuf};
|
||||
|
||||
use hyper::header::IF_MATCH;
|
||||
use sha2::{Digest, Sha256};
|
||||
|
||||
use crate::config::ProxyConfig;
|
||||
|
||||
use super::model::ApiFailure;
|
||||
|
||||
pub(super) fn parse_if_match(headers: &hyper::HeaderMap) -> Option<String> {
|
||||
headers
|
||||
.get(IF_MATCH)
|
||||
.and_then(|value| value.to_str().ok())
|
||||
.map(str::trim)
|
||||
.filter(|value| !value.is_empty())
|
||||
.map(|value| value.trim_matches('"').to_string())
|
||||
}
|
||||
|
||||
pub(super) async fn ensure_expected_revision(
|
||||
config_path: &Path,
|
||||
expected_revision: Option<&str>,
|
||||
) -> Result<(), ApiFailure> {
|
||||
let Some(expected) = expected_revision else {
|
||||
return Ok(());
|
||||
};
|
||||
let current = current_revision(config_path).await?;
|
||||
if current != expected {
|
||||
return Err(ApiFailure::new(
|
||||
hyper::StatusCode::CONFLICT,
|
||||
"revision_conflict",
|
||||
"Config revision mismatch",
|
||||
));
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub(super) async fn current_revision(config_path: &Path) -> Result<String, ApiFailure> {
|
||||
let content = tokio::fs::read_to_string(config_path)
|
||||
.await
|
||||
.map_err(|e| ApiFailure::internal(format!("failed to read config: {}", e)))?;
|
||||
Ok(compute_revision(&content))
|
||||
}
|
||||
|
||||
pub(super) fn compute_revision(content: &str) -> String {
|
||||
let mut hasher = Sha256::new();
|
||||
hasher.update(content.as_bytes());
|
||||
hex::encode(hasher.finalize())
|
||||
}
|
||||
|
||||
pub(super) async fn load_config_from_disk(config_path: &Path) -> Result<ProxyConfig, ApiFailure> {
|
||||
let config_path = config_path.to_path_buf();
|
||||
tokio::task::spawn_blocking(move || ProxyConfig::load(config_path))
|
||||
.await
|
||||
.map_err(|e| ApiFailure::internal(format!("failed to join config loader: {}", e)))?
|
||||
.map_err(|e| ApiFailure::internal(format!("failed to load config: {}", e)))
|
||||
}
|
||||
|
||||
pub(super) async fn save_config_to_disk(
|
||||
config_path: &Path,
|
||||
cfg: &ProxyConfig,
|
||||
) -> Result<String, ApiFailure> {
|
||||
let serialized = toml::to_string_pretty(cfg)
|
||||
.map_err(|e| ApiFailure::internal(format!("failed to serialize config: {}", e)))?;
|
||||
write_atomic(config_path.to_path_buf(), serialized.clone()).await?;
|
||||
Ok(compute_revision(&serialized))
|
||||
}
|
||||
|
||||
async fn write_atomic(path: PathBuf, contents: String) -> Result<(), ApiFailure> {
|
||||
tokio::task::spawn_blocking(move || write_atomic_sync(&path, &contents))
|
||||
.await
|
||||
.map_err(|e| ApiFailure::internal(format!("failed to join writer: {}", e)))?
|
||||
.map_err(|e| ApiFailure::internal(format!("failed to write config: {}", e)))
|
||||
}
|
||||
|
||||
fn write_atomic_sync(path: &Path, contents: &str) -> std::io::Result<()> {
|
||||
let parent = path.parent().unwrap_or_else(|| Path::new("."));
|
||||
std::fs::create_dir_all(parent)?;
|
||||
|
||||
let tmp_name = format!(
|
||||
".{}.tmp-{}",
|
||||
path.file_name()
|
||||
.and_then(|s| s.to_str())
|
||||
.unwrap_or("config.toml"),
|
||||
rand::random::<u64>()
|
||||
);
|
||||
let tmp_path = parent.join(tmp_name);
|
||||
|
||||
let write_result = (|| {
|
||||
let mut file = std::fs::OpenOptions::new()
|
||||
.create_new(true)
|
||||
.write(true)
|
||||
.open(&tmp_path)?;
|
||||
file.write_all(contents.as_bytes())?;
|
||||
file.sync_all()?;
|
||||
std::fs::rename(&tmp_path, path)?;
|
||||
if let Ok(dir) = std::fs::File::open(parent) {
|
||||
let _ = dir.sync_all();
|
||||
}
|
||||
Ok(())
|
||||
})();
|
||||
|
||||
if write_result.is_err() {
|
||||
let _ = std::fs::remove_file(&tmp_path);
|
||||
}
|
||||
write_result
|
||||
}
|
||||
90
src/api/events.rs
Normal file
90
src/api/events.rs
Normal file
@@ -0,0 +1,90 @@
|
||||
use std::collections::VecDeque;
|
||||
use std::sync::Mutex;
|
||||
use std::time::{SystemTime, UNIX_EPOCH};
|
||||
|
||||
use serde::Serialize;
|
||||
|
||||
/// A single recorded control-plane event.
#[derive(Clone, Serialize)]
pub(super) struct ApiEventRecord {
    /// Monotonically increasing sequence number, starting at 1.
    pub(super) seq: u64,
    /// Wall-clock timestamp of the event, seconds since the Unix epoch.
    pub(super) ts_epoch_secs: u64,
    /// Event name, e.g. "api.user.create.ok".
    pub(super) event_type: String,
    /// Short free-form context string (truncated to 256 bytes on record).
    pub(super) context: String,
}
|
||||
|
||||
/// Point-in-time view of the event buffer, as returned to API clients.
#[derive(Clone, Serialize)]
pub(super) struct ApiEventSnapshot {
    /// Configured ring-buffer capacity.
    pub(super) capacity: usize,
    /// Total number of events evicted so far to make room for newer ones.
    pub(super) dropped_total: u64,
    /// The selected events, oldest first.
    pub(super) events: Vec<ApiEventRecord>,
}
|
||||
|
||||
/// Mutable state behind [`ApiEventStore`]'s mutex.
struct ApiEventsInner {
    /// Maximum number of retained events (>= 16, fixed at construction).
    capacity: usize,
    /// Count of events evicted from the front of the ring.
    dropped_total: u64,
    /// Sequence number to assign to the next recorded event.
    next_seq: u64,
    /// The ring buffer itself; oldest event at the front.
    events: VecDeque<ApiEventRecord>,
}
|
||||
|
||||
/// Bounded ring-buffer for control-plane API/runtime events.
///
/// Thread-safe: all access is funneled through the internal mutex.
pub(crate) struct ApiEventStore {
    inner: Mutex<ApiEventsInner>,
}
|
||||
|
||||
impl ApiEventStore {
|
||||
pub(super) fn new(capacity: usize) -> Self {
|
||||
let bounded = capacity.max(16);
|
||||
Self {
|
||||
inner: Mutex::new(ApiEventsInner {
|
||||
capacity: bounded,
|
||||
dropped_total: 0,
|
||||
next_seq: 1,
|
||||
events: VecDeque::with_capacity(bounded),
|
||||
}),
|
||||
}
|
||||
}
|
||||
|
||||
pub(super) fn record(&self, event_type: &str, context: impl Into<String>) {
|
||||
let now_epoch_secs = SystemTime::now()
|
||||
.duration_since(UNIX_EPOCH)
|
||||
.unwrap_or_default()
|
||||
.as_secs();
|
||||
let mut context = context.into();
|
||||
if context.len() > 256 {
|
||||
context.truncate(256);
|
||||
}
|
||||
|
||||
let mut guard = self.inner.lock().expect("api event store mutex poisoned");
|
||||
if guard.events.len() == guard.capacity {
|
||||
guard.events.pop_front();
|
||||
guard.dropped_total = guard.dropped_total.saturating_add(1);
|
||||
}
|
||||
let seq = guard.next_seq;
|
||||
guard.next_seq = guard.next_seq.saturating_add(1);
|
||||
guard.events.push_back(ApiEventRecord {
|
||||
seq,
|
||||
ts_epoch_secs: now_epoch_secs,
|
||||
event_type: event_type.to_string(),
|
||||
context,
|
||||
});
|
||||
}
|
||||
|
||||
pub(super) fn snapshot(&self, limit: usize) -> ApiEventSnapshot {
|
||||
let guard = self.inner.lock().expect("api event store mutex poisoned");
|
||||
let bounded_limit = limit.clamp(1, guard.capacity.max(1));
|
||||
let mut items: Vec<ApiEventRecord> = guard
|
||||
.events
|
||||
.iter()
|
||||
.rev()
|
||||
.take(bounded_limit)
|
||||
.cloned()
|
||||
.collect();
|
||||
items.reverse();
|
||||
|
||||
ApiEventSnapshot {
|
||||
capacity: guard.capacity,
|
||||
dropped_total: guard.dropped_total,
|
||||
events: items,
|
||||
}
|
||||
}
|
||||
}
|
||||
91
src/api/http_utils.rs
Normal file
91
src/api/http_utils.rs
Normal file
@@ -0,0 +1,91 @@
|
||||
use http_body_util::{BodyExt, Full};
|
||||
use hyper::StatusCode;
|
||||
use hyper::body::{Bytes, Incoming};
|
||||
use serde::Serialize;
|
||||
use serde::de::DeserializeOwned;
|
||||
|
||||
use super::model::{ApiFailure, ErrorBody, ErrorResponse, SuccessResponse};
|
||||
|
||||
pub(super) fn success_response<T: Serialize>(
|
||||
status: StatusCode,
|
||||
data: T,
|
||||
revision: String,
|
||||
) -> hyper::Response<Full<Bytes>> {
|
||||
let payload = SuccessResponse {
|
||||
ok: true,
|
||||
data,
|
||||
revision,
|
||||
};
|
||||
let body = serde_json::to_vec(&payload).unwrap_or_else(|_| b"{\"ok\":false}".to_vec());
|
||||
hyper::Response::builder()
|
||||
.status(status)
|
||||
.header("content-type", "application/json; charset=utf-8")
|
||||
.body(Full::new(Bytes::from(body)))
|
||||
.unwrap()
|
||||
}
|
||||
|
||||
pub(super) fn error_response(
|
||||
request_id: u64,
|
||||
failure: ApiFailure,
|
||||
) -> hyper::Response<Full<Bytes>> {
|
||||
let payload = ErrorResponse {
|
||||
ok: false,
|
||||
error: ErrorBody {
|
||||
code: failure.code,
|
||||
message: failure.message,
|
||||
},
|
||||
request_id,
|
||||
};
|
||||
let body = serde_json::to_vec(&payload).unwrap_or_else(|_| {
|
||||
format!(
|
||||
"{{\"ok\":false,\"error\":{{\"code\":\"internal_error\",\"message\":\"serialization failed\"}},\"request_id\":{}}}",
|
||||
request_id
|
||||
)
|
||||
.into_bytes()
|
||||
});
|
||||
hyper::Response::builder()
|
||||
.status(failure.status)
|
||||
.header("content-type", "application/json; charset=utf-8")
|
||||
.body(Full::new(Bytes::from(body)))
|
||||
.unwrap()
|
||||
}
|
||||
|
||||
pub(super) async fn read_json<T: DeserializeOwned>(
|
||||
body: Incoming,
|
||||
limit: usize,
|
||||
) -> Result<T, ApiFailure> {
|
||||
let bytes = read_body_with_limit(body, limit).await?;
|
||||
serde_json::from_slice(&bytes).map_err(|_| ApiFailure::bad_request("Invalid JSON body"))
|
||||
}
|
||||
|
||||
pub(super) async fn read_optional_json<T: DeserializeOwned>(
|
||||
body: Incoming,
|
||||
limit: usize,
|
||||
) -> Result<Option<T>, ApiFailure> {
|
||||
let bytes = read_body_with_limit(body, limit).await?;
|
||||
if bytes.is_empty() {
|
||||
return Ok(None);
|
||||
}
|
||||
serde_json::from_slice(&bytes)
|
||||
.map(Some)
|
||||
.map_err(|_| ApiFailure::bad_request("Invalid JSON body"))
|
||||
}
|
||||
|
||||
async fn read_body_with_limit(body: Incoming, limit: usize) -> Result<Vec<u8>, ApiFailure> {
|
||||
let mut collected = Vec::new();
|
||||
let mut body = body;
|
||||
while let Some(frame_result) = body.frame().await {
|
||||
let frame = frame_result.map_err(|_| ApiFailure::bad_request("Invalid request body"))?;
|
||||
if let Some(chunk) = frame.data_ref() {
|
||||
if collected.len().saturating_add(chunk.len()) > limit {
|
||||
return Err(ApiFailure::new(
|
||||
StatusCode::PAYLOAD_TOO_LARGE,
|
||||
"payload_too_large",
|
||||
format!("Body exceeds {} bytes", limit),
|
||||
));
|
||||
}
|
||||
collected.extend_from_slice(chunk);
|
||||
}
|
||||
}
|
||||
Ok(collected)
|
||||
}
|
||||
529
src/api/mod.rs
Normal file
529
src/api/mod.rs
Normal file
@@ -0,0 +1,529 @@
|
||||
use std::convert::Infallible;
|
||||
use std::net::{IpAddr, SocketAddr};
|
||||
use std::path::PathBuf;
|
||||
use std::sync::Arc;
|
||||
use std::sync::atomic::{AtomicBool, AtomicU64, Ordering};
|
||||
|
||||
use http_body_util::Full;
|
||||
use hyper::body::{Bytes, Incoming};
|
||||
use hyper::header::AUTHORIZATION;
|
||||
use hyper::server::conn::http1;
|
||||
use hyper::service::service_fn;
|
||||
use hyper::{Method, Request, Response, StatusCode};
|
||||
use tokio::net::TcpListener;
|
||||
use tokio::sync::{Mutex, watch};
|
||||
use tracing::{debug, info, warn};
|
||||
|
||||
use crate::config::ProxyConfig;
|
||||
use crate::ip_tracker::UserIpTracker;
|
||||
use crate::stats::Stats;
|
||||
use crate::transport::middle_proxy::MePool;
|
||||
use crate::transport::UpstreamManager;
|
||||
|
||||
mod config_store;
|
||||
mod events;
|
||||
mod http_utils;
|
||||
mod model;
|
||||
mod runtime_edge;
|
||||
mod runtime_min;
|
||||
mod runtime_stats;
|
||||
mod runtime_watch;
|
||||
mod runtime_zero;
|
||||
mod users;
|
||||
|
||||
use config_store::{current_revision, parse_if_match};
|
||||
use http_utils::{error_response, read_json, read_optional_json, success_response};
|
||||
use events::ApiEventStore;
|
||||
use model::{
|
||||
ApiFailure, CreateUserRequest, HealthData, PatchUserRequest, RotateSecretRequest, SummaryData,
|
||||
};
|
||||
use runtime_edge::{
|
||||
EdgeConnectionsCacheEntry, build_runtime_connections_summary_data,
|
||||
build_runtime_events_recent_data,
|
||||
};
|
||||
use runtime_min::{
|
||||
build_runtime_me_pool_state_data, build_runtime_me_quality_data, build_runtime_nat_stun_data,
|
||||
build_runtime_upstream_quality_data, build_security_whitelist_data,
|
||||
};
|
||||
use runtime_stats::{
|
||||
MinimalCacheEntry, build_dcs_data, build_me_writers_data, build_minimal_all_data,
|
||||
build_upstreams_data, build_zero_all_data,
|
||||
};
|
||||
use runtime_zero::{
|
||||
build_limits_effective_data, build_runtime_gates_data, build_security_posture_data,
|
||||
build_system_info_data,
|
||||
};
|
||||
use runtime_watch::spawn_runtime_watchers;
|
||||
use users::{create_user, delete_user, patch_user, rotate_secret, users_from_config};
|
||||
|
||||
/// Runtime counters exposed through the API.
///
/// Initialized in `serve` and shared with the tasks started by
/// `spawn_runtime_watchers`, which presumably keep the atomics current —
/// see `runtime_watch` for the update logic.
pub(super) struct ApiRuntimeState {
    /// Wall-clock time (Unix seconds) at which this process started.
    pub(super) process_started_at_epoch_secs: u64,
    /// Number of config reloads observed; starts at 0.
    pub(super) config_reload_count: AtomicU64,
    /// Unix seconds of the most recent config reload; 0 until the first one.
    pub(super) last_config_reload_epoch_secs: AtomicU64,
    /// Whether new client connections are currently admitted; seeded from the
    /// admission watch channel at startup.
    pub(super) admission_open: AtomicBool,
}
|
||||
|
||||
/// Bundle of handles shared by every API request handler.
///
/// Cloned per connection in `serve`; all heavyweight members are behind
/// `Arc`, so clones are cheap reference bumps.
#[derive(Clone)]
pub(super) struct ApiShared {
    /// Global traffic/connection counters.
    pub(super) stats: Arc<Stats>,
    /// Per-user source-IP tracking (used when building user listings).
    pub(super) ip_tracker: Arc<UserIpTracker>,
    /// Middle-proxy pool; `None` when no pool was provided at startup.
    pub(super) me_pool: Option<Arc<MePool>>,
    /// Upstream connection manager.
    pub(super) upstream_manager: Arc<UpstreamManager>,
    /// Path of the config file; hashed to produce the `revision` field.
    pub(super) config_path: PathBuf,
    /// Public IPs detected at startup, passed through to user listings.
    pub(super) startup_detected_ip_v4: Option<IpAddr>,
    pub(super) startup_detected_ip_v6: Option<IpAddr>,
    /// NOTE(review): presumably serializes config-mutating endpoints
    /// (create/patch/delete/rotate) — confirm against `users`.
    pub(super) mutation_lock: Arc<Mutex<()>>,
    /// Cache used by the minimal-stats endpoints (see `runtime_stats`).
    pub(super) minimal_cache: Arc<Mutex<Option<MinimalCacheEntry>>>,
    /// Cache and recompute guard for the edge-connections summary
    /// (see `runtime_edge`).
    pub(super) runtime_edge_connections_cache: Arc<Mutex<Option<EdgeConnectionsCacheEntry>>>,
    pub(super) runtime_edge_recompute_lock: Arc<Mutex<()>>,
    /// Ring buffer backing /v1/runtime/events/recent.
    pub(super) runtime_events: Arc<ApiEventStore>,
    /// Monotonic id generator; each request gets an id for error envelopes.
    pub(super) request_id: Arc<AtomicU64>,
    /// Counters maintained by the runtime watchers.
    pub(super) runtime_state: Arc<ApiRuntimeState>,
}
|
||||
|
||||
impl ApiShared {
    /// Hands out the next request id. Relaxed ordering is sufficient:
    /// only uniqueness matters, not ordering relative to other memory.
    fn next_request_id(&self) -> u64 {
        self.request_id.fetch_add(1, Ordering::Relaxed)
    }
}
|
||||
|
||||
/// Runs the HTTP control-plane API on `listen` until the process exits.
///
/// Binds a TCP listener (logging and returning early on bind failure, which
/// disables the API without taking down the proxy), builds the shared state,
/// spawns the runtime watcher tasks, then accepts connections forever,
/// serving each on its own task over HTTP/1.
pub async fn serve(
    listen: SocketAddr,
    stats: Arc<Stats>,
    ip_tracker: Arc<UserIpTracker>,
    me_pool: Option<Arc<MePool>>,
    upstream_manager: Arc<UpstreamManager>,
    config_rx: watch::Receiver<Arc<ProxyConfig>>,
    admission_rx: watch::Receiver<bool>,
    config_path: PathBuf,
    startup_detected_ip_v4: Option<IpAddr>,
    startup_detected_ip_v6: Option<IpAddr>,
    process_started_at_epoch_secs: u64,
) {
    // Bind failure is non-fatal for the process as a whole.
    let listener = match TcpListener::bind(listen).await {
        Ok(listener) => listener,
        Err(error) => {
            warn!(
                error = %error,
                listen = %listen,
                "Failed to bind API listener"
            );
            return;
        }
    };

    info!("API endpoint: http://{}/v1/*", listen);

    let runtime_state = Arc::new(ApiRuntimeState {
        process_started_at_epoch_secs,
        config_reload_count: AtomicU64::new(0),
        last_config_reload_epoch_secs: AtomicU64::new(0),
        // Seed from the current admission flag so early reads are accurate.
        admission_open: AtomicBool::new(*admission_rx.borrow()),
    });

    let shared = Arc::new(ApiShared {
        stats,
        ip_tracker,
        me_pool,
        upstream_manager,
        config_path,
        startup_detected_ip_v4,
        startup_detected_ip_v6,
        mutation_lock: Arc::new(Mutex::new(())),
        minimal_cache: Arc::new(Mutex::new(None)),
        runtime_edge_connections_cache: Arc::new(Mutex::new(None)),
        runtime_edge_recompute_lock: Arc::new(Mutex::new(())),
        // Event-buffer capacity is taken from the config seen at startup;
        // later reloads do not resize the buffer.
        runtime_events: Arc::new(ApiEventStore::new(
            config_rx.borrow().server.api.runtime_edge_events_capacity,
        )),
        request_id: Arc::new(AtomicU64::new(1)),
        runtime_state: runtime_state.clone(),
    });

    // Background tasks observing config reloads and admission changes
    // (see `runtime_watch`).
    spawn_runtime_watchers(
        config_rx.clone(),
        admission_rx.clone(),
        runtime_state.clone(),
        shared.runtime_events.clone(),
    );

    loop {
        let (stream, peer) = match listener.accept().await {
            Ok(v) => v,
            Err(error) => {
                warn!(error = %error, "API accept error");
                continue;
            }
        };

        // One task per connection; every request gets fresh clones of the
        // shared state and the live config receiver.
        let shared_conn = shared.clone();
        let config_rx_conn = config_rx.clone();
        tokio::spawn(async move {
            let svc = service_fn(move |req: Request<Incoming>| {
                let shared_req = shared_conn.clone();
                let config_rx_req = config_rx_conn.clone();
                async move { handle(req, peer, shared_req, config_rx_req).await }
            });
            if let Err(error) = http1::Builder::new()
                .serve_connection(hyper_util::rt::TokioIo::new(stream), svc)
                .await
            {
                debug!(error = %error, "API connection error");
            }
        });
    }
}
|
||||
|
||||
async fn handle(
|
||||
req: Request<Incoming>,
|
||||
peer: SocketAddr,
|
||||
shared: Arc<ApiShared>,
|
||||
config_rx: watch::Receiver<Arc<ProxyConfig>>,
|
||||
) -> Result<Response<Full<Bytes>>, Infallible> {
|
||||
let request_id = shared.next_request_id();
|
||||
let cfg = config_rx.borrow().clone();
|
||||
let api_cfg = &cfg.server.api;
|
||||
|
||||
if !api_cfg.enabled {
|
||||
return Ok(error_response(
|
||||
request_id,
|
||||
ApiFailure::new(
|
||||
StatusCode::SERVICE_UNAVAILABLE,
|
||||
"api_disabled",
|
||||
"API is disabled",
|
||||
),
|
||||
));
|
||||
}
|
||||
|
||||
if !api_cfg.whitelist.is_empty()
|
||||
&& !api_cfg
|
||||
.whitelist
|
||||
.iter()
|
||||
.any(|net| net.contains(peer.ip()))
|
||||
{
|
||||
return Ok(error_response(
|
||||
request_id,
|
||||
ApiFailure::new(StatusCode::FORBIDDEN, "forbidden", "Source IP is not allowed"),
|
||||
));
|
||||
}
|
||||
|
||||
if !api_cfg.auth_header.is_empty() {
|
||||
let auth_ok = req
|
||||
.headers()
|
||||
.get(AUTHORIZATION)
|
||||
.and_then(|v| v.to_str().ok())
|
||||
.map(|v| v == api_cfg.auth_header)
|
||||
.unwrap_or(false);
|
||||
if !auth_ok {
|
||||
return Ok(error_response(
|
||||
request_id,
|
||||
ApiFailure::new(
|
||||
StatusCode::UNAUTHORIZED,
|
||||
"unauthorized",
|
||||
"Missing or invalid Authorization header",
|
||||
),
|
||||
));
|
||||
}
|
||||
}
|
||||
|
||||
let method = req.method().clone();
|
||||
let path = req.uri().path().to_string();
|
||||
let query = req.uri().query().map(str::to_string);
|
||||
let body_limit = api_cfg.request_body_limit_bytes;
|
||||
|
||||
let result: Result<Response<Full<Bytes>>, ApiFailure> = async {
|
||||
match (method.as_str(), path.as_str()) {
|
||||
("GET", "/v1/health") => {
|
||||
let revision = current_revision(&shared.config_path).await?;
|
||||
let data = HealthData {
|
||||
status: "ok",
|
||||
read_only: api_cfg.read_only,
|
||||
};
|
||||
Ok(success_response(StatusCode::OK, data, revision))
|
||||
}
|
||||
("GET", "/v1/system/info") => {
|
||||
let revision = current_revision(&shared.config_path).await?;
|
||||
let data = build_system_info_data(shared.as_ref(), cfg.as_ref(), &revision);
|
||||
Ok(success_response(StatusCode::OK, data, revision))
|
||||
}
|
||||
("GET", "/v1/runtime/gates") => {
|
||||
let revision = current_revision(&shared.config_path).await?;
|
||||
let data = build_runtime_gates_data(shared.as_ref(), cfg.as_ref());
|
||||
Ok(success_response(StatusCode::OK, data, revision))
|
||||
}
|
||||
("GET", "/v1/limits/effective") => {
|
||||
let revision = current_revision(&shared.config_path).await?;
|
||||
let data = build_limits_effective_data(cfg.as_ref());
|
||||
Ok(success_response(StatusCode::OK, data, revision))
|
||||
}
|
||||
("GET", "/v1/security/posture") => {
|
||||
let revision = current_revision(&shared.config_path).await?;
|
||||
let data = build_security_posture_data(cfg.as_ref());
|
||||
Ok(success_response(StatusCode::OK, data, revision))
|
||||
}
|
||||
("GET", "/v1/security/whitelist") => {
|
||||
let revision = current_revision(&shared.config_path).await?;
|
||||
let data = build_security_whitelist_data(cfg.as_ref());
|
||||
Ok(success_response(StatusCode::OK, data, revision))
|
||||
}
|
||||
("GET", "/v1/stats/summary") => {
|
||||
let revision = current_revision(&shared.config_path).await?;
|
||||
let data = SummaryData {
|
||||
uptime_seconds: shared.stats.uptime_secs(),
|
||||
connections_total: shared.stats.get_connects_all(),
|
||||
connections_bad_total: shared.stats.get_connects_bad(),
|
||||
handshake_timeouts_total: shared.stats.get_handshake_timeouts(),
|
||||
configured_users: cfg.access.users.len(),
|
||||
};
|
||||
Ok(success_response(StatusCode::OK, data, revision))
|
||||
}
|
||||
("GET", "/v1/stats/zero/all") => {
|
||||
let revision = current_revision(&shared.config_path).await?;
|
||||
let data = build_zero_all_data(&shared.stats, cfg.access.users.len());
|
||||
Ok(success_response(StatusCode::OK, data, revision))
|
||||
}
|
||||
("GET", "/v1/stats/upstreams") => {
|
||||
let revision = current_revision(&shared.config_path).await?;
|
||||
let data = build_upstreams_data(shared.as_ref(), api_cfg);
|
||||
Ok(success_response(StatusCode::OK, data, revision))
|
||||
}
|
||||
("GET", "/v1/stats/minimal/all") => {
|
||||
let revision = current_revision(&shared.config_path).await?;
|
||||
let data = build_minimal_all_data(shared.as_ref(), api_cfg).await;
|
||||
Ok(success_response(StatusCode::OK, data, revision))
|
||||
}
|
||||
("GET", "/v1/stats/me-writers") => {
|
||||
let revision = current_revision(&shared.config_path).await?;
|
||||
let data = build_me_writers_data(shared.as_ref(), api_cfg).await;
|
||||
Ok(success_response(StatusCode::OK, data, revision))
|
||||
}
|
||||
("GET", "/v1/stats/dcs") => {
|
||||
let revision = current_revision(&shared.config_path).await?;
|
||||
let data = build_dcs_data(shared.as_ref(), api_cfg).await;
|
||||
Ok(success_response(StatusCode::OK, data, revision))
|
||||
}
|
||||
("GET", "/v1/runtime/me_pool_state") => {
|
||||
let revision = current_revision(&shared.config_path).await?;
|
||||
let data = build_runtime_me_pool_state_data(shared.as_ref()).await;
|
||||
Ok(success_response(StatusCode::OK, data, revision))
|
||||
}
|
||||
("GET", "/v1/runtime/me_quality") => {
|
||||
let revision = current_revision(&shared.config_path).await?;
|
||||
let data = build_runtime_me_quality_data(shared.as_ref()).await;
|
||||
Ok(success_response(StatusCode::OK, data, revision))
|
||||
}
|
||||
("GET", "/v1/runtime/upstream_quality") => {
|
||||
let revision = current_revision(&shared.config_path).await?;
|
||||
let data = build_runtime_upstream_quality_data(shared.as_ref()).await;
|
||||
Ok(success_response(StatusCode::OK, data, revision))
|
||||
}
|
||||
("GET", "/v1/runtime/nat_stun") => {
|
||||
let revision = current_revision(&shared.config_path).await?;
|
||||
let data = build_runtime_nat_stun_data(shared.as_ref()).await;
|
||||
Ok(success_response(StatusCode::OK, data, revision))
|
||||
}
|
||||
("GET", "/v1/runtime/connections/summary") => {
|
||||
let revision = current_revision(&shared.config_path).await?;
|
||||
let data = build_runtime_connections_summary_data(shared.as_ref(), cfg.as_ref()).await;
|
||||
Ok(success_response(StatusCode::OK, data, revision))
|
||||
}
|
||||
("GET", "/v1/runtime/events/recent") => {
|
||||
let revision = current_revision(&shared.config_path).await?;
|
||||
let data = build_runtime_events_recent_data(
|
||||
shared.as_ref(),
|
||||
cfg.as_ref(),
|
||||
query.as_deref(),
|
||||
);
|
||||
Ok(success_response(StatusCode::OK, data, revision))
|
||||
}
|
||||
("GET", "/v1/stats/users") | ("GET", "/v1/users") => {
|
||||
let revision = current_revision(&shared.config_path).await?;
|
||||
let users = users_from_config(
|
||||
&cfg,
|
||||
&shared.stats,
|
||||
&shared.ip_tracker,
|
||||
shared.startup_detected_ip_v4,
|
||||
shared.startup_detected_ip_v6,
|
||||
)
|
||||
.await;
|
||||
Ok(success_response(StatusCode::OK, users, revision))
|
||||
}
|
||||
("POST", "/v1/users") => {
|
||||
if api_cfg.read_only {
|
||||
return Ok(error_response(
|
||||
request_id,
|
||||
ApiFailure::new(
|
||||
StatusCode::FORBIDDEN,
|
||||
"read_only",
|
||||
"API runs in read-only mode",
|
||||
),
|
||||
));
|
||||
}
|
||||
let expected_revision = parse_if_match(req.headers());
|
||||
let body = read_json::<CreateUserRequest>(req.into_body(), body_limit).await?;
|
||||
let result = create_user(body, expected_revision, &shared).await;
|
||||
let (data, revision) = match result {
|
||||
Ok(ok) => ok,
|
||||
Err(error) => {
|
||||
shared.runtime_events.record("api.user.create.failed", error.code);
|
||||
return Err(error);
|
||||
}
|
||||
};
|
||||
shared
|
||||
.runtime_events
|
||||
.record("api.user.create.ok", format!("username={}", data.user.username));
|
||||
Ok(success_response(StatusCode::CREATED, data, revision))
|
||||
}
|
||||
_ => {
|
||||
if let Some(user) = path.strip_prefix("/v1/users/")
|
||||
&& !user.is_empty()
|
||||
&& !user.contains('/')
|
||||
{
|
||||
if method == Method::GET {
|
||||
let revision = current_revision(&shared.config_path).await?;
|
||||
let users = users_from_config(
|
||||
&cfg,
|
||||
&shared.stats,
|
||||
&shared.ip_tracker,
|
||||
shared.startup_detected_ip_v4,
|
||||
shared.startup_detected_ip_v6,
|
||||
)
|
||||
.await;
|
||||
if let Some(user_info) = users.into_iter().find(|entry| entry.username == user)
|
||||
{
|
||||
return Ok(success_response(StatusCode::OK, user_info, revision));
|
||||
}
|
||||
return Ok(error_response(
|
||||
request_id,
|
||||
ApiFailure::new(StatusCode::NOT_FOUND, "not_found", "User not found"),
|
||||
));
|
||||
}
|
||||
if method == Method::PATCH {
|
||||
if api_cfg.read_only {
|
||||
return Ok(error_response(
|
||||
request_id,
|
||||
ApiFailure::new(
|
||||
StatusCode::FORBIDDEN,
|
||||
"read_only",
|
||||
"API runs in read-only mode",
|
||||
),
|
||||
));
|
||||
}
|
||||
let expected_revision = parse_if_match(req.headers());
|
||||
let body = read_json::<PatchUserRequest>(req.into_body(), body_limit).await?;
|
||||
let result = patch_user(user, body, expected_revision, &shared).await;
|
||||
let (data, revision) = match result {
|
||||
Ok(ok) => ok,
|
||||
Err(error) => {
|
||||
shared.runtime_events.record(
|
||||
"api.user.patch.failed",
|
||||
format!("username={} code={}", user, error.code),
|
||||
);
|
||||
return Err(error);
|
||||
}
|
||||
};
|
||||
shared
|
||||
.runtime_events
|
||||
.record("api.user.patch.ok", format!("username={}", data.username));
|
||||
return Ok(success_response(StatusCode::OK, data, revision));
|
||||
}
|
||||
if method == Method::DELETE {
|
||||
if api_cfg.read_only {
|
||||
return Ok(error_response(
|
||||
request_id,
|
||||
ApiFailure::new(
|
||||
StatusCode::FORBIDDEN,
|
||||
"read_only",
|
||||
"API runs in read-only mode",
|
||||
),
|
||||
));
|
||||
}
|
||||
let expected_revision = parse_if_match(req.headers());
|
||||
let result = delete_user(user, expected_revision, &shared).await;
|
||||
let (deleted_user, revision) = match result {
|
||||
Ok(ok) => ok,
|
||||
Err(error) => {
|
||||
shared.runtime_events.record(
|
||||
"api.user.delete.failed",
|
||||
format!("username={} code={}", user, error.code),
|
||||
);
|
||||
return Err(error);
|
||||
}
|
||||
};
|
||||
shared.runtime_events.record(
|
||||
"api.user.delete.ok",
|
||||
format!("username={}", deleted_user),
|
||||
);
|
||||
return Ok(success_response(StatusCode::OK, deleted_user, revision));
|
||||
}
|
||||
if method == Method::POST
|
||||
&& let Some(base_user) = user.strip_suffix("/rotate-secret")
|
||||
&& !base_user.is_empty()
|
||||
&& !base_user.contains('/')
|
||||
{
|
||||
if api_cfg.read_only {
|
||||
return Ok(error_response(
|
||||
request_id,
|
||||
ApiFailure::new(
|
||||
StatusCode::FORBIDDEN,
|
||||
"read_only",
|
||||
"API runs in read-only mode",
|
||||
),
|
||||
));
|
||||
}
|
||||
let expected_revision = parse_if_match(req.headers());
|
||||
let body =
|
||||
read_optional_json::<RotateSecretRequest>(req.into_body(), body_limit)
|
||||
.await?;
|
||||
let result = rotate_secret(
|
||||
base_user,
|
||||
body.unwrap_or_default(),
|
||||
expected_revision,
|
||||
&shared,
|
||||
)
|
||||
.await;
|
||||
let (data, revision) = match result {
|
||||
Ok(ok) => ok,
|
||||
Err(error) => {
|
||||
shared.runtime_events.record(
|
||||
"api.user.rotate_secret.failed",
|
||||
format!("username={} code={}", base_user, error.code),
|
||||
);
|
||||
return Err(error);
|
||||
}
|
||||
};
|
||||
shared.runtime_events.record(
|
||||
"api.user.rotate_secret.ok",
|
||||
format!("username={}", base_user),
|
||||
);
|
||||
return Ok(success_response(StatusCode::OK, data, revision));
|
||||
}
|
||||
if method == Method::POST {
|
||||
return Ok(error_response(
|
||||
request_id,
|
||||
ApiFailure::new(StatusCode::NOT_FOUND, "not_found", "Route not found"),
|
||||
));
|
||||
}
|
||||
return Ok(error_response(
|
||||
request_id,
|
||||
ApiFailure::new(
|
||||
StatusCode::METHOD_NOT_ALLOWED,
|
||||
"method_not_allowed",
|
||||
"Unsupported HTTP method for this route",
|
||||
),
|
||||
));
|
||||
}
|
||||
Ok(error_response(
|
||||
request_id,
|
||||
ApiFailure::new(StatusCode::NOT_FOUND, "not_found", "Route not found"),
|
||||
))
|
||||
}
|
||||
}
|
||||
}
|
||||
.await;
|
||||
|
||||
match result {
|
||||
Ok(resp) => Ok(resp),
|
||||
Err(error) => Ok(error_response(request_id, error)),
|
||||
}
|
||||
}
|
||||
444
src/api/model.rs
Normal file
444
src/api/model.rs
Normal file
@@ -0,0 +1,444 @@
|
||||
use std::net::IpAddr;
|
||||
|
||||
use chrono::{DateTime, Utc};
|
||||
use hyper::StatusCode;
|
||||
use rand::Rng;
|
||||
use serde::{Deserialize, Serialize};
|
||||
|
||||
// Upper bound on accepted usernames, in bytes. Not referenced in the visible
// part of this module; presumably enforced by the validation in the
// user-mutation handlers — confirm against `users`.
const MAX_USERNAME_LEN: usize = 64;
|
||||
|
||||
/// An API error: HTTP status plus a stable machine code and a human message.
/// Rendered into the JSON error envelope by `error_response`.
#[derive(Debug)]
pub(super) struct ApiFailure {
    /// HTTP status to respond with.
    pub(super) status: StatusCode,
    /// Stable, machine-readable error code (e.g. "bad_request").
    pub(super) code: &'static str,
    /// Human-readable description.
    pub(super) message: String,
}
|
||||
|
||||
impl ApiFailure {
|
||||
pub(super) fn new(status: StatusCode, code: &'static str, message: impl Into<String>) -> Self {
|
||||
Self {
|
||||
status,
|
||||
code,
|
||||
message: message.into(),
|
||||
}
|
||||
}
|
||||
|
||||
pub(super) fn internal(message: impl Into<String>) -> Self {
|
||||
Self::new(StatusCode::INTERNAL_SERVER_ERROR, "internal_error", message)
|
||||
}
|
||||
|
||||
pub(super) fn bad_request(message: impl Into<String>) -> Self {
|
||||
Self::new(StatusCode::BAD_REQUEST, "bad_request", message)
|
||||
}
|
||||
}
|
||||
|
||||
/// Error payload embedded inside every failed API response.
#[derive(Serialize)]
pub(super) struct ErrorBody {
    pub(super) code: &'static str,
    pub(super) message: String,
}

/// Top-level JSON envelope for failed requests.
#[derive(Serialize)]
pub(super) struct ErrorResponse {
    pub(super) ok: bool,
    pub(super) error: ErrorBody,
    pub(super) request_id: u64,
}

/// Top-level JSON envelope for successful requests.
// NOTE(review): `revision` appears tied to the If-Match / expected-revision
// handling in the user handlers (optimistic concurrency) — confirm.
#[derive(Serialize)]
pub(super) struct SuccessResponse<T> {
    pub(super) ok: bool,
    pub(super) data: T,
    pub(super) revision: String,
}

/// Payload of the health endpoint.
#[derive(Serialize)]
pub(super) struct HealthData {
    pub(super) status: &'static str,
    pub(super) read_only: bool,
}

/// Coarse proxy-wide counters for the summary endpoint.
#[derive(Serialize)]
pub(super) struct SummaryData {
    pub(super) uptime_seconds: f64,
    pub(super) connections_total: u64,
    pub(super) connections_bad_total: u64,
    pub(super) handshake_timeouts_total: u64,
    pub(super) configured_users: usize,
}
|
||||
|
||||
/// One (error code, occurrence count) pair.
#[derive(Serialize, Clone)]
pub(super) struct ZeroCodeCount {
    pub(super) code: i32,
    pub(super) total: u64,
}

/// Core counters plus telemetry switches for the "zero" metrics family.
#[derive(Serialize, Clone)]
pub(super) struct ZeroCoreData {
    pub(super) uptime_seconds: f64,
    pub(super) connections_total: u64,
    pub(super) connections_bad_total: u64,
    pub(super) handshake_timeouts_total: u64,
    pub(super) configured_users: usize,
    pub(super) telemetry_core_enabled: bool,
    pub(super) telemetry_user_enabled: bool,
    pub(super) telemetry_me_level: String,
}

/// Upstream connect counters, with attempt-count and duration histograms
/// flattened into fixed buckets.
#[derive(Serialize, Clone)]
pub(super) struct ZeroUpstreamData {
    pub(super) connect_attempt_total: u64,
    pub(super) connect_success_total: u64,
    pub(super) connect_fail_total: u64,
    pub(super) connect_failfast_hard_error_total: u64,
    // Attempts-per-connect histogram buckets.
    pub(super) connect_attempts_bucket_1: u64,
    pub(super) connect_attempts_bucket_2: u64,
    pub(super) connect_attempts_bucket_3_4: u64,
    pub(super) connect_attempts_bucket_gt_4: u64,
    // Connect-duration histogram, successful connects.
    pub(super) connect_duration_success_bucket_le_100ms: u64,
    pub(super) connect_duration_success_bucket_101_500ms: u64,
    pub(super) connect_duration_success_bucket_501_1000ms: u64,
    pub(super) connect_duration_success_bucket_gt_1000ms: u64,
    // Connect-duration histogram, failed connects.
    pub(super) connect_duration_fail_bucket_le_100ms: u64,
    pub(super) connect_duration_fail_bucket_101_500ms: u64,
    pub(super) connect_duration_fail_bucket_501_1000ms: u64,
    pub(super) connect_duration_fail_bucket_gt_1000ms: u64,
}
|
||||
|
||||
/// Per-DC view of one upstream: measured latency and IP-family preference.
#[derive(Serialize, Clone)]
pub(super) struct UpstreamDcStatus {
    pub(super) dc: i16,
    pub(super) latency_ema_ms: Option<f64>,
    pub(super) ip_preference: &'static str,
}

/// Status of a single configured upstream route.
#[derive(Serialize, Clone)]
pub(super) struct UpstreamStatus {
    pub(super) upstream_id: usize,
    pub(super) route_kind: &'static str,
    pub(super) address: String,
    pub(super) weight: u16,
    pub(super) scopes: String,
    pub(super) healthy: bool,
    pub(super) fails: u32,
    pub(super) last_check_age_secs: u64,
    pub(super) effective_latency_ms: Option<f64>,
    pub(super) dc: Vec<UpstreamDcStatus>,
}

/// Aggregate counts over all configured upstreams, by health and kind.
#[derive(Serialize, Clone)]
pub(super) struct UpstreamSummaryData {
    pub(super) configured_total: usize,
    pub(super) healthy_total: usize,
    pub(super) unhealthy_total: usize,
    pub(super) direct_total: usize,
    pub(super) socks4_total: usize,
    pub(super) socks5_total: usize,
}

/// Full upstreams endpoint payload; `summary`/`upstreams` are omitted from
/// the JSON when absent.
#[derive(Serialize, Clone)]
pub(super) struct UpstreamsData {
    pub(super) enabled: bool,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub(super) reason: Option<&'static str>,
    pub(super) generated_at_epoch_secs: u64,
    pub(super) zero: ZeroUpstreamData,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub(super) summary: Option<UpstreamSummaryData>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub(super) upstreams: Option<Vec<UpstreamStatus>>,
}
|
||||
|
||||
/// Middle-proxy (ME) counter snapshot: keepalive, RPC signalling, reconnect,
/// routing drops, KDF drift, hardswap and single-endpoint-outage counters.
#[derive(Serialize, Clone)]
pub(super) struct ZeroMiddleProxyData {
    pub(super) keepalive_sent_total: u64,
    pub(super) keepalive_failed_total: u64,
    pub(super) keepalive_pong_total: u64,
    pub(super) keepalive_timeout_total: u64,
    pub(super) rpc_proxy_req_signal_sent_total: u64,
    pub(super) rpc_proxy_req_signal_failed_total: u64,
    pub(super) rpc_proxy_req_signal_skipped_no_meta_total: u64,
    pub(super) rpc_proxy_req_signal_response_total: u64,
    pub(super) rpc_proxy_req_signal_close_sent_total: u64,
    pub(super) reconnect_attempt_total: u64,
    pub(super) reconnect_success_total: u64,
    pub(super) handshake_reject_total: u64,
    // Per-code breakdown of handshake rejections.
    pub(super) handshake_error_codes: Vec<ZeroCodeCount>,
    pub(super) reader_eof_total: u64,
    pub(super) idle_close_by_peer_total: u64,
    pub(super) route_drop_no_conn_total: u64,
    pub(super) route_drop_channel_closed_total: u64,
    pub(super) route_drop_queue_full_total: u64,
    pub(super) route_drop_queue_full_base_total: u64,
    pub(super) route_drop_queue_full_high_total: u64,
    pub(super) socks_kdf_strict_reject_total: u64,
    pub(super) socks_kdf_compat_fallback_total: u64,
    pub(super) endpoint_quarantine_total: u64,
    pub(super) kdf_drift_total: u64,
    pub(super) kdf_port_only_drift_total: u64,
    pub(super) hardswap_pending_reuse_total: u64,
    pub(super) hardswap_pending_ttl_expired_total: u64,
    pub(super) single_endpoint_outage_enter_total: u64,
    pub(super) single_endpoint_outage_exit_total: u64,
    pub(super) single_endpoint_outage_reconnect_attempt_total: u64,
    pub(super) single_endpoint_outage_reconnect_success_total: u64,
    pub(super) single_endpoint_quarantine_bypass_total: u64,
    pub(super) single_endpoint_shadow_rotate_total: u64,
    pub(super) single_endpoint_shadow_rotate_skipped_quarantine_total: u64,
    pub(super) floor_mode_switch_total: u64,
    pub(super) floor_mode_switch_static_to_adaptive_total: u64,
    pub(super) floor_mode_switch_adaptive_to_static_total: u64,
}

/// Writer-pool lifecycle counters (swap/drain/refill/restore).
#[derive(Serialize, Clone)]
pub(super) struct ZeroPoolData {
    pub(super) pool_swap_total: u64,
    pub(super) pool_drain_active: u64,
    pub(super) pool_force_close_total: u64,
    pub(super) pool_stale_pick_total: u64,
    pub(super) writer_removed_total: u64,
    pub(super) writer_removed_unexpected_total: u64,
    pub(super) refill_triggered_total: u64,
    pub(super) refill_skipped_inflight_total: u64,
    pub(super) refill_failed_total: u64,
    pub(super) writer_restored_same_endpoint_total: u64,
    pub(super) writer_restored_fallback_total: u64,
}

/// Stream-desync counters with a frames-until-desync histogram.
#[derive(Serialize, Clone)]
pub(super) struct ZeroDesyncData {
    pub(super) secure_padding_invalid_total: u64,
    pub(super) desync_total: u64,
    pub(super) desync_full_logged_total: u64,
    pub(super) desync_suppressed_total: u64,
    pub(super) desync_frames_bucket_0: u64,
    pub(super) desync_frames_bucket_1_2: u64,
    pub(super) desync_frames_bucket_3_10: u64,
    pub(super) desync_frames_bucket_gt_10: u64,
}

/// Combined snapshot of every "zero" counter family.
#[derive(Serialize, Clone)]
pub(super) struct ZeroAllData {
    pub(super) generated_at_epoch_secs: u64,
    pub(super) core: ZeroCoreData,
    pub(super) upstream: ZeroUpstreamData,
    pub(super) middle_proxy: ZeroMiddleProxyData,
    pub(super) pool: ZeroPoolData,
    pub(super) desync: ZeroDesyncData,
}
|
||||
|
||||
/// Aggregate ME writer coverage: configured vs. available endpoints and
/// alive vs. required writers, with derived percentages.
#[derive(Serialize, Clone)]
pub(super) struct MeWritersSummary {
    pub(super) configured_dc_groups: usize,
    pub(super) configured_endpoints: usize,
    pub(super) available_endpoints: usize,
    pub(super) available_pct: f64,
    pub(super) required_writers: usize,
    pub(super) alive_writers: usize,
    pub(super) coverage_pct: f64,
}

/// Status of one ME writer connection.
#[derive(Serialize, Clone)]
pub(super) struct MeWriterStatus {
    pub(super) writer_id: u64,
    pub(super) dc: Option<i16>,
    pub(super) endpoint: String,
    pub(super) generation: u64,
    pub(super) state: &'static str,
    pub(super) draining: bool,
    pub(super) degraded: bool,
    pub(super) bound_clients: usize,
    pub(super) idle_for_secs: Option<u64>,
    pub(super) rtt_ema_ms: Option<f64>,
}

/// ME writers endpoint payload.
#[derive(Serialize, Clone)]
pub(super) struct MeWritersData {
    pub(super) middle_proxy_enabled: bool,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub(super) reason: Option<&'static str>,
    pub(super) generated_at_epoch_secs: u64,
    pub(super) summary: MeWritersSummary,
    pub(super) writers: Vec<MeWriterStatus>,
}

/// Per-datacenter endpoint/writer coverage plus latency and load.
#[derive(Serialize, Clone)]
pub(super) struct DcStatus {
    pub(super) dc: i16,
    pub(super) endpoints: Vec<String>,
    pub(super) available_endpoints: usize,
    pub(super) available_pct: f64,
    pub(super) required_writers: usize,
    pub(super) alive_writers: usize,
    pub(super) coverage_pct: f64,
    pub(super) rtt_ms: Option<f64>,
    pub(super) load: usize,
}

/// DC-status endpoint payload.
#[derive(Serialize, Clone)]
pub(super) struct DcStatusData {
    pub(super) middle_proxy_enabled: bool,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub(super) reason: Option<&'static str>,
    pub(super) generated_at_epoch_secs: u64,
    pub(super) dcs: Vec<DcStatus>,
}
|
||||
|
||||
/// A quarantined ME endpoint and the time left in quarantine.
#[derive(Serialize, Clone)]
pub(super) struct MinimalQuarantineData {
    pub(super) endpoint: String,
    pub(super) remaining_ms: u64,
}

/// Selected network path (per DC): IP-family preference plus the chosen
/// v4/v6 addresses, when resolved.
#[derive(Serialize, Clone)]
pub(super) struct MinimalDcPathData {
    pub(super) dc: i16,
    pub(super) ip_preference: Option<&'static str>,
    pub(super) selected_addr_v4: Option<String>,
    pub(super) selected_addr_v6: Option<String>,
}

/// ME runtime configuration and state as exposed by the minimal endpoint:
/// generations, hardswap, floor mode, keepalive/reconnect/pool tuning, and
/// the current quarantine list.
#[derive(Serialize, Clone)]
pub(super) struct MinimalMeRuntimeData {
    pub(super) active_generation: u64,
    pub(super) warm_generation: u64,
    pub(super) pending_hardswap_generation: u64,
    pub(super) pending_hardswap_age_secs: Option<u64>,
    pub(super) hardswap_enabled: bool,
    pub(super) floor_mode: &'static str,
    pub(super) adaptive_floor_idle_secs: u64,
    pub(super) adaptive_floor_min_writers_single_endpoint: u8,
    pub(super) adaptive_floor_recover_grace_secs: u64,
    pub(super) me_keepalive_enabled: bool,
    pub(super) me_keepalive_interval_secs: u64,
    pub(super) me_keepalive_jitter_secs: u64,
    pub(super) me_keepalive_payload_random: bool,
    pub(super) rpc_proxy_req_every_secs: u64,
    pub(super) me_reconnect_max_concurrent_per_dc: u32,
    pub(super) me_reconnect_backoff_base_ms: u64,
    pub(super) me_reconnect_backoff_cap_ms: u64,
    pub(super) me_reconnect_fast_retry_count: u32,
    pub(super) me_pool_drain_ttl_secs: u64,
    pub(super) me_pool_force_close_secs: u64,
    pub(super) me_pool_min_fresh_ratio: f32,
    pub(super) me_bind_stale_mode: &'static str,
    pub(super) me_bind_stale_ttl_secs: u64,
    pub(super) me_single_endpoint_shadow_writers: u8,
    pub(super) me_single_endpoint_outage_mode_enabled: bool,
    pub(super) me_single_endpoint_outage_disable_quarantine: bool,
    pub(super) me_single_endpoint_outage_backoff_min_ms: u64,
    pub(super) me_single_endpoint_outage_backoff_max_ms: u64,
    pub(super) me_single_endpoint_shadow_rotate_every_secs: u64,
    pub(super) me_deterministic_writer_sort: bool,
    pub(super) me_socks_kdf_policy: &'static str,
    pub(super) quarantined_endpoints_total: usize,
    pub(super) quarantined_endpoints: Vec<MinimalQuarantineData>,
}

/// Inner payload of the minimal "all" endpoint.
#[derive(Serialize, Clone)]
pub(super) struct MinimalAllPayload {
    pub(super) me_writers: MeWritersData,
    pub(super) dcs: DcStatusData,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub(super) me_runtime: Option<MinimalMeRuntimeData>,
    pub(super) network_path: Vec<MinimalDcPathData>,
}

/// Envelope of the minimal "all" endpoint; `data` is omitted when disabled
/// or unavailable.
#[derive(Serialize, Clone)]
pub(super) struct MinimalAllData {
    pub(super) enabled: bool,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub(super) reason: Option<&'static str>,
    pub(super) generated_at_epoch_secs: u64,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub(super) data: Option<MinimalAllPayload>,
}
|
||||
|
||||
/// Connection links generated for one user, grouped by transport flavour.
#[derive(Serialize)]
pub(super) struct UserLinks {
    pub(super) classic: Vec<String>,
    pub(super) secure: Vec<String>,
    pub(super) tls: Vec<String>,
}

/// Full user record returned by the user-management API: configured limits
/// plus live usage (connections, unique IPs, cumulative octets).
#[derive(Serialize)]
pub(super) struct UserInfo {
    pub(super) username: String,
    pub(super) user_ad_tag: Option<String>,
    pub(super) max_tcp_conns: Option<usize>,
    pub(super) expiration_rfc3339: Option<String>,
    pub(super) data_quota_bytes: Option<u64>,
    pub(super) max_unique_ips: Option<usize>,
    pub(super) current_connections: u64,
    pub(super) active_unique_ips: usize,
    pub(super) active_unique_ips_list: Vec<IpAddr>,
    pub(super) recent_unique_ips: usize,
    pub(super) recent_unique_ips_list: Vec<IpAddr>,
    pub(super) total_octets: u64,
    pub(super) links: UserLinks,
}

/// Response to user creation; the secret is returned only here.
#[derive(Serialize)]
pub(super) struct CreateUserResponse {
    pub(super) user: UserInfo,
    pub(super) secret: String,
}

/// Request body for creating a user; everything but `username` is optional.
#[derive(Deserialize)]
pub(super) struct CreateUserRequest {
    pub(super) username: String,
    pub(super) secret: Option<String>,
    pub(super) user_ad_tag: Option<String>,
    pub(super) max_tcp_conns: Option<usize>,
    pub(super) expiration_rfc3339: Option<String>,
    pub(super) data_quota_bytes: Option<u64>,
    pub(super) max_unique_ips: Option<usize>,
}

/// Request body for partially updating a user; only provided fields change.
#[derive(Deserialize)]
pub(super) struct PatchUserRequest {
    pub(super) secret: Option<String>,
    pub(super) user_ad_tag: Option<String>,
    pub(super) max_tcp_conns: Option<usize>,
    pub(super) expiration_rfc3339: Option<String>,
    pub(super) data_quota_bytes: Option<u64>,
    pub(super) max_unique_ips: Option<usize>,
}

/// Request body for secret rotation; when `secret` is absent a new one is
/// generated server-side (the body itself is optional, hence `Default`).
#[derive(Default, Deserialize)]
pub(super) struct RotateSecretRequest {
    pub(super) secret: Option<String>,
}
|
||||
|
||||
pub(super) fn parse_optional_expiration(
|
||||
value: Option<&str>,
|
||||
) -> Result<Option<DateTime<Utc>>, ApiFailure> {
|
||||
let Some(raw) = value else {
|
||||
return Ok(None);
|
||||
};
|
||||
let parsed = DateTime::parse_from_rfc3339(raw)
|
||||
.map_err(|_| ApiFailure::bad_request("expiration_rfc3339 must be valid RFC3339"))?;
|
||||
Ok(Some(parsed.with_timezone(&Utc)))
|
||||
}
|
||||
|
||||
pub(super) fn is_valid_user_secret(secret: &str) -> bool {
|
||||
secret.len() == 32 && secret.chars().all(|c| c.is_ascii_hexdigit())
|
||||
}
|
||||
|
||||
pub(super) fn is_valid_ad_tag(tag: &str) -> bool {
|
||||
tag.len() == 32 && tag.chars().all(|c| c.is_ascii_hexdigit())
|
||||
}
|
||||
|
||||
pub(super) fn is_valid_username(user: &str) -> bool {
|
||||
!user.is_empty()
|
||||
&& user.len() <= MAX_USERNAME_LEN
|
||||
&& user
|
||||
.chars()
|
||||
.all(|ch| ch.is_ascii_alphanumeric() || matches!(ch, '_' | '-' | '.'))
|
||||
}
|
||||
|
||||
pub(super) fn random_user_secret() -> String {
|
||||
let mut bytes = [0u8; 16];
|
||||
rand::rng().fill(&mut bytes);
|
||||
hex::encode(bytes)
|
||||
}
|
||||
294
src/api/runtime_edge.rs
Normal file
294
src/api/runtime_edge.rs
Normal file
@@ -0,0 +1,294 @@
|
||||
use std::cmp::Reverse;
|
||||
use std::time::{Duration, Instant, SystemTime, UNIX_EPOCH};
|
||||
|
||||
use serde::Serialize;
|
||||
|
||||
use crate::config::ProxyConfig;
|
||||
|
||||
use super::ApiShared;
|
||||
use super::events::ApiEventRecord;
|
||||
|
||||
/// `reason` value when the runtime-edge feature is switched off in config.
const FEATURE_DISABLED_REASON: &str = "feature_disabled";
/// `reason` value when no payload and no cached fallback could be produced.
const SOURCE_UNAVAILABLE_REASON: &str = "source_unavailable";
/// Event count returned when no `limit=` query parameter is given.
const EVENTS_DEFAULT_LIMIT: usize = 50;
/// Hard cap applied to any requested event limit.
const EVENTS_MAX_LIMIT: usize = 1000;
||||
|
||||
/// One user's row in the connections summary.
#[derive(Clone, Serialize)]
pub(super) struct RuntimeEdgeConnectionUserData {
    pub(super) username: String,
    pub(super) current_connections: u64,
    pub(super) total_octets: u64,
}

/// Proxy-wide connection totals, split by ME vs. direct routing.
#[derive(Clone, Serialize)]
pub(super) struct RuntimeEdgeConnectionTotalsData {
    pub(super) current_connections: u64,
    pub(super) current_connections_me: u64,
    pub(super) current_connections_direct: u64,
    pub(super) active_users: usize,
}

/// Top-N users ranked by connection count and by cumulative throughput.
#[derive(Clone, Serialize)]
pub(super) struct RuntimeEdgeConnectionTopData {
    pub(super) limit: usize,
    pub(super) by_connections: Vec<RuntimeEdgeConnectionUserData>,
    pub(super) by_throughput: Vec<RuntimeEdgeConnectionUserData>,
}

/// How the response was produced: cache TTL and whether a (possibly stale)
/// cached payload was served.
#[derive(Clone, Serialize)]
pub(super) struct RuntimeEdgeConnectionCacheData {
    pub(super) ttl_ms: u64,
    pub(super) served_from_cache: bool,
    pub(super) stale_cache_used: bool,
}

/// Telemetry flags relevant to interpreting the throughput numbers.
#[derive(Clone, Serialize)]
pub(super) struct RuntimeEdgeConnectionTelemetryData {
    pub(super) user_enabled: bool,
    pub(super) throughput_is_cumulative: bool,
}

/// Inner payload of the connections-summary endpoint.
#[derive(Clone, Serialize)]
pub(super) struct RuntimeEdgeConnectionsSummaryPayload {
    pub(super) cache: RuntimeEdgeConnectionCacheData,
    pub(super) totals: RuntimeEdgeConnectionTotalsData,
    pub(super) top: RuntimeEdgeConnectionTopData,
    pub(super) telemetry: RuntimeEdgeConnectionTelemetryData,
}

/// Envelope of the connections-summary endpoint; `data` omitted when the
/// feature is disabled or the source is unavailable.
#[derive(Serialize)]
pub(super) struct RuntimeEdgeConnectionsSummaryData {
    pub(super) enabled: bool,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub(super) reason: Option<&'static str>,
    pub(super) generated_at_epoch_secs: u64,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub(super) data: Option<RuntimeEdgeConnectionsSummaryPayload>,
}
|
||||
|
||||
/// Cached connections-summary payload plus its expiry deadline, stored in
/// `ApiShared` and cloned out on each hit.
#[derive(Clone)]
pub(crate) struct EdgeConnectionsCacheEntry {
    pub(super) expires_at: Instant,
    pub(super) payload: RuntimeEdgeConnectionsSummaryPayload,
    pub(super) generated_at_epoch_secs: u64,
}

/// Inner payload of the recent-events endpoint: ring-buffer capacity,
/// number of events dropped so far, and the events themselves.
#[derive(Serialize)]
pub(super) struct RuntimeEdgeEventsPayload {
    pub(super) capacity: usize,
    pub(super) dropped_total: u64,
    pub(super) events: Vec<ApiEventRecord>,
}

/// Envelope of the recent-events endpoint.
#[derive(Serialize)]
pub(super) struct RuntimeEdgeEventsData {
    pub(super) enabled: bool,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub(super) reason: Option<&'static str>,
    pub(super) generated_at_epoch_secs: u64,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub(super) data: Option<RuntimeEdgeEventsPayload>,
}
|
||||
|
||||
pub(super) async fn build_runtime_connections_summary_data(
|
||||
shared: &ApiShared,
|
||||
cfg: &ProxyConfig,
|
||||
) -> RuntimeEdgeConnectionsSummaryData {
|
||||
let now_epoch_secs = now_epoch_secs();
|
||||
let api_cfg = &cfg.server.api;
|
||||
if !api_cfg.runtime_edge_enabled {
|
||||
return RuntimeEdgeConnectionsSummaryData {
|
||||
enabled: false,
|
||||
reason: Some(FEATURE_DISABLED_REASON),
|
||||
generated_at_epoch_secs: now_epoch_secs,
|
||||
data: None,
|
||||
};
|
||||
}
|
||||
|
||||
let (generated_at_epoch_secs, payload) = match get_connections_payload_cached(
|
||||
shared,
|
||||
api_cfg.runtime_edge_cache_ttl_ms,
|
||||
api_cfg.runtime_edge_top_n,
|
||||
)
|
||||
.await
|
||||
{
|
||||
Some(v) => v,
|
||||
None => {
|
||||
return RuntimeEdgeConnectionsSummaryData {
|
||||
enabled: true,
|
||||
reason: Some(SOURCE_UNAVAILABLE_REASON),
|
||||
generated_at_epoch_secs: now_epoch_secs,
|
||||
data: None,
|
||||
};
|
||||
}
|
||||
};
|
||||
|
||||
RuntimeEdgeConnectionsSummaryData {
|
||||
enabled: true,
|
||||
reason: None,
|
||||
generated_at_epoch_secs,
|
||||
data: Some(payload),
|
||||
}
|
||||
}
|
||||
|
||||
pub(super) fn build_runtime_events_recent_data(
|
||||
shared: &ApiShared,
|
||||
cfg: &ProxyConfig,
|
||||
query: Option<&str>,
|
||||
) -> RuntimeEdgeEventsData {
|
||||
let now_epoch_secs = now_epoch_secs();
|
||||
let api_cfg = &cfg.server.api;
|
||||
if !api_cfg.runtime_edge_enabled {
|
||||
return RuntimeEdgeEventsData {
|
||||
enabled: false,
|
||||
reason: Some(FEATURE_DISABLED_REASON),
|
||||
generated_at_epoch_secs: now_epoch_secs,
|
||||
data: None,
|
||||
};
|
||||
}
|
||||
|
||||
let limit = parse_recent_events_limit(query, EVENTS_DEFAULT_LIMIT, EVENTS_MAX_LIMIT);
|
||||
let snapshot = shared.runtime_events.snapshot(limit);
|
||||
|
||||
RuntimeEdgeEventsData {
|
||||
enabled: true,
|
||||
reason: None,
|
||||
generated_at_epoch_secs: now_epoch_secs,
|
||||
data: Some(RuntimeEdgeEventsPayload {
|
||||
capacity: snapshot.capacity,
|
||||
dropped_total: snapshot.dropped_total,
|
||||
events: snapshot.events,
|
||||
}),
|
||||
}
|
||||
}
|
||||
|
||||
/// Returns the connections payload, serving from cache when fresh and
/// recomputing (single-flight) otherwise.
///
/// Behaviour:
/// 1. With a positive TTL, a non-expired cache entry is returned as-is
///    (marked `served_from_cache`).
/// 2. On a miss, only one caller recomputes: the recompute lock is taken
///    with `try_lock`, and losers fall back to whatever cache entry exists
///    (marked stale) or `None` if there is none.
/// 3. The winner recomputes, stores the result back (if caching is on) and
///    returns it together with its generation timestamp.
async fn get_connections_payload_cached(
    shared: &ApiShared,
    cache_ttl_ms: u64,
    top_n: usize,
) -> Option<(u64, RuntimeEdgeConnectionsSummaryPayload)> {
    // Fast path: fresh cache hit.
    if cache_ttl_ms > 0 {
        let now = Instant::now();
        let cached = shared.runtime_edge_connections_cache.lock().await.clone();
        if let Some(entry) = cached
            && now < entry.expires_at
        {
            let mut payload = entry.payload;
            payload.cache.served_from_cache = true;
            payload.cache.stale_cache_used = false;
            return Some((entry.generated_at_epoch_secs, payload));
        }
    }

    // Single-flight: if another task is already recomputing, serve whatever
    // is cached (even if expired) rather than piling up recomputations.
    let Ok(_guard) = shared.runtime_edge_recompute_lock.try_lock() else {
        let cached = shared.runtime_edge_connections_cache.lock().await.clone();
        if let Some(entry) = cached {
            let mut payload = entry.payload;
            payload.cache.served_from_cache = true;
            payload.cache.stale_cache_used = true;
            return Some((entry.generated_at_epoch_secs, payload));
        }
        return None;
    };

    // We hold the recompute lock: build a fresh payload.
    let generated_at_epoch_secs = now_epoch_secs();
    let payload = recompute_connections_payload(shared, cache_ttl_ms, top_n).await;

    if cache_ttl_ms > 0 {
        let entry = EdgeConnectionsCacheEntry {
            expires_at: Instant::now() + Duration::from_millis(cache_ttl_ms),
            payload: payload.clone(),
            generated_at_epoch_secs,
        };
        *shared.runtime_edge_connections_cache.lock().await = Some(entry);
    }

    Some((generated_at_epoch_secs, payload))
}
|
||||
|
||||
async fn recompute_connections_payload(
|
||||
shared: &ApiShared,
|
||||
cache_ttl_ms: u64,
|
||||
top_n: usize,
|
||||
) -> RuntimeEdgeConnectionsSummaryPayload {
|
||||
let mut rows = Vec::<RuntimeEdgeConnectionUserData>::new();
|
||||
let mut active_users = 0usize;
|
||||
for entry in shared.stats.iter_user_stats() {
|
||||
let user_stats = entry.value();
|
||||
let current_connections = user_stats
|
||||
.curr_connects
|
||||
.load(std::sync::atomic::Ordering::Relaxed);
|
||||
let total_octets = user_stats
|
||||
.octets_from_client
|
||||
.load(std::sync::atomic::Ordering::Relaxed)
|
||||
.saturating_add(
|
||||
user_stats
|
||||
.octets_to_client
|
||||
.load(std::sync::atomic::Ordering::Relaxed),
|
||||
);
|
||||
if current_connections > 0 {
|
||||
active_users = active_users.saturating_add(1);
|
||||
}
|
||||
rows.push(RuntimeEdgeConnectionUserData {
|
||||
username: entry.key().clone(),
|
||||
current_connections,
|
||||
total_octets,
|
||||
});
|
||||
}
|
||||
|
||||
let limit = top_n.max(1);
|
||||
let mut by_connections = rows.clone();
|
||||
by_connections.sort_by_key(|row| (Reverse(row.current_connections), row.username.clone()));
|
||||
by_connections.truncate(limit);
|
||||
|
||||
let mut by_throughput = rows;
|
||||
by_throughput.sort_by_key(|row| (Reverse(row.total_octets), row.username.clone()));
|
||||
by_throughput.truncate(limit);
|
||||
|
||||
let telemetry = shared.stats.telemetry_policy();
|
||||
RuntimeEdgeConnectionsSummaryPayload {
|
||||
cache: RuntimeEdgeConnectionCacheData {
|
||||
ttl_ms: cache_ttl_ms,
|
||||
served_from_cache: false,
|
||||
stale_cache_used: false,
|
||||
},
|
||||
totals: RuntimeEdgeConnectionTotalsData {
|
||||
current_connections: shared.stats.get_current_connections_total(),
|
||||
current_connections_me: shared.stats.get_current_connections_me(),
|
||||
current_connections_direct: shared.stats.get_current_connections_direct(),
|
||||
active_users,
|
||||
},
|
||||
top: RuntimeEdgeConnectionTopData {
|
||||
limit,
|
||||
by_connections,
|
||||
by_throughput,
|
||||
},
|
||||
telemetry: RuntimeEdgeConnectionTelemetryData {
|
||||
user_enabled: telemetry.user_enabled,
|
||||
throughput_is_cumulative: true,
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
/// Extracts a `limit=<n>` value from a raw query string.
///
/// Returns `default_limit` when the query is absent or contains no
/// parseable `limit` pair; a parsed value is clamped into `1..=max_limit`.
fn parse_recent_events_limit(query: Option<&str>, default_limit: usize, max_limit: usize) -> usize {
    let Some(raw_query) = query else {
        return default_limit;
    };
    raw_query
        .split('&')
        .filter_map(|pair| {
            let (key, value) = pair.split_once('=')?;
            if key == "limit" {
                value.parse::<usize>().ok()
            } else {
                None
            }
        })
        .next()
        .map_or(default_limit, |parsed| parsed.clamp(1, max_limit))
}
|
||||
|
||||
/// Whole seconds since the Unix epoch; 0 if the system clock is before 1970.
fn now_epoch_secs() -> u64 {
    match SystemTime::now().duration_since(UNIX_EPOCH) {
        Ok(elapsed) => elapsed.as_secs(),
        Err(_) => 0,
    }
}
|
||||
534
src/api/runtime_min.rs
Normal file
534
src/api/runtime_min.rs
Normal file
@@ -0,0 +1,534 @@
|
||||
use std::collections::BTreeSet;
|
||||
use std::time::{SystemTime, UNIX_EPOCH};
|
||||
|
||||
use serde::Serialize;
|
||||
|
||||
use crate::config::ProxyConfig;
|
||||
|
||||
use super::ApiShared;
|
||||
|
||||
/// `reason` value when the data source (e.g. the ME pool) is not available.
const SOURCE_UNAVAILABLE_REASON: &str = "source_unavailable";
|
||||
|
||||
/// API IP-whitelist report; enabled iff at least one entry is configured.
#[derive(Serialize)]
pub(super) struct SecurityWhitelistData {
    pub(super) generated_at_epoch_secs: u64,
    pub(super) enabled: bool,
    pub(super) entries_total: usize,
    pub(super) entries: Vec<String>,
}

/// Generation bookkeeping of the ME writer pool.
#[derive(Serialize)]
pub(super) struct RuntimeMePoolStateGenerationData {
    pub(super) active_generation: u64,
    pub(super) warm_generation: u64,
    pub(super) pending_hardswap_generation: u64,
    pub(super) pending_hardswap_age_secs: Option<u64>,
    pub(super) draining_generations: Vec<u64>,
}

/// Hardswap feature state.
#[derive(Serialize)]
pub(super) struct RuntimeMePoolStateHardswapData {
    pub(super) enabled: bool,
    pub(super) pending: bool,
}

/// Writer counts by lifecycle contour (warm / active / draining state tag).
#[derive(Serialize)]
pub(super) struct RuntimeMePoolStateWriterContourData {
    pub(super) warm: usize,
    pub(super) active: usize,
    pub(super) draining: usize,
}

/// Writer counts by health classification.
#[derive(Serialize)]
pub(super) struct RuntimeMePoolStateWriterHealthData {
    pub(super) healthy: usize,
    pub(super) degraded: usize,
    pub(super) draining: usize,
}

/// Aggregate writer view: totals plus contour and health breakdowns.
#[derive(Serialize)]
pub(super) struct RuntimeMePoolStateWriterData {
    pub(super) total: usize,
    pub(super) alive_non_draining: usize,
    pub(super) draining: usize,
    pub(super) degraded: usize,
    pub(super) contour: RuntimeMePoolStateWriterContourData,
    pub(super) health: RuntimeMePoolStateWriterHealthData,
}

/// In-flight refill count for one (DC, address family) pair.
#[derive(Serialize)]
pub(super) struct RuntimeMePoolStateRefillDcData {
    pub(super) dc: i16,
    pub(super) family: &'static str,
    pub(super) inflight: usize,
}

/// Refill activity: total in-flight endpoints/DCs plus the per-DC split.
#[derive(Serialize)]
pub(super) struct RuntimeMePoolStateRefillData {
    pub(super) inflight_endpoints_total: usize,
    pub(super) inflight_dc_total: usize,
    pub(super) by_dc: Vec<RuntimeMePoolStateRefillDcData>,
}

/// Inner payload of the pool-state endpoint.
#[derive(Serialize)]
pub(super) struct RuntimeMePoolStatePayload {
    pub(super) generations: RuntimeMePoolStateGenerationData,
    pub(super) hardswap: RuntimeMePoolStateHardswapData,
    pub(super) writers: RuntimeMePoolStateWriterData,
    pub(super) refill: RuntimeMePoolStateRefillData,
}

/// Envelope of the pool-state endpoint; `data` omitted when the ME pool is
/// not available.
#[derive(Serialize)]
pub(super) struct RuntimeMePoolStateData {
    pub(super) enabled: bool,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub(super) reason: Option<&'static str>,
    pub(super) generated_at_epoch_secs: u64,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub(super) data: Option<RuntimeMePoolStatePayload>,
}
|
||||
|
||||
/// ME link-quality counters.
#[derive(Serialize)]
pub(super) struct RuntimeMeQualityCountersData {
    pub(super) idle_close_by_peer_total: u64,
    pub(super) reader_eof_total: u64,
    pub(super) kdf_drift_total: u64,
    pub(super) kdf_port_only_drift_total: u64,
    pub(super) reconnect_attempt_total: u64,
    pub(super) reconnect_success_total: u64,
}

/// Route-drop counters, split by cause.
#[derive(Serialize)]
pub(super) struct RuntimeMeQualityRouteDropData {
    pub(super) no_conn_total: u64,
    pub(super) channel_closed_total: u64,
    pub(super) queue_full_total: u64,
    pub(super) queue_full_base_total: u64,
    pub(super) queue_full_high_total: u64,
}

/// Per-DC RTT plus writer coverage.
#[derive(Serialize)]
pub(super) struct RuntimeMeQualityDcRttData {
    pub(super) dc: i16,
    pub(super) rtt_ema_ms: Option<f64>,
    pub(super) alive_writers: usize,
    pub(super) required_writers: usize,
    pub(super) coverage_pct: f64,
}

/// Inner payload of the ME-quality endpoint.
#[derive(Serialize)]
pub(super) struct RuntimeMeQualityPayload {
    pub(super) counters: RuntimeMeQualityCountersData,
    pub(super) route_drops: RuntimeMeQualityRouteDropData,
    pub(super) dc_rtt: Vec<RuntimeMeQualityDcRttData>,
}

/// Envelope of the ME-quality endpoint.
#[derive(Serialize)]
pub(super) struct RuntimeMeQualityData {
    pub(super) enabled: bool,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub(super) reason: Option<&'static str>,
    pub(super) generated_at_epoch_secs: u64,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub(super) data: Option<RuntimeMeQualityPayload>,
}

/// Configured upstream connect policy, echoed for observability.
#[derive(Serialize)]
pub(super) struct RuntimeUpstreamQualityPolicyData {
    pub(super) connect_retry_attempts: u32,
    pub(super) connect_retry_backoff_ms: u64,
    pub(super) connect_budget_ms: u64,
    pub(super) unhealthy_fail_threshold: u32,
    pub(super) connect_failfast_hard_errors: bool,
}

/// Upstream connect counters.
#[derive(Serialize)]
pub(super) struct RuntimeUpstreamQualityCountersData {
    pub(super) connect_attempt_total: u64,
    pub(super) connect_success_total: u64,
    pub(super) connect_fail_total: u64,
    pub(super) connect_failfast_hard_error_total: u64,
}

/// Aggregate upstream counts by health and kind.
#[derive(Serialize)]
pub(super) struct RuntimeUpstreamQualitySummaryData {
    pub(super) configured_total: usize,
    pub(super) healthy_total: usize,
    pub(super) unhealthy_total: usize,
    pub(super) direct_total: usize,
    pub(super) socks4_total: usize,
    pub(super) socks5_total: usize,
}

/// Per-DC latency/IP preference for one upstream.
#[derive(Serialize)]
pub(super) struct RuntimeUpstreamQualityDcData {
    pub(super) dc: i16,
    pub(super) latency_ema_ms: Option<f64>,
    pub(super) ip_preference: &'static str,
}

/// Detailed status of one upstream for the quality endpoint.
#[derive(Serialize)]
pub(super) struct RuntimeUpstreamQualityUpstreamData {
    pub(super) upstream_id: usize,
    pub(super) route_kind: &'static str,
    pub(super) address: String,
    pub(super) weight: u16,
    pub(super) scopes: String,
    pub(super) healthy: bool,
    pub(super) fails: u32,
    pub(super) last_check_age_secs: u64,
    pub(super) effective_latency_ms: Option<f64>,
    pub(super) dc: Vec<RuntimeUpstreamQualityDcData>,
}

/// Envelope of the upstream-quality endpoint.
#[derive(Serialize)]
pub(super) struct RuntimeUpstreamQualityData {
    pub(super) enabled: bool,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub(super) reason: Option<&'static str>,
    pub(super) generated_at_epoch_secs: u64,
    pub(super) policy: RuntimeUpstreamQualityPolicyData,
    pub(super) counters: RuntimeUpstreamQualityCountersData,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub(super) summary: Option<RuntimeUpstreamQualitySummaryData>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub(super) upstreams: Option<Vec<RuntimeUpstreamQualityUpstreamData>>,
}
|
||||
|
||||
/// A STUN-reflected external address plus the age of the observation.
#[derive(Serialize)]
pub(super) struct RuntimeNatStunReflectionData {
    pub(super) addr: String,
    pub(super) age_secs: u64,
}

/// NAT-probe feature flags and attempt budget.
#[derive(Serialize)]
pub(super) struct RuntimeNatStunFlagsData {
    pub(super) nat_probe_enabled: bool,
    pub(super) nat_probe_disabled_runtime: bool,
    pub(super) nat_probe_attempts: u8,
}

/// Configured vs. currently-live STUN servers.
#[derive(Serialize)]
pub(super) struct RuntimeNatStunServersData {
    pub(super) configured: Vec<String>,
    pub(super) live: Vec<String>,
    pub(super) live_total: usize,
}

/// Reflected addresses per IP family; each side omitted when unknown.
#[derive(Serialize)]
pub(super) struct RuntimeNatStunReflectionBlockData {
    #[serde(skip_serializing_if = "Option::is_none")]
    pub(super) v4: Option<RuntimeNatStunReflectionData>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub(super) v6: Option<RuntimeNatStunReflectionData>,
}

/// Inner payload of the NAT/STUN endpoint.
#[derive(Serialize)]
pub(super) struct RuntimeNatStunPayload {
    pub(super) flags: RuntimeNatStunFlagsData,
    pub(super) servers: RuntimeNatStunServersData,
    pub(super) reflection: RuntimeNatStunReflectionBlockData,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub(super) stun_backoff_remaining_ms: Option<u64>,
}

/// Envelope of the NAT/STUN endpoint.
#[derive(Serialize)]
pub(super) struct RuntimeNatStunData {
    pub(super) enabled: bool,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub(super) reason: Option<&'static str>,
    pub(super) generated_at_epoch_secs: u64,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub(super) data: Option<RuntimeNatStunPayload>,
}
|
||||
|
||||
pub(super) fn build_security_whitelist_data(cfg: &ProxyConfig) -> SecurityWhitelistData {
|
||||
let entries = cfg
|
||||
.server
|
||||
.api
|
||||
.whitelist
|
||||
.iter()
|
||||
.map(ToString::to_string)
|
||||
.collect::<Vec<_>>();
|
||||
SecurityWhitelistData {
|
||||
generated_at_epoch_secs: now_epoch_secs(),
|
||||
enabled: !entries.is_empty(),
|
||||
entries_total: entries.len(),
|
||||
entries,
|
||||
}
|
||||
}
|
||||
|
||||
/// Builds the middle-proxy pool state report.
///
/// Returns a disabled stub when no middle-proxy pool is configured;
/// otherwise takes three async snapshots (status / runtime / refill) and
/// aggregates per-writer counters into the payload.
pub(super) async fn build_runtime_me_pool_state_data(shared: &ApiShared) -> RuntimeMePoolStateData {
    let now_epoch_secs = now_epoch_secs();
    let Some(pool) = &shared.me_pool else {
        return RuntimeMePoolStateData {
            enabled: false,
            reason: Some(SOURCE_UNAVAILABLE_REASON),
            generated_at_epoch_secs: now_epoch_secs,
            data: None,
        };
    };

    let status = pool.api_status_snapshot().await;
    let runtime = pool.api_runtime_snapshot().await;
    let refill = pool.api_refill_snapshot().await;

    // BTreeSet gives deduplicated, sorted generation ids for the report.
    let mut draining_generations = BTreeSet::<u64>::new();
    let mut contour_warm = 0usize;
    let mut contour_active = 0usize;
    let mut contour_draining = 0usize;
    let mut draining = 0usize;
    let mut degraded = 0usize;
    let mut healthy = 0usize;

    // Single pass over all writers: draining takes precedence, degraded is
    // only counted for non-draining writers, healthy means neither.
    for writer in &status.writers {
        if writer.draining {
            draining_generations.insert(writer.generation);
            draining += 1;
        }
        if writer.degraded && !writer.draining {
            degraded += 1;
        }
        if !writer.degraded && !writer.draining {
            healthy += 1;
        }
        // Contour classification: any state other than "warm"/"active" is
        // bucketed as draining.
        match writer.state {
            "warm" => contour_warm += 1,
            "active" => contour_active += 1,
            _ => contour_draining += 1,
        }
    }

    RuntimeMePoolStateData {
        enabled: true,
        reason: None,
        // Use the snapshot's own timestamp, not `now`, so the report and its
        // data agree.
        generated_at_epoch_secs: status.generated_at_epoch_secs,
        data: Some(RuntimeMePoolStatePayload {
            generations: RuntimeMePoolStateGenerationData {
                active_generation: runtime.active_generation,
                warm_generation: runtime.warm_generation,
                pending_hardswap_generation: runtime.pending_hardswap_generation,
                pending_hardswap_age_secs: runtime.pending_hardswap_age_secs,
                draining_generations: draining_generations.into_iter().collect(),
            },
            hardswap: RuntimeMePoolStateHardswapData {
                enabled: runtime.hardswap_enabled,
                // Generation 0 is treated as "no hardswap pending".
                pending: runtime.pending_hardswap_generation != 0,
            },
            writers: RuntimeMePoolStateWriterData {
                total: status.writers.len(),
                alive_non_draining: status.writers.len().saturating_sub(draining),
                draining,
                degraded,
                contour: RuntimeMePoolStateWriterContourData {
                    warm: contour_warm,
                    active: contour_active,
                    draining: contour_draining,
                },
                health: RuntimeMePoolStateWriterHealthData {
                    healthy,
                    degraded,
                    draining,
                },
            },
            refill: RuntimeMePoolStateRefillData {
                inflight_endpoints_total: refill.inflight_endpoints_total,
                inflight_dc_total: refill.inflight_dc_total,
                by_dc: refill
                    .by_dc
                    .into_iter()
                    .map(|entry| RuntimeMePoolStateRefillDcData {
                        dc: entry.dc,
                        family: entry.family,
                        inflight: entry.inflight,
                    })
                    .collect(),
            },
        }),
    }
}
|
||||
|
||||
/// Builds the middle-proxy quality report: global counters from `Stats`,
/// route-drop counters, and per-DC RTT/coverage from the pool snapshot.
///
/// Returns a disabled stub when no middle-proxy pool is configured.
pub(super) async fn build_runtime_me_quality_data(shared: &ApiShared) -> RuntimeMeQualityData {
    let now_epoch_secs = now_epoch_secs();
    let Some(pool) = &shared.me_pool else {
        return RuntimeMeQualityData {
            enabled: false,
            reason: Some(SOURCE_UNAVAILABLE_REASON),
            generated_at_epoch_secs: now_epoch_secs,
            data: None,
        };
    };

    let status = pool.api_status_snapshot().await;
    RuntimeMeQualityData {
        enabled: true,
        reason: None,
        // Timestamp comes from the snapshot so it matches the DC data below.
        generated_at_epoch_secs: status.generated_at_epoch_secs,
        data: Some(RuntimeMeQualityPayload {
            counters: RuntimeMeQualityCountersData {
                idle_close_by_peer_total: shared.stats.get_me_idle_close_by_peer_total(),
                reader_eof_total: shared.stats.get_me_reader_eof_total(),
                kdf_drift_total: shared.stats.get_me_kdf_drift_total(),
                kdf_port_only_drift_total: shared.stats.get_me_kdf_port_only_drift_total(),
                reconnect_attempt_total: shared.stats.get_me_reconnect_attempts(),
                reconnect_success_total: shared.stats.get_me_reconnect_success(),
            },
            route_drops: RuntimeMeQualityRouteDropData {
                no_conn_total: shared.stats.get_me_route_drop_no_conn(),
                channel_closed_total: shared.stats.get_me_route_drop_channel_closed(),
                queue_full_total: shared.stats.get_me_route_drop_queue_full(),
                queue_full_base_total: shared.stats.get_me_route_drop_queue_full_base(),
                queue_full_high_total: shared.stats.get_me_route_drop_queue_full_high(),
            },
            // Note: the snapshot's `rtt_ms` is surfaced under the name
            // `rtt_ema_ms` here.
            dc_rtt: status
                .dcs
                .into_iter()
                .map(|dc| RuntimeMeQualityDcRttData {
                    dc: dc.dc,
                    rtt_ema_ms: dc.rtt_ms,
                    alive_writers: dc.alive_writers,
                    required_writers: dc.required_writers,
                    coverage_pct: dc.coverage_pct,
                })
                .collect(),
        }),
    }
}
|
||||
|
||||
pub(super) async fn build_runtime_upstream_quality_data(
|
||||
shared: &ApiShared,
|
||||
) -> RuntimeUpstreamQualityData {
|
||||
let generated_at_epoch_secs = now_epoch_secs();
|
||||
let policy = shared.upstream_manager.api_policy_snapshot();
|
||||
let counters = RuntimeUpstreamQualityCountersData {
|
||||
connect_attempt_total: shared.stats.get_upstream_connect_attempt_total(),
|
||||
connect_success_total: shared.stats.get_upstream_connect_success_total(),
|
||||
connect_fail_total: shared.stats.get_upstream_connect_fail_total(),
|
||||
connect_failfast_hard_error_total: shared.stats.get_upstream_connect_failfast_hard_error_total(),
|
||||
};
|
||||
|
||||
let Some(snapshot) = shared.upstream_manager.try_api_snapshot() else {
|
||||
return RuntimeUpstreamQualityData {
|
||||
enabled: false,
|
||||
reason: Some(SOURCE_UNAVAILABLE_REASON),
|
||||
generated_at_epoch_secs,
|
||||
policy: RuntimeUpstreamQualityPolicyData {
|
||||
connect_retry_attempts: policy.connect_retry_attempts,
|
||||
connect_retry_backoff_ms: policy.connect_retry_backoff_ms,
|
||||
connect_budget_ms: policy.connect_budget_ms,
|
||||
unhealthy_fail_threshold: policy.unhealthy_fail_threshold,
|
||||
connect_failfast_hard_errors: policy.connect_failfast_hard_errors,
|
||||
},
|
||||
counters,
|
||||
summary: None,
|
||||
upstreams: None,
|
||||
};
|
||||
};
|
||||
|
||||
RuntimeUpstreamQualityData {
|
||||
enabled: true,
|
||||
reason: None,
|
||||
generated_at_epoch_secs,
|
||||
policy: RuntimeUpstreamQualityPolicyData {
|
||||
connect_retry_attempts: policy.connect_retry_attempts,
|
||||
connect_retry_backoff_ms: policy.connect_retry_backoff_ms,
|
||||
connect_budget_ms: policy.connect_budget_ms,
|
||||
unhealthy_fail_threshold: policy.unhealthy_fail_threshold,
|
||||
connect_failfast_hard_errors: policy.connect_failfast_hard_errors,
|
||||
},
|
||||
counters,
|
||||
summary: Some(RuntimeUpstreamQualitySummaryData {
|
||||
configured_total: snapshot.summary.configured_total,
|
||||
healthy_total: snapshot.summary.healthy_total,
|
||||
unhealthy_total: snapshot.summary.unhealthy_total,
|
||||
direct_total: snapshot.summary.direct_total,
|
||||
socks4_total: snapshot.summary.socks4_total,
|
||||
socks5_total: snapshot.summary.socks5_total,
|
||||
}),
|
||||
upstreams: Some(
|
||||
snapshot
|
||||
.upstreams
|
||||
.into_iter()
|
||||
.map(|upstream| RuntimeUpstreamQualityUpstreamData {
|
||||
upstream_id: upstream.upstream_id,
|
||||
route_kind: match upstream.route_kind {
|
||||
crate::transport::UpstreamRouteKind::Direct => "direct",
|
||||
crate::transport::UpstreamRouteKind::Socks4 => "socks4",
|
||||
crate::transport::UpstreamRouteKind::Socks5 => "socks5",
|
||||
},
|
||||
address: upstream.address,
|
||||
weight: upstream.weight,
|
||||
scopes: upstream.scopes,
|
||||
healthy: upstream.healthy,
|
||||
fails: upstream.fails,
|
||||
last_check_age_secs: upstream.last_check_age_secs,
|
||||
effective_latency_ms: upstream.effective_latency_ms,
|
||||
dc: upstream
|
||||
.dc
|
||||
.into_iter()
|
||||
.map(|dc| RuntimeUpstreamQualityDcData {
|
||||
dc: dc.dc,
|
||||
latency_ema_ms: dc.latency_ema_ms,
|
||||
ip_preference: match dc.ip_preference {
|
||||
crate::transport::upstream::IpPreference::Unknown => "unknown",
|
||||
crate::transport::upstream::IpPreference::PreferV6 => "prefer_v6",
|
||||
crate::transport::upstream::IpPreference::PreferV4 => "prefer_v4",
|
||||
crate::transport::upstream::IpPreference::BothWork => "both_work",
|
||||
crate::transport::upstream::IpPreference::Unavailable => "unavailable",
|
||||
},
|
||||
})
|
||||
.collect(),
|
||||
})
|
||||
.collect(),
|
||||
),
|
||||
}
|
||||
}
|
||||
|
||||
pub(super) async fn build_runtime_nat_stun_data(shared: &ApiShared) -> RuntimeNatStunData {
|
||||
let now_epoch_secs = now_epoch_secs();
|
||||
let Some(pool) = &shared.me_pool else {
|
||||
return RuntimeNatStunData {
|
||||
enabled: false,
|
||||
reason: Some(SOURCE_UNAVAILABLE_REASON),
|
||||
generated_at_epoch_secs: now_epoch_secs,
|
||||
data: None,
|
||||
};
|
||||
};
|
||||
|
||||
let snapshot = pool.api_nat_stun_snapshot().await;
|
||||
RuntimeNatStunData {
|
||||
enabled: true,
|
||||
reason: None,
|
||||
generated_at_epoch_secs: now_epoch_secs,
|
||||
data: Some(RuntimeNatStunPayload {
|
||||
flags: RuntimeNatStunFlagsData {
|
||||
nat_probe_enabled: snapshot.nat_probe_enabled,
|
||||
nat_probe_disabled_runtime: snapshot.nat_probe_disabled_runtime,
|
||||
nat_probe_attempts: snapshot.nat_probe_attempts,
|
||||
},
|
||||
servers: RuntimeNatStunServersData {
|
||||
configured: snapshot.configured_servers,
|
||||
live: snapshot.live_servers.clone(),
|
||||
live_total: snapshot.live_servers.len(),
|
||||
},
|
||||
reflection: RuntimeNatStunReflectionBlockData {
|
||||
v4: snapshot.reflection_v4.map(|entry| RuntimeNatStunReflectionData {
|
||||
addr: entry.addr.to_string(),
|
||||
age_secs: entry.age_secs,
|
||||
}),
|
||||
v6: snapshot.reflection_v6.map(|entry| RuntimeNatStunReflectionData {
|
||||
addr: entry.addr.to_string(),
|
||||
age_secs: entry.age_secs,
|
||||
}),
|
||||
},
|
||||
stun_backoff_remaining_ms: snapshot.stun_backoff_remaining_ms,
|
||||
}),
|
||||
}
|
||||
}
|
||||
|
||||
/// Current Unix time in whole seconds, or 0 if the system clock reads
/// earlier than the epoch.
fn now_epoch_secs() -> u64 {
    SystemTime::now()
        .duration_since(UNIX_EPOCH)
        .map(|elapsed| elapsed.as_secs())
        .unwrap_or(0)
}
|
||||
484
src/api/runtime_stats.rs
Normal file
484
src/api/runtime_stats.rs
Normal file
@@ -0,0 +1,484 @@
|
||||
use std::time::{Duration, Instant, SystemTime, UNIX_EPOCH};
|
||||
|
||||
use crate::config::ApiConfig;
|
||||
use crate::stats::Stats;
|
||||
use crate::transport::upstream::IpPreference;
|
||||
use crate::transport::UpstreamRouteKind;
|
||||
|
||||
use super::ApiShared;
|
||||
use super::model::{
|
||||
DcStatus, DcStatusData, MeWriterStatus, MeWritersData, MeWritersSummary, MinimalAllData,
|
||||
MinimalAllPayload, MinimalDcPathData, MinimalMeRuntimeData, MinimalQuarantineData,
|
||||
UpstreamDcStatus, UpstreamStatus, UpstreamSummaryData, UpstreamsData, ZeroAllData,
|
||||
ZeroCodeCount, ZeroCoreData, ZeroDesyncData, ZeroMiddleProxyData, ZeroPoolData,
|
||||
ZeroUpstreamData,
|
||||
};
|
||||
|
||||
// Reason string reported when an API section is switched off in configuration.
const FEATURE_DISABLED_REASON: &str = "feature_disabled";
// Reason string reported when the backing data source cannot be queried.
const SOURCE_UNAVAILABLE_REASON: &str = "source_unavailable";
|
||||
|
||||
/// Cached copy of the "minimal runtime" payload together with its expiry
/// deadline (see `get_minimal_payload_cached`).
#[derive(Clone)]
pub(crate) struct MinimalCacheEntry {
    // Instant after which this entry is stale and must be rebuilt.
    pub(super) expires_at: Instant,
    // The cached payload itself.
    pub(super) payload: MinimalAllPayload,
    // Epoch timestamp taken from the snapshot the payload was built from.
    pub(super) generated_at_epoch_secs: u64,
}
|
||||
|
||||
/// Builds the "zero" (always-available, counter-only) report: core process
/// counters, upstream connect counters, middle-proxy counters, pool
/// lifecycle counters and desync counters — all read from `Stats`.
pub(super) fn build_zero_all_data(stats: &Stats, configured_users: usize) -> ZeroAllData {
    let telemetry = stats.telemetry_policy();
    // Per-error-code handshake rejection counts, flattened to (code, total)
    // pairs for serialization.
    let handshake_error_codes = stats
        .get_me_handshake_error_code_counts()
        .into_iter()
        .map(|(code, total)| ZeroCodeCount { code, total })
        .collect();

    ZeroAllData {
        generated_at_epoch_secs: now_epoch_secs(),
        core: ZeroCoreData {
            uptime_seconds: stats.uptime_secs(),
            connections_total: stats.get_connects_all(),
            connections_bad_total: stats.get_connects_bad(),
            handshake_timeouts_total: stats.get_handshake_timeouts(),
            configured_users,
            telemetry_core_enabled: telemetry.core_enabled,
            telemetry_user_enabled: telemetry.user_enabled,
            telemetry_me_level: telemetry.me_level.to_string(),
        },
        upstream: build_zero_upstream_data(stats),
        // Middle-proxy ("me") counters, copied one-for-one from `Stats`.
        middle_proxy: ZeroMiddleProxyData {
            keepalive_sent_total: stats.get_me_keepalive_sent(),
            keepalive_failed_total: stats.get_me_keepalive_failed(),
            keepalive_pong_total: stats.get_me_keepalive_pong(),
            keepalive_timeout_total: stats.get_me_keepalive_timeout(),
            rpc_proxy_req_signal_sent_total: stats.get_me_rpc_proxy_req_signal_sent_total(),
            rpc_proxy_req_signal_failed_total: stats.get_me_rpc_proxy_req_signal_failed_total(),
            rpc_proxy_req_signal_skipped_no_meta_total: stats
                .get_me_rpc_proxy_req_signal_skipped_no_meta_total(),
            rpc_proxy_req_signal_response_total: stats.get_me_rpc_proxy_req_signal_response_total(),
            rpc_proxy_req_signal_close_sent_total: stats
                .get_me_rpc_proxy_req_signal_close_sent_total(),
            reconnect_attempt_total: stats.get_me_reconnect_attempts(),
            reconnect_success_total: stats.get_me_reconnect_success(),
            handshake_reject_total: stats.get_me_handshake_reject_total(),
            handshake_error_codes,
            reader_eof_total: stats.get_me_reader_eof_total(),
            idle_close_by_peer_total: stats.get_me_idle_close_by_peer_total(),
            route_drop_no_conn_total: stats.get_me_route_drop_no_conn(),
            route_drop_channel_closed_total: stats.get_me_route_drop_channel_closed(),
            route_drop_queue_full_total: stats.get_me_route_drop_queue_full(),
            route_drop_queue_full_base_total: stats.get_me_route_drop_queue_full_base(),
            route_drop_queue_full_high_total: stats.get_me_route_drop_queue_full_high(),
            socks_kdf_strict_reject_total: stats.get_me_socks_kdf_strict_reject(),
            socks_kdf_compat_fallback_total: stats.get_me_socks_kdf_compat_fallback(),
            endpoint_quarantine_total: stats.get_me_endpoint_quarantine_total(),
            kdf_drift_total: stats.get_me_kdf_drift_total(),
            kdf_port_only_drift_total: stats.get_me_kdf_port_only_drift_total(),
            hardswap_pending_reuse_total: stats.get_me_hardswap_pending_reuse_total(),
            hardswap_pending_ttl_expired_total: stats.get_me_hardswap_pending_ttl_expired_total(),
            single_endpoint_outage_enter_total: stats.get_me_single_endpoint_outage_enter_total(),
            single_endpoint_outage_exit_total: stats.get_me_single_endpoint_outage_exit_total(),
            single_endpoint_outage_reconnect_attempt_total: stats
                .get_me_single_endpoint_outage_reconnect_attempt_total(),
            single_endpoint_outage_reconnect_success_total: stats
                .get_me_single_endpoint_outage_reconnect_success_total(),
            single_endpoint_quarantine_bypass_total: stats
                .get_me_single_endpoint_quarantine_bypass_total(),
            single_endpoint_shadow_rotate_total: stats.get_me_single_endpoint_shadow_rotate_total(),
            single_endpoint_shadow_rotate_skipped_quarantine_total: stats
                .get_me_single_endpoint_shadow_rotate_skipped_quarantine_total(),
            floor_mode_switch_total: stats.get_me_floor_mode_switch_total(),
            floor_mode_switch_static_to_adaptive_total: stats
                .get_me_floor_mode_switch_static_to_adaptive_total(),
            floor_mode_switch_adaptive_to_static_total: stats
                .get_me_floor_mode_switch_adaptive_to_static_total(),
        },
        // Writer-pool lifecycle counters.
        pool: ZeroPoolData {
            pool_swap_total: stats.get_pool_swap_total(),
            pool_drain_active: stats.get_pool_drain_active(),
            pool_force_close_total: stats.get_pool_force_close_total(),
            pool_stale_pick_total: stats.get_pool_stale_pick_total(),
            writer_removed_total: stats.get_me_writer_removed_total(),
            writer_removed_unexpected_total: stats.get_me_writer_removed_unexpected_total(),
            refill_triggered_total: stats.get_me_refill_triggered_total(),
            refill_skipped_inflight_total: stats.get_me_refill_skipped_inflight_total(),
            refill_failed_total: stats.get_me_refill_failed_total(),
            writer_restored_same_endpoint_total: stats.get_me_writer_restored_same_endpoint_total(),
            writer_restored_fallback_total: stats.get_me_writer_restored_fallback_total(),
        },
        // Obfuscation desync counters plus their frame-count histogram.
        desync: ZeroDesyncData {
            secure_padding_invalid_total: stats.get_secure_padding_invalid(),
            desync_total: stats.get_desync_total(),
            desync_full_logged_total: stats.get_desync_full_logged(),
            desync_suppressed_total: stats.get_desync_suppressed(),
            desync_frames_bucket_0: stats.get_desync_frames_bucket_0(),
            desync_frames_bucket_1_2: stats.get_desync_frames_bucket_1_2(),
            desync_frames_bucket_3_10: stats.get_desync_frames_bucket_3_10(),
            desync_frames_bucket_gt_10: stats.get_desync_frames_bucket_gt_10(),
        },
    }
}
|
||||
|
||||
/// Builds the always-available upstream counter block: connect totals, a
/// per-connect attempt-count histogram, and success/fail connect-duration
/// histograms — all read straight from `Stats`.
fn build_zero_upstream_data(stats: &Stats) -> ZeroUpstreamData {
    ZeroUpstreamData {
        connect_attempt_total: stats.get_upstream_connect_attempt_total(),
        connect_success_total: stats.get_upstream_connect_success_total(),
        connect_fail_total: stats.get_upstream_connect_fail_total(),
        connect_failfast_hard_error_total: stats.get_upstream_connect_failfast_hard_error_total(),
        // Histogram over how many attempts a connect took: 1 / 2 / 3-4 / >4.
        connect_attempts_bucket_1: stats.get_upstream_connect_attempts_bucket_1(),
        connect_attempts_bucket_2: stats.get_upstream_connect_attempts_bucket_2(),
        connect_attempts_bucket_3_4: stats.get_upstream_connect_attempts_bucket_3_4(),
        connect_attempts_bucket_gt_4: stats.get_upstream_connect_attempts_bucket_gt_4(),
        // Connect duration histograms, successful connects:
        // <=100ms / 101-500ms / 501-1000ms / >1000ms.
        connect_duration_success_bucket_le_100ms: stats
            .get_upstream_connect_duration_success_bucket_le_100ms(),
        connect_duration_success_bucket_101_500ms: stats
            .get_upstream_connect_duration_success_bucket_101_500ms(),
        connect_duration_success_bucket_501_1000ms: stats
            .get_upstream_connect_duration_success_bucket_501_1000ms(),
        connect_duration_success_bucket_gt_1000ms: stats
            .get_upstream_connect_duration_success_bucket_gt_1000ms(),
        // Same buckets for failed connects.
        connect_duration_fail_bucket_le_100ms: stats
            .get_upstream_connect_duration_fail_bucket_le_100ms(),
        connect_duration_fail_bucket_101_500ms: stats
            .get_upstream_connect_duration_fail_bucket_101_500ms(),
        connect_duration_fail_bucket_501_1000ms: stats
            .get_upstream_connect_duration_fail_bucket_501_1000ms(),
        connect_duration_fail_bucket_gt_1000ms: stats
            .get_upstream_connect_duration_fail_bucket_gt_1000ms(),
    }
}
|
||||
|
||||
/// Builds the full upstreams report.
///
/// The `zero` counter block is always attached. Per-upstream detail is
/// attached only when (a) the minimal-runtime feature is enabled and (b) the
/// upstream manager can produce a snapshot without blocking.
pub(super) fn build_upstreams_data(shared: &ApiShared, api_cfg: &ApiConfig) -> UpstreamsData {
    let generated_at_epoch_secs = now_epoch_secs();
    let zero = build_zero_upstream_data(&shared.stats);
    // Feature gate: counters only, flagged as disabled.
    if !api_cfg.minimal_runtime_enabled {
        return UpstreamsData {
            enabled: false,
            reason: Some(FEATURE_DISABLED_REASON),
            generated_at_epoch_secs,
            zero,
            summary: None,
            upstreams: None,
        };
    }

    // Snapshot unavailable: still enabled, but with a reason and no detail.
    let Some(snapshot) = shared.upstream_manager.try_api_snapshot() else {
        return UpstreamsData {
            enabled: true,
            reason: Some(SOURCE_UNAVAILABLE_REASON),
            generated_at_epoch_secs,
            zero,
            summary: None,
            upstreams: None,
        };
    };

    let summary = UpstreamSummaryData {
        configured_total: snapshot.summary.configured_total,
        healthy_total: snapshot.summary.healthy_total,
        unhealthy_total: snapshot.summary.unhealthy_total,
        direct_total: snapshot.summary.direct_total,
        socks4_total: snapshot.summary.socks4_total,
        socks5_total: snapshot.summary.socks5_total,
    };
    // Map each upstream (and its per-DC latency/preference entries) to the
    // API DTOs, converting the enums to wire strings.
    let upstreams = snapshot
        .upstreams
        .into_iter()
        .map(|upstream| UpstreamStatus {
            upstream_id: upstream.upstream_id,
            route_kind: map_route_kind(upstream.route_kind),
            address: upstream.address,
            weight: upstream.weight,
            scopes: upstream.scopes,
            healthy: upstream.healthy,
            fails: upstream.fails,
            last_check_age_secs: upstream.last_check_age_secs,
            effective_latency_ms: upstream.effective_latency_ms,
            dc: upstream
                .dc
                .into_iter()
                .map(|dc| UpstreamDcStatus {
                    dc: dc.dc,
                    latency_ema_ms: dc.latency_ema_ms,
                    ip_preference: map_ip_preference(dc.ip_preference),
                })
                .collect(),
        })
        .collect();

    UpstreamsData {
        enabled: true,
        reason: None,
        generated_at_epoch_secs,
        zero,
        summary: Some(summary),
        upstreams: Some(upstreams),
    }
}
|
||||
|
||||
pub(super) async fn build_minimal_all_data(
|
||||
shared: &ApiShared,
|
||||
api_cfg: &ApiConfig,
|
||||
) -> MinimalAllData {
|
||||
let now = now_epoch_secs();
|
||||
if !api_cfg.minimal_runtime_enabled {
|
||||
return MinimalAllData {
|
||||
enabled: false,
|
||||
reason: Some(FEATURE_DISABLED_REASON),
|
||||
generated_at_epoch_secs: now,
|
||||
data: None,
|
||||
};
|
||||
}
|
||||
|
||||
let Some((generated_at_epoch_secs, payload)) =
|
||||
get_minimal_payload_cached(shared, api_cfg.minimal_runtime_cache_ttl_ms).await
|
||||
else {
|
||||
return MinimalAllData {
|
||||
enabled: true,
|
||||
reason: Some(SOURCE_UNAVAILABLE_REASON),
|
||||
generated_at_epoch_secs: now,
|
||||
data: Some(MinimalAllPayload {
|
||||
me_writers: disabled_me_writers(now, SOURCE_UNAVAILABLE_REASON),
|
||||
dcs: disabled_dcs(now, SOURCE_UNAVAILABLE_REASON),
|
||||
me_runtime: None,
|
||||
network_path: Vec::new(),
|
||||
}),
|
||||
};
|
||||
};
|
||||
|
||||
MinimalAllData {
|
||||
enabled: true,
|
||||
reason: None,
|
||||
generated_at_epoch_secs,
|
||||
data: Some(payload),
|
||||
}
|
||||
}
|
||||
|
||||
pub(super) async fn build_me_writers_data(
|
||||
shared: &ApiShared,
|
||||
api_cfg: &ApiConfig,
|
||||
) -> MeWritersData {
|
||||
let now = now_epoch_secs();
|
||||
if !api_cfg.minimal_runtime_enabled {
|
||||
return disabled_me_writers(now, FEATURE_DISABLED_REASON);
|
||||
}
|
||||
|
||||
let Some((_, payload)) =
|
||||
get_minimal_payload_cached(shared, api_cfg.minimal_runtime_cache_ttl_ms).await
|
||||
else {
|
||||
return disabled_me_writers(now, SOURCE_UNAVAILABLE_REASON);
|
||||
};
|
||||
payload.me_writers
|
||||
}
|
||||
|
||||
pub(super) async fn build_dcs_data(shared: &ApiShared, api_cfg: &ApiConfig) -> DcStatusData {
|
||||
let now = now_epoch_secs();
|
||||
if !api_cfg.minimal_runtime_enabled {
|
||||
return disabled_dcs(now, FEATURE_DISABLED_REASON);
|
||||
}
|
||||
|
||||
let Some((_, payload)) =
|
||||
get_minimal_payload_cached(shared, api_cfg.minimal_runtime_cache_ttl_ms).await
|
||||
else {
|
||||
return disabled_dcs(now, SOURCE_UNAVAILABLE_REASON);
|
||||
};
|
||||
payload.dcs
|
||||
}
|
||||
|
||||
/// Returns the minimal-runtime payload, serving a cached copy when
/// `cache_ttl_ms > 0` and the cached entry has not expired.
///
/// Returns `None` when no middle-proxy pool is configured. On a cache miss
/// (or with caching disabled) the payload is rebuilt from fresh pool
/// snapshots and, when caching is on, written back to the shared cache.
async fn get_minimal_payload_cached(
    shared: &ApiShared,
    cache_ttl_ms: u64,
) -> Option<(u64, MinimalAllPayload)> {
    // Fast path: clone the cache entry out of the mutex so the lock is not
    // held across the snapshot calls below.
    if cache_ttl_ms > 0 {
        let now = Instant::now();
        let cached = shared.minimal_cache.lock().await.clone();
        if let Some(entry) = cached
            && now < entry.expires_at
        {
            return Some((entry.generated_at_epoch_secs, entry.payload));
        }
    }

    // Cache miss: rebuild from live pool snapshots.
    let pool = shared.me_pool.as_ref()?;
    let status = pool.api_status_snapshot().await;
    let runtime = pool.api_runtime_snapshot().await;
    let generated_at_epoch_secs = status.generated_at_epoch_secs;

    let me_writers = MeWritersData {
        middle_proxy_enabled: true,
        reason: None,
        generated_at_epoch_secs,
        summary: MeWritersSummary {
            configured_dc_groups: status.configured_dc_groups,
            configured_endpoints: status.configured_endpoints,
            available_endpoints: status.available_endpoints,
            available_pct: status.available_pct,
            required_writers: status.required_writers,
            alive_writers: status.alive_writers,
            coverage_pct: status.coverage_pct,
        },
        writers: status
            .writers
            .into_iter()
            .map(|entry| MeWriterStatus {
                writer_id: entry.writer_id,
                dc: entry.dc,
                endpoint: entry.endpoint.to_string(),
                generation: entry.generation,
                state: entry.state,
                draining: entry.draining,
                degraded: entry.degraded,
                bound_clients: entry.bound_clients,
                idle_for_secs: entry.idle_for_secs,
                rtt_ema_ms: entry.rtt_ema_ms,
            })
            .collect(),
    };
    let dcs = DcStatusData {
        middle_proxy_enabled: true,
        reason: None,
        generated_at_epoch_secs,
        dcs: status
            .dcs
            .into_iter()
            .map(|entry| DcStatus {
                dc: entry.dc,
                endpoints: entry
                    .endpoints
                    .into_iter()
                    .map(|value| value.to_string())
                    .collect(),
                available_endpoints: entry.available_endpoints,
                available_pct: entry.available_pct,
                required_writers: entry.required_writers,
                alive_writers: entry.alive_writers,
                coverage_pct: entry.coverage_pct,
                rtt_ms: entry.rtt_ms,
                load: entry.load,
            })
            .collect(),
    };
    // Runtime tunables copied one-for-one from the pool runtime snapshot.
    let me_runtime = MinimalMeRuntimeData {
        active_generation: runtime.active_generation,
        warm_generation: runtime.warm_generation,
        pending_hardswap_generation: runtime.pending_hardswap_generation,
        pending_hardswap_age_secs: runtime.pending_hardswap_age_secs,
        hardswap_enabled: runtime.hardswap_enabled,
        floor_mode: runtime.floor_mode,
        adaptive_floor_idle_secs: runtime.adaptive_floor_idle_secs,
        adaptive_floor_min_writers_single_endpoint: runtime
            .adaptive_floor_min_writers_single_endpoint,
        adaptive_floor_recover_grace_secs: runtime.adaptive_floor_recover_grace_secs,
        me_keepalive_enabled: runtime.me_keepalive_enabled,
        me_keepalive_interval_secs: runtime.me_keepalive_interval_secs,
        me_keepalive_jitter_secs: runtime.me_keepalive_jitter_secs,
        me_keepalive_payload_random: runtime.me_keepalive_payload_random,
        rpc_proxy_req_every_secs: runtime.rpc_proxy_req_every_secs,
        me_reconnect_max_concurrent_per_dc: runtime.me_reconnect_max_concurrent_per_dc,
        me_reconnect_backoff_base_ms: runtime.me_reconnect_backoff_base_ms,
        me_reconnect_backoff_cap_ms: runtime.me_reconnect_backoff_cap_ms,
        me_reconnect_fast_retry_count: runtime.me_reconnect_fast_retry_count,
        me_pool_drain_ttl_secs: runtime.me_pool_drain_ttl_secs,
        me_pool_force_close_secs: runtime.me_pool_force_close_secs,
        me_pool_min_fresh_ratio: runtime.me_pool_min_fresh_ratio,
        me_bind_stale_mode: runtime.me_bind_stale_mode,
        me_bind_stale_ttl_secs: runtime.me_bind_stale_ttl_secs,
        me_single_endpoint_shadow_writers: runtime.me_single_endpoint_shadow_writers,
        me_single_endpoint_outage_mode_enabled: runtime.me_single_endpoint_outage_mode_enabled,
        me_single_endpoint_outage_disable_quarantine: runtime
            .me_single_endpoint_outage_disable_quarantine,
        me_single_endpoint_outage_backoff_min_ms: runtime.me_single_endpoint_outage_backoff_min_ms,
        me_single_endpoint_outage_backoff_max_ms: runtime.me_single_endpoint_outage_backoff_max_ms,
        me_single_endpoint_shadow_rotate_every_secs: runtime
            .me_single_endpoint_shadow_rotate_every_secs,
        me_deterministic_writer_sort: runtime.me_deterministic_writer_sort,
        me_socks_kdf_policy: runtime.me_socks_kdf_policy,
        quarantined_endpoints_total: runtime.quarantined_endpoints.len(),
        quarantined_endpoints: runtime
            .quarantined_endpoints
            .into_iter()
            .map(|entry| MinimalQuarantineData {
                endpoint: entry.endpoint.to_string(),
                remaining_ms: entry.remaining_ms,
            })
            .collect(),
    };
    let network_path = runtime
        .network_path
        .into_iter()
        .map(|entry| MinimalDcPathData {
            dc: entry.dc,
            ip_preference: entry.ip_preference,
            selected_addr_v4: entry.selected_addr_v4.map(|value| value.to_string()),
            selected_addr_v6: entry.selected_addr_v6.map(|value| value.to_string()),
        })
        .collect();

    let payload = MinimalAllPayload {
        me_writers,
        dcs,
        me_runtime: Some(me_runtime),
        network_path,
    };

    // Write-back: the payload is cloned into the cache so the fresh copy can
    // still be returned to this caller by value.
    if cache_ttl_ms > 0 {
        let entry = MinimalCacheEntry {
            expires_at: Instant::now() + Duration::from_millis(cache_ttl_ms),
            payload: payload.clone(),
            generated_at_epoch_secs,
        };
        *shared.minimal_cache.lock().await = Some(entry);
    }

    Some((generated_at_epoch_secs, payload))
}
|
||||
|
||||
fn disabled_me_writers(now_epoch_secs: u64, reason: &'static str) -> MeWritersData {
|
||||
MeWritersData {
|
||||
middle_proxy_enabled: false,
|
||||
reason: Some(reason),
|
||||
generated_at_epoch_secs: now_epoch_secs,
|
||||
summary: MeWritersSummary {
|
||||
configured_dc_groups: 0,
|
||||
configured_endpoints: 0,
|
||||
available_endpoints: 0,
|
||||
available_pct: 0.0,
|
||||
required_writers: 0,
|
||||
alive_writers: 0,
|
||||
coverage_pct: 0.0,
|
||||
},
|
||||
writers: Vec::new(),
|
||||
}
|
||||
}
|
||||
|
||||
fn disabled_dcs(now_epoch_secs: u64, reason: &'static str) -> DcStatusData {
|
||||
DcStatusData {
|
||||
middle_proxy_enabled: false,
|
||||
reason: Some(reason),
|
||||
generated_at_epoch_secs: now_epoch_secs,
|
||||
dcs: Vec::new(),
|
||||
}
|
||||
}
|
||||
|
||||
/// Maps an upstream route kind to its wire-format string.
fn map_route_kind(value: UpstreamRouteKind) -> &'static str {
    match value {
        UpstreamRouteKind::Direct => "direct",
        UpstreamRouteKind::Socks4 => "socks4",
        UpstreamRouteKind::Socks5 => "socks5",
    }
}
|
||||
|
||||
/// Maps an IP family preference to its wire-format string.
fn map_ip_preference(value: IpPreference) -> &'static str {
    match value {
        IpPreference::Unknown => "unknown",
        IpPreference::PreferV6 => "prefer_v6",
        IpPreference::PreferV4 => "prefer_v4",
        IpPreference::BothWork => "both_work",
        IpPreference::Unavailable => "unavailable",
    }
}
|
||||
|
||||
/// Current Unix time in whole seconds, or 0 if the system clock reads
/// earlier than the epoch.
fn now_epoch_secs() -> u64 {
    SystemTime::now()
        .duration_since(UNIX_EPOCH)
        .map(|elapsed| elapsed.as_secs())
        .unwrap_or(0)
}
|
||||
66
src/api/runtime_watch.rs
Normal file
66
src/api/runtime_watch.rs
Normal file
@@ -0,0 +1,66 @@
|
||||
use std::sync::Arc;
|
||||
use std::sync::atomic::Ordering;
|
||||
use std::time::{SystemTime, UNIX_EPOCH};
|
||||
|
||||
use tokio::sync::watch;
|
||||
|
||||
use crate::config::ProxyConfig;
|
||||
|
||||
use super::ApiRuntimeState;
|
||||
use super::events::ApiEventStore;
|
||||
|
||||
/// Spawns two background tasks that mirror `watch` channels into the API
/// runtime state and event log:
///
/// * a config watcher that bumps the reload counter, records the reload
///   timestamp, and logs an event on every config update;
/// * an admission watcher that publishes the current "accepting new
///   connections" flag (including the initial value) and logs each change.
///
/// Both tasks exit when their sender side is dropped (`changed()` errors).
pub(super) fn spawn_runtime_watchers(
    config_rx: watch::Receiver<Arc<ProxyConfig>>,
    admission_rx: watch::Receiver<bool>,
    runtime_state: Arc<ApiRuntimeState>,
    runtime_events: Arc<ApiEventStore>,
) {
    let mut config_rx_reload = config_rx;
    // Clones for the first task; the originals are moved into the second.
    let runtime_state_reload = runtime_state.clone();
    let runtime_events_reload = runtime_events.clone();
    tokio::spawn(async move {
        loop {
            // `changed()` errors only when the sender is gone — shut down.
            if config_rx_reload.changed().await.is_err() {
                break;
            }
            runtime_state_reload
                .config_reload_count
                .fetch_add(1, Ordering::Relaxed);
            runtime_state_reload
                .last_config_reload_epoch_secs
                .store(now_epoch_secs(), Ordering::Relaxed);
            runtime_events_reload.record("config.reload.applied", "config receiver updated");
        }
    });

    let mut admission_rx_watch = admission_rx;
    tokio::spawn(async move {
        // Publish and log the initial admission state before waiting for
        // changes, so the API reflects it from startup.
        runtime_state
            .admission_open
            .store(*admission_rx_watch.borrow(), Ordering::Relaxed);
        runtime_events.record(
            "admission.state",
            format!("accepting_new_connections={}", *admission_rx_watch.borrow()),
        );
        loop {
            if admission_rx_watch.changed().await.is_err() {
                break;
            }
            let admission_open = *admission_rx_watch.borrow();
            runtime_state
                .admission_open
                .store(admission_open, Ordering::Relaxed);
            runtime_events.record(
                "admission.state",
                format!("accepting_new_connections={}", admission_open),
            );
        }
    });
}
|
||||
|
||||
/// Current wall-clock time as whole seconds since the Unix epoch; returns 0
/// when the clock is set before the epoch instead of panicking.
fn now_epoch_secs() -> u64 {
    UNIX_EPOCH.elapsed().unwrap_or_default().as_secs()
}
|
||||
227
src/api/runtime_zero.rs
Normal file
227
src/api/runtime_zero.rs
Normal file
@@ -0,0 +1,227 @@
|
||||
use std::sync::atomic::Ordering;
|
||||
|
||||
use serde::Serialize;
|
||||
|
||||
use crate::config::{MeFloorMode, ProxyConfig, UserMaxUniqueIpsMode};
|
||||
|
||||
use super::ApiShared;
|
||||
|
||||
/// Build/version and config-lifecycle metadata served by the system-info endpoint.
#[derive(Serialize)]
pub(super) struct SystemInfoData {
    pub(super) version: String,
    pub(super) target_arch: String,
    pub(super) target_os: String,
    pub(super) build_profile: String,
    // Optional build metadata: omitted from JSON when not baked in at compile time.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub(super) git_commit: Option<String>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub(super) build_time_utc: Option<String>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub(super) rustc_version: Option<String>,
    pub(super) process_started_at_epoch_secs: u64,
    pub(super) uptime_seconds: f64,
    pub(super) config_path: String,
    pub(super) config_hash: String,
    pub(super) config_reload_count: u64,
    // None until the first config reload (a stored epoch of 0 means "never").
    #[serde(skip_serializing_if = "Option::is_none")]
    pub(super) last_config_reload_epoch_secs: Option<u64>,
}
|
||||
|
||||
/// Boolean runtime gates reported by the API
/// (admission, middle-proxy readiness, fallback flags).
#[derive(Serialize)]
pub(super) struct RuntimeGatesData {
    pub(super) accepting_new_connections: bool,
    pub(super) conditional_cast_enabled: bool,
    pub(super) me_runtime_ready: bool,
    pub(super) me2dc_fallback_enabled: bool,
    pub(super) use_middle_proxy: bool,
}
|
||||
|
||||
/// Effective timeout settings; units are in the field names
/// (`_secs` = seconds, `_ms` = milliseconds).
#[derive(Serialize)]
pub(super) struct EffectiveTimeoutLimits {
    pub(super) client_handshake_secs: u64,
    pub(super) tg_connect_secs: u64,
    pub(super) client_keepalive_secs: u64,
    pub(super) client_ack_secs: u64,
    pub(super) me_one_retry: u8,
    pub(super) me_one_timeout_ms: u64,
}
|
||||
|
||||
/// Effective upstream connect retry/backoff policy.
#[derive(Serialize)]
pub(super) struct EffectiveUpstreamLimits {
    pub(super) connect_retry_attempts: u32,
    pub(super) connect_retry_backoff_ms: u64,
    pub(super) connect_budget_ms: u64,
    pub(super) unhealthy_fail_threshold: u32,
    pub(super) connect_failfast_hard_errors: bool,
}
|
||||
|
||||
/// Effective middle-proxy (ME) pool tuning: writer floor, reconnect
/// backoff and the DC fallback flag.
#[derive(Serialize)]
pub(super) struct EffectiveMiddleProxyLimits {
    // "static" or "adaptive" (see `me_floor_mode_label`).
    pub(super) floor_mode: &'static str,
    pub(super) adaptive_floor_idle_secs: u64,
    pub(super) adaptive_floor_min_writers_single_endpoint: u8,
    pub(super) adaptive_floor_recover_grace_secs: u64,
    pub(super) reconnect_max_concurrent_per_dc: u32,
    pub(super) reconnect_backoff_base_ms: u64,
    pub(super) reconnect_backoff_cap_ms: u64,
    pub(super) reconnect_fast_retry_count: u32,
    pub(super) me2dc_fallback: bool,
}
|
||||
|
||||
/// Effective per-user unique-IP limiting policy.
#[derive(Serialize)]
pub(super) struct EffectiveUserIpPolicyLimits {
    // Label from `user_max_unique_ips_mode_label`.
    pub(super) mode: &'static str,
    pub(super) window_secs: u64,
}
|
||||
|
||||
/// Aggregate of all effective limits exposed by the API.
#[derive(Serialize)]
pub(super) struct EffectiveLimitsData {
    pub(super) update_every_secs: u64,
    pub(super) me_reinit_every_secs: u64,
    pub(super) me_pool_force_close_secs: u64,
    pub(super) timeouts: EffectiveTimeoutLimits,
    pub(super) upstream: EffectiveUpstreamLimits,
    pub(super) middle_proxy: EffectiveMiddleProxyLimits,
    pub(super) user_ip_policy: EffectiveUserIpPolicyLimits,
}
|
||||
|
||||
/// Snapshot of security-relevant configuration: API protection,
/// proxy-protocol, log level and telemetry toggles.
#[derive(Serialize)]
pub(super) struct SecurityPostureData {
    pub(super) api_read_only: bool,
    pub(super) api_whitelist_enabled: bool,
    pub(super) api_whitelist_entries: usize,
    pub(super) api_auth_header_enabled: bool,
    pub(super) proxy_protocol_enabled: bool,
    pub(super) log_level: String,
    pub(super) telemetry_core_enabled: bool,
    pub(super) telemetry_user_enabled: bool,
    pub(super) telemetry_me_level: String,
}
|
||||
|
||||
/// Assembles the system-info payload from compile-time build metadata and the
/// shared runtime state. `revision` is the current config hash.
///
/// Note: every `option_env!`/`env!` here is resolved at *compile* time — the
/// values describe the build environment, not the running process.
pub(super) fn build_system_info_data(
    shared: &ApiShared,
    _cfg: &ProxyConfig,
    revision: &str,
) -> SystemInfoData {
    let last_reload_epoch_secs = shared
        .runtime_state
        .last_config_reload_epoch_secs
        .load(Ordering::Relaxed);
    // 0 is the "never reloaded" sentinel; map it to None so serde skips it.
    let last_config_reload_epoch_secs = (last_reload_epoch_secs > 0).then_some(last_reload_epoch_secs);

    // Accept several common env-var spellings so builds with either a custom
    // build script or `vergen` both populate the fields.
    let git_commit = option_env!("TELEMT_GIT_COMMIT")
        .or(option_env!("VERGEN_GIT_SHA"))
        .or(option_env!("GIT_COMMIT"))
        .map(ToString::to_string);
    let build_time_utc = option_env!("BUILD_TIME_UTC")
        .or(option_env!("VERGEN_BUILD_TIMESTAMP"))
        .map(ToString::to_string);
    let rustc_version = option_env!("RUSTC_VERSION")
        .or(option_env!("VERGEN_RUSTC_SEMVER"))
        .map(ToString::to_string);

    SystemInfoData {
        version: env!("CARGO_PKG_VERSION").to_string(),
        target_arch: std::env::consts::ARCH.to_string(),
        target_os: std::env::consts::OS.to_string(),
        // PROFILE is not exported by cargo itself — presumably set by a build
        // script; falls back to "unknown" otherwise. TODO confirm.
        build_profile: option_env!("PROFILE").unwrap_or("unknown").to_string(),
        git_commit,
        build_time_utc,
        rustc_version,
        process_started_at_epoch_secs: shared.runtime_state.process_started_at_epoch_secs,
        uptime_seconds: shared.stats.uptime_secs(),
        config_path: shared.config_path.display().to_string(),
        config_hash: revision.to_string(),
        config_reload_count: shared.runtime_state.config_reload_count.load(Ordering::Relaxed),
        last_config_reload_epoch_secs,
    }
}
|
||||
|
||||
pub(super) fn build_runtime_gates_data(shared: &ApiShared, cfg: &ProxyConfig) -> RuntimeGatesData {
|
||||
let me_runtime_ready = if !cfg.general.use_middle_proxy {
|
||||
true
|
||||
} else {
|
||||
shared
|
||||
.me_pool
|
||||
.as_ref()
|
||||
.map(|pool| pool.is_runtime_ready())
|
||||
.unwrap_or(false)
|
||||
};
|
||||
|
||||
RuntimeGatesData {
|
||||
accepting_new_connections: shared.runtime_state.admission_open.load(Ordering::Relaxed),
|
||||
conditional_cast_enabled: cfg.general.use_middle_proxy,
|
||||
me_runtime_ready,
|
||||
me2dc_fallback_enabled: cfg.general.me2dc_fallback,
|
||||
use_middle_proxy: cfg.general.use_middle_proxy,
|
||||
}
|
||||
}
|
||||
|
||||
/// Resolves the currently-effective limits from config. Top-level intervals go
/// through the config's `effective_*` accessors; everything else is copied
/// straight from the raw config fields.
pub(super) fn build_limits_effective_data(cfg: &ProxyConfig) -> EffectiveLimitsData {
    EffectiveLimitsData {
        update_every_secs: cfg.general.effective_update_every_secs(),
        me_reinit_every_secs: cfg.general.effective_me_reinit_every_secs(),
        me_pool_force_close_secs: cfg.general.effective_me_pool_force_close_secs(),
        timeouts: EffectiveTimeoutLimits {
            client_handshake_secs: cfg.timeouts.client_handshake,
            tg_connect_secs: cfg.timeouts.tg_connect,
            client_keepalive_secs: cfg.timeouts.client_keepalive,
            client_ack_secs: cfg.timeouts.client_ack,
            me_one_retry: cfg.timeouts.me_one_retry,
            me_one_timeout_ms: cfg.timeouts.me_one_timeout_ms,
        },
        upstream: EffectiveUpstreamLimits {
            connect_retry_attempts: cfg.general.upstream_connect_retry_attempts,
            connect_retry_backoff_ms: cfg.general.upstream_connect_retry_backoff_ms,
            connect_budget_ms: cfg.general.upstream_connect_budget_ms,
            unhealthy_fail_threshold: cfg.general.upstream_unhealthy_fail_threshold,
            connect_failfast_hard_errors: cfg.general.upstream_connect_failfast_hard_errors,
        },
        middle_proxy: EffectiveMiddleProxyLimits {
            // Enum → stable string label for JSON.
            floor_mode: me_floor_mode_label(cfg.general.me_floor_mode),
            adaptive_floor_idle_secs: cfg.general.me_adaptive_floor_idle_secs,
            adaptive_floor_min_writers_single_endpoint: cfg
                .general
                .me_adaptive_floor_min_writers_single_endpoint,
            adaptive_floor_recover_grace_secs: cfg.general.me_adaptive_floor_recover_grace_secs,
            reconnect_max_concurrent_per_dc: cfg.general.me_reconnect_max_concurrent_per_dc,
            reconnect_backoff_base_ms: cfg.general.me_reconnect_backoff_base_ms,
            reconnect_backoff_cap_ms: cfg.general.me_reconnect_backoff_cap_ms,
            reconnect_fast_retry_count: cfg.general.me_reconnect_fast_retry_count,
            me2dc_fallback: cfg.general.me2dc_fallback,
        },
        user_ip_policy: EffectiveUserIpPolicyLimits {
            mode: user_max_unique_ips_mode_label(cfg.access.user_max_unique_ips_mode),
            window_secs: cfg.access.user_max_unique_ips_window_secs,
        },
    }
}
|
||||
|
||||
/// Summarizes security-relevant configuration for the API.
pub(super) fn build_security_posture_data(cfg: &ProxyConfig) -> SecurityPostureData {
    SecurityPostureData {
        api_read_only: cfg.server.api.read_only,
        // Whitelist / auth header count as "enabled" whenever non-empty.
        api_whitelist_enabled: !cfg.server.api.whitelist.is_empty(),
        api_whitelist_entries: cfg.server.api.whitelist.len(),
        api_auth_header_enabled: !cfg.server.api.auth_header.is_empty(),
        proxy_protocol_enabled: cfg.server.proxy_protocol,
        log_level: cfg.general.log_level.to_string(),
        telemetry_core_enabled: cfg.general.telemetry.core_enabled,
        telemetry_user_enabled: cfg.general.telemetry.user_enabled,
        telemetry_me_level: cfg.general.telemetry.me_level.to_string(),
    }
}
|
||||
|
||||
fn user_max_unique_ips_mode_label(mode: UserMaxUniqueIpsMode) -> &'static str {
|
||||
match mode {
|
||||
UserMaxUniqueIpsMode::ActiveWindow => "active_window",
|
||||
UserMaxUniqueIpsMode::TimeWindow => "time_window",
|
||||
UserMaxUniqueIpsMode::Combined => "combined",
|
||||
}
|
||||
}
|
||||
|
||||
fn me_floor_mode_label(mode: MeFloorMode) -> &'static str {
|
||||
match mode {
|
||||
MeFloorMode::Static => "static",
|
||||
MeFloorMode::Adaptive => "adaptive",
|
||||
}
|
||||
}
|
||||
499
src/api/users.rs
Normal file
499
src/api/users.rs
Normal file
@@ -0,0 +1,499 @@
|
||||
use std::net::IpAddr;
|
||||
|
||||
use hyper::StatusCode;
|
||||
|
||||
use crate::config::ProxyConfig;
|
||||
use crate::ip_tracker::UserIpTracker;
|
||||
use crate::stats::Stats;
|
||||
|
||||
use super::ApiShared;
|
||||
use super::config_store::{
|
||||
ensure_expected_revision, load_config_from_disk, save_config_to_disk,
|
||||
};
|
||||
use super::model::{
|
||||
ApiFailure, CreateUserRequest, CreateUserResponse, PatchUserRequest, RotateSecretRequest,
|
||||
UserInfo, UserLinks, is_valid_ad_tag, is_valid_user_secret, is_valid_username,
|
||||
parse_optional_expiration, random_user_secret,
|
||||
};
|
||||
|
||||
/// Creates a new user in the on-disk config and returns the fresh user view,
/// the (possibly generated) secret, and the new config revision.
///
/// Input validation runs before the mutation lock is taken; the config is then
/// re-read from disk under the lock, checked against `expected_revision`,
/// mutated, validated and written back — serialized against other API mutations.
///
/// # Errors
/// 400 for invalid username/secret/ad-tag/expiration or failed config
/// validation; 409 when the user already exists; plus load/save failures.
pub(super) async fn create_user(
    body: CreateUserRequest,
    expected_revision: Option<String>,
    shared: &ApiShared,
) -> Result<(CreateUserResponse, String), ApiFailure> {
    if !is_valid_username(&body.username) {
        return Err(ApiFailure::bad_request(
            "username must match [A-Za-z0-9_.-] and be 1..64 chars",
        ));
    }

    // Use the caller's secret if valid, otherwise generate a random one.
    let secret = match body.secret {
        Some(secret) => {
            if !is_valid_user_secret(&secret) {
                return Err(ApiFailure::bad_request(
                    "secret must be exactly 32 hex characters",
                ));
            }
            secret
        }
        None => random_user_secret(),
    };

    if let Some(ad_tag) = body.user_ad_tag.as_ref() && !is_valid_ad_tag(ad_tag) {
        return Err(ApiFailure::bad_request(
            "user_ad_tag must be exactly 32 hex characters",
        ));
    }

    let expiration = parse_optional_expiration(body.expiration_rfc3339.as_deref())?;
    // Serialize all config mutations; revision check happens under the lock.
    let _guard = shared.mutation_lock.lock().await;
    let mut cfg = load_config_from_disk(&shared.config_path).await?;
    ensure_expected_revision(&shared.config_path, expected_revision.as_deref()).await?;

    if cfg.access.users.contains_key(&body.username) {
        return Err(ApiFailure::new(
            StatusCode::CONFLICT,
            "user_exists",
            "User already exists",
        ));
    }

    // Insert the user plus any optional per-user settings supplied in the body.
    cfg.access.users.insert(body.username.clone(), secret.clone());
    if let Some(ad_tag) = body.user_ad_tag {
        cfg.access.user_ad_tags.insert(body.username.clone(), ad_tag);
    }
    if let Some(limit) = body.max_tcp_conns {
        cfg.access.user_max_tcp_conns.insert(body.username.clone(), limit);
    }
    if let Some(expiration) = expiration {
        cfg.access
            .user_expirations
            .insert(body.username.clone(), expiration);
    }
    if let Some(quota) = body.data_quota_bytes {
        cfg.access.user_data_quota.insert(body.username.clone(), quota);
    }

    // Kept so the live IP tracker can be updated after the config is persisted.
    let updated_limit = body.max_unique_ips;
    if let Some(limit) = updated_limit {
        cfg.access
            .user_max_unique_ips
            .insert(body.username.clone(), limit);
    }

    cfg.validate()
        .map_err(|e| ApiFailure::bad_request(format!("config validation failed: {}", e)))?;

    let revision = save_config_to_disk(&shared.config_path, &cfg).await?;
    // Release the mutation lock before the slower tracker/view work below.
    drop(_guard);

    if let Some(limit) = updated_limit {
        shared.ip_tracker.set_user_limit(&body.username, limit).await;
    }

    let users = users_from_config(
        &cfg,
        &shared.stats,
        &shared.ip_tracker,
        shared.startup_detected_ip_v4,
        shared.startup_detected_ip_v6,
    )
    .await;
    // NOTE(review): the fallback UserInfo below (including its
    // build_user_links call) is evaluated eagerly by `unwrap_or` even when the
    // user is found — consider `unwrap_or_else`.
    let user = users
        .into_iter()
        .find(|entry| entry.username == body.username)
        .unwrap_or(UserInfo {
            username: body.username.clone(),
            user_ad_tag: None,
            max_tcp_conns: None,
            expiration_rfc3339: None,
            data_quota_bytes: None,
            max_unique_ips: updated_limit,
            current_connections: 0,
            active_unique_ips: 0,
            active_unique_ips_list: Vec::new(),
            recent_unique_ips: 0,
            recent_unique_ips_list: Vec::new(),
            total_octets: 0,
            links: build_user_links(
                &cfg,
                &secret,
                shared.startup_detected_ip_v4,
                shared.startup_detected_ip_v6,
            ),
        });

    Ok((CreateUserResponse { user, secret }, revision))
}
|
||||
|
||||
/// Applies a partial update to an existing user. Only fields present in
/// `body` are touched; absent fields keep their current values.
///
/// # Errors
/// 400 for invalid secret/ad-tag/expiration or failed config validation;
/// 404 when the user does not exist; plus load/save failures.
pub(super) async fn patch_user(
    user: &str,
    body: PatchUserRequest,
    expected_revision: Option<String>,
    shared: &ApiShared,
) -> Result<(UserInfo, String), ApiFailure> {
    // Validate before taking the mutation lock.
    if let Some(secret) = body.secret.as_ref() && !is_valid_user_secret(secret) {
        return Err(ApiFailure::bad_request(
            "secret must be exactly 32 hex characters",
        ));
    }
    if let Some(ad_tag) = body.user_ad_tag.as_ref() && !is_valid_ad_tag(ad_tag) {
        return Err(ApiFailure::bad_request(
            "user_ad_tag must be exactly 32 hex characters",
        ));
    }
    let expiration = parse_optional_expiration(body.expiration_rfc3339.as_deref())?;
    let _guard = shared.mutation_lock.lock().await;
    let mut cfg = load_config_from_disk(&shared.config_path).await?;
    ensure_expected_revision(&shared.config_path, expected_revision.as_deref()).await?;

    if !cfg.access.users.contains_key(user) {
        return Err(ApiFailure::new(
            StatusCode::NOT_FOUND,
            "not_found",
            "User not found",
        ));
    }

    if let Some(secret) = body.secret {
        cfg.access.users.insert(user.to_string(), secret);
    }
    if let Some(ad_tag) = body.user_ad_tag {
        cfg.access.user_ad_tags.insert(user.to_string(), ad_tag);
    }
    if let Some(limit) = body.max_tcp_conns {
        cfg.access.user_max_tcp_conns.insert(user.to_string(), limit);
    }
    if let Some(expiration) = expiration {
        cfg.access.user_expirations.insert(user.to_string(), expiration);
    }
    if let Some(quota) = body.data_quota_bytes {
        cfg.access.user_data_quota.insert(user.to_string(), quota);
    }

    // Remember the new IP limit so the live tracker can be updated after the
    // config is persisted.
    let mut updated_limit = None;
    if let Some(limit) = body.max_unique_ips {
        cfg.access.user_max_unique_ips.insert(user.to_string(), limit);
        updated_limit = Some(limit);
    }

    cfg.validate()
        .map_err(|e| ApiFailure::bad_request(format!("config validation failed: {}", e)))?;

    let revision = save_config_to_disk(&shared.config_path, &cfg).await?;
    // Release the mutation lock before the slower tracker/view work below.
    drop(_guard);
    if let Some(limit) = updated_limit {
        shared.ip_tracker.set_user_limit(user, limit).await;
    }
    let users = users_from_config(
        &cfg,
        &shared.stats,
        &shared.ip_tracker,
        shared.startup_detected_ip_v4,
        shared.startup_detected_ip_v6,
    )
    .await;
    let user_info = users
        .into_iter()
        .find(|entry| entry.username == user)
        .ok_or_else(|| ApiFailure::internal("failed to build updated user view"))?;

    Ok((user_info, revision))
}
|
||||
|
||||
/// Replaces a user's secret (caller-supplied or freshly generated) and returns
/// the updated user view plus the new config revision.
///
/// # Errors
/// 400 for an invalid secret or failed config validation; 404 when the user
/// does not exist; plus load/save failures.
pub(super) async fn rotate_secret(
    user: &str,
    body: RotateSecretRequest,
    expected_revision: Option<String>,
    shared: &ApiShared,
) -> Result<(CreateUserResponse, String), ApiFailure> {
    // Generate a secret if the caller did not provide one; either way it must
    // pass the same validation.
    let secret = body.secret.unwrap_or_else(random_user_secret);
    if !is_valid_user_secret(&secret) {
        return Err(ApiFailure::bad_request(
            "secret must be exactly 32 hex characters",
        ));
    }

    let _guard = shared.mutation_lock.lock().await;
    let mut cfg = load_config_from_disk(&shared.config_path).await?;
    ensure_expected_revision(&shared.config_path, expected_revision.as_deref()).await?;

    if !cfg.access.users.contains_key(user) {
        return Err(ApiFailure::new(
            StatusCode::NOT_FOUND,
            "not_found",
            "User not found",
        ));
    }

    cfg.access.users.insert(user.to_string(), secret.clone());
    cfg.validate()
        .map_err(|e| ApiFailure::bad_request(format!("config validation failed: {}", e)))?;
    let revision = save_config_to_disk(&shared.config_path, &cfg).await?;
    // Release the mutation lock before building the (slower) user view.
    drop(_guard);

    let users = users_from_config(
        &cfg,
        &shared.stats,
        &shared.ip_tracker,
        shared.startup_detected_ip_v4,
        shared.startup_detected_ip_v6,
    )
    .await;
    let user_info = users
        .into_iter()
        .find(|entry| entry.username == user)
        .ok_or_else(|| ApiFailure::internal("failed to build updated user view"))?;

    Ok((
        CreateUserResponse {
            user: user_info,
            secret,
        },
        revision,
    ))
}
|
||||
|
||||
/// Removes a user and all of their per-user settings from the config, then
/// clears the user's live IP-tracker state. Returns the deleted username and
/// the new config revision.
///
/// # Errors
/// 404 when the user does not exist; 409 when it is the last configured user
/// (deleting it would leave the proxy with no valid credentials); 400 on
/// failed config validation; plus load/save failures.
pub(super) async fn delete_user(
    user: &str,
    expected_revision: Option<String>,
    shared: &ApiShared,
) -> Result<(String, String), ApiFailure> {
    let _guard = shared.mutation_lock.lock().await;
    let mut cfg = load_config_from_disk(&shared.config_path).await?;
    ensure_expected_revision(&shared.config_path, expected_revision.as_deref()).await?;

    if !cfg.access.users.contains_key(user) {
        return Err(ApiFailure::new(
            StatusCode::NOT_FOUND,
            "not_found",
            "User not found",
        ));
    }
    if cfg.access.users.len() <= 1 {
        return Err(ApiFailure::new(
            StatusCode::CONFLICT,
            "last_user_forbidden",
            "Cannot delete the last configured user",
        ));
    }

    // Remove the user from every per-user map to avoid stale entries.
    cfg.access.users.remove(user);
    cfg.access.user_ad_tags.remove(user);
    cfg.access.user_max_tcp_conns.remove(user);
    cfg.access.user_expirations.remove(user);
    cfg.access.user_data_quota.remove(user);
    cfg.access.user_max_unique_ips.remove(user);

    cfg.validate()
        .map_err(|e| ApiFailure::bad_request(format!("config validation failed: {}", e)))?;
    let revision = save_config_to_disk(&shared.config_path, &cfg).await?;
    // Persisted; release the lock before touching the live tracker.
    drop(_guard);
    shared.ip_tracker.remove_user_limit(user).await;
    shared.ip_tracker.clear_user_ips(user).await;

    Ok((user.to_string(), revision))
}
|
||||
|
||||
pub(super) async fn users_from_config(
|
||||
cfg: &ProxyConfig,
|
||||
stats: &Stats,
|
||||
ip_tracker: &UserIpTracker,
|
||||
startup_detected_ip_v4: Option<IpAddr>,
|
||||
startup_detected_ip_v6: Option<IpAddr>,
|
||||
) -> Vec<UserInfo> {
|
||||
let mut names = cfg.access.users.keys().cloned().collect::<Vec<_>>();
|
||||
names.sort();
|
||||
let active_ip_lists = ip_tracker.get_active_ips_for_users(&names).await;
|
||||
let recent_ip_lists = ip_tracker.get_recent_ips_for_users(&names).await;
|
||||
|
||||
let mut users = Vec::with_capacity(names.len());
|
||||
for username in names {
|
||||
let active_ip_list = active_ip_lists
|
||||
.get(&username)
|
||||
.cloned()
|
||||
.unwrap_or_else(Vec::new);
|
||||
let recent_ip_list = recent_ip_lists
|
||||
.get(&username)
|
||||
.cloned()
|
||||
.unwrap_or_else(Vec::new);
|
||||
let links = cfg
|
||||
.access
|
||||
.users
|
||||
.get(&username)
|
||||
.map(|secret| {
|
||||
build_user_links(
|
||||
cfg,
|
||||
secret,
|
||||
startup_detected_ip_v4,
|
||||
startup_detected_ip_v6,
|
||||
)
|
||||
})
|
||||
.unwrap_or(UserLinks {
|
||||
classic: Vec::new(),
|
||||
secure: Vec::new(),
|
||||
tls: Vec::new(),
|
||||
});
|
||||
users.push(UserInfo {
|
||||
user_ad_tag: cfg.access.user_ad_tags.get(&username).cloned(),
|
||||
max_tcp_conns: cfg.access.user_max_tcp_conns.get(&username).copied(),
|
||||
expiration_rfc3339: cfg
|
||||
.access
|
||||
.user_expirations
|
||||
.get(&username)
|
||||
.map(chrono::DateTime::<chrono::Utc>::to_rfc3339),
|
||||
data_quota_bytes: cfg.access.user_data_quota.get(&username).copied(),
|
||||
max_unique_ips: cfg.access.user_max_unique_ips.get(&username).copied(),
|
||||
current_connections: stats.get_user_curr_connects(&username),
|
||||
active_unique_ips: active_ip_list.len(),
|
||||
active_unique_ips_list: active_ip_list,
|
||||
recent_unique_ips: recent_ip_list.len(),
|
||||
recent_unique_ips_list: recent_ip_list,
|
||||
total_octets: stats.get_user_total_octets(&username),
|
||||
links,
|
||||
username,
|
||||
});
|
||||
}
|
||||
users
|
||||
}
|
||||
|
||||
fn build_user_links(
|
||||
cfg: &ProxyConfig,
|
||||
secret: &str,
|
||||
startup_detected_ip_v4: Option<IpAddr>,
|
||||
startup_detected_ip_v6: Option<IpAddr>,
|
||||
) -> UserLinks {
|
||||
let hosts = resolve_link_hosts(cfg, startup_detected_ip_v4, startup_detected_ip_v6);
|
||||
let port = cfg.general.links.public_port.unwrap_or(cfg.server.port);
|
||||
let tls_domains = resolve_tls_domains(cfg);
|
||||
|
||||
let mut classic = Vec::new();
|
||||
let mut secure = Vec::new();
|
||||
let mut tls = Vec::new();
|
||||
|
||||
for host in &hosts {
|
||||
if cfg.general.modes.classic {
|
||||
classic.push(format!(
|
||||
"tg://proxy?server={}&port={}&secret={}",
|
||||
host, port, secret
|
||||
));
|
||||
}
|
||||
if cfg.general.modes.secure {
|
||||
secure.push(format!(
|
||||
"tg://proxy?server={}&port={}&secret=dd{}",
|
||||
host, port, secret
|
||||
));
|
||||
}
|
||||
if cfg.general.modes.tls {
|
||||
for domain in &tls_domains {
|
||||
let domain_hex = hex::encode(domain);
|
||||
tls.push(format!(
|
||||
"tg://proxy?server={}&port={}&secret=ee{}{}",
|
||||
host, port, secret, domain_hex
|
||||
));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
UserLinks {
|
||||
classic,
|
||||
secure,
|
||||
tls,
|
||||
}
|
||||
}
|
||||
|
||||
/// Chooses which host strings to embed in share links, in priority order:
/// 1. explicit `links.public_host` override,
/// 2. IPs auto-detected at startup,
/// 3. per-listener `announce` / `announce_ip` / bind IP,
/// 4. legacy `listen_addr_ipv4` / `listen_addr_ipv6` fields.
fn resolve_link_hosts(
    cfg: &ProxyConfig,
    startup_detected_ip_v4: Option<IpAddr>,
    startup_detected_ip_v6: Option<IpAddr>,
) -> Vec<String> {
    // 1. Explicit override wins outright (trimmed; blank values ignored).
    if let Some(host) = cfg
        .general
        .links
        .public_host
        .as_deref()
        .map(str::trim)
        .filter(|value| !value.is_empty())
    {
        return vec![host.to_string()];
    }

    // 2. Startup-detected public IPs, deduplicated.
    let mut startup_hosts = Vec::new();
    if let Some(ip) = startup_detected_ip_v4 {
        push_unique_host(&mut startup_hosts, &ip.to_string());
    }
    if let Some(ip) = startup_detected_ip_v6 {
        push_unique_host(&mut startup_hosts, &ip.to_string());
    }
    if !startup_hosts.is_empty() {
        return startup_hosts;
    }

    // 3. Listener-derived hosts: announce string > announce_ip > bind IP.
    //    Unspecified addresses (0.0.0.0 / ::) are never announced.
    let mut hosts = Vec::new();
    for listener in &cfg.server.listeners {
        if let Some(host) = listener
            .announce
            .as_deref()
            .map(str::trim)
            .filter(|value| !value.is_empty())
        {
            push_unique_host(&mut hosts, host);
            continue;
        }
        if let Some(ip) = listener.announce_ip {
            if !ip.is_unspecified() {
                push_unique_host(&mut hosts, &ip.to_string());
            }
            // An explicit announce_ip suppresses the bind-IP fallback even
            // when it is unspecified.
            continue;
        }
        if !listener.ip.is_unspecified() {
            push_unique_host(&mut hosts, &listener.ip.to_string());
        }
    }

    // 4. Legacy flat listen addresses, only when nothing else produced a host.
    if hosts.is_empty() {
        if let Some(host) = cfg.server.listen_addr_ipv4.as_deref() {
            push_host_from_legacy_listen(&mut hosts, host);
        }
        if let Some(host) = cfg.server.listen_addr_ipv6.as_deref() {
            push_host_from_legacy_listen(&mut hosts, host);
        }
    }

    hosts
}
|
||||
|
||||
fn push_host_from_legacy_listen(hosts: &mut Vec<String>, raw: &str) {
|
||||
let candidate = raw.trim();
|
||||
if candidate.is_empty() {
|
||||
return;
|
||||
}
|
||||
|
||||
match candidate.parse::<IpAddr>() {
|
||||
Ok(ip) if ip.is_unspecified() => {}
|
||||
Ok(ip) => push_unique_host(hosts, &ip.to_string()),
|
||||
Err(_) => push_unique_host(hosts, candidate),
|
||||
}
|
||||
}
|
||||
|
||||
/// Appends `candidate` to `hosts` only if it is not already present,
/// preserving insertion order.
fn push_unique_host(hosts: &mut Vec<String>, candidate: &str) {
    if hosts.iter().all(|existing| existing != candidate) {
        hosts.push(candidate.to_string());
    }
}
|
||||
|
||||
fn resolve_tls_domains(cfg: &ProxyConfig) -> Vec<&str> {
|
||||
let mut domains = Vec::with_capacity(1 + cfg.censorship.tls_domains.len());
|
||||
let primary = cfg.censorship.tls_domain.as_str();
|
||||
if !primary.is_empty() {
|
||||
domains.push(primary);
|
||||
}
|
||||
for domain in &cfg.censorship.tls_domains {
|
||||
let value = domain.as_str();
|
||||
if value.is_empty() || domains.contains(&value) {
|
||||
continue;
|
||||
}
|
||||
domains.push(value);
|
||||
}
|
||||
domains
|
||||
}
|
||||
17
src/cli.rs
17
src/cli.rs
@@ -189,10 +189,23 @@ r#"# Telemt MTProxy — auto-generated config
|
||||
show_link = ["{username}"]
|
||||
|
||||
[general]
|
||||
# prefer_ipv6 is deprecated; use [network].prefer
|
||||
prefer_ipv6 = false
|
||||
fast_mode = true
|
||||
use_middle_proxy = false
|
||||
log_level = "normal"
|
||||
desync_all_full = false
|
||||
update_every = 43200
|
||||
hardswap = false
|
||||
me_pool_drain_ttl_secs = 90
|
||||
me_pool_min_fresh_ratio = 0.8
|
||||
me_reinit_drain_timeout_secs = 120
|
||||
|
||||
[network]
|
||||
ipv4 = true
|
||||
ipv6 = true
|
||||
prefer = 4
|
||||
multipath = false
|
||||
|
||||
[general.modes]
|
||||
classic = false
|
||||
@@ -206,6 +219,7 @@ listen_addr_ipv6 = "::"
|
||||
|
||||
[[server.listeners]]
|
||||
ip = "0.0.0.0"
|
||||
# reuse_allow = false # Set true only when intentionally running multiple telemt instances on same port
|
||||
|
||||
[[server.listeners]]
|
||||
ip = "::"
|
||||
@@ -221,6 +235,7 @@ tls_domain = "{domain}"
|
||||
mask = true
|
||||
mask_port = 443
|
||||
fake_cert_len = 2048
|
||||
tls_full_cert_ttl_secs = 90
|
||||
|
||||
[access]
|
||||
replay_check_len = 65536
|
||||
@@ -297,4 +312,4 @@ fn print_links(username: &str, secret: &str, port: u16, domain: &str) {
|
||||
println!("The proxy will auto-detect and display the correct link on startup.");
|
||||
println!("Check: journalctl -u telemt.service | head -30");
|
||||
println!("===================");
|
||||
}
|
||||
}
|
||||
|
||||
546
src/config/defaults.rs
Normal file
546
src/config/defaults.rs
Normal file
@@ -0,0 +1,546 @@
|
||||
use std::collections::HashMap;
|
||||
use ipnetwork::IpNetwork;
|
||||
use serde::Deserialize;
|
||||
|
||||
// Helper defaults kept private to the config module.

// Network / STUN behavior.
const DEFAULT_NETWORK_IPV6: Option<bool> = Some(false);
const DEFAULT_STUN_TCP_FALLBACK: bool = true;

// Middle-proxy (ME) pool sizing and reconnect tuning.
const DEFAULT_MIDDLE_PROXY_WARM_STANDBY: usize = 16;
const DEFAULT_ME_RECONNECT_MAX_CONCURRENT_PER_DC: u32 = 8;
const DEFAULT_ME_RECONNECT_FAST_RETRY_COUNT: u32 = 16;
const DEFAULT_ME_SINGLE_ENDPOINT_SHADOW_WRITERS: u8 = 2;
const DEFAULT_ME_ADAPTIVE_FLOOR_IDLE_SECS: u64 = 90;
const DEFAULT_ME_ADAPTIVE_FLOOR_MIN_WRITERS_SINGLE_ENDPOINT: u8 = 1;
const DEFAULT_ME_ADAPTIVE_FLOOR_RECOVER_GRACE_SECS: u64 = 180;

// Per-user unique-IP accounting window.
const DEFAULT_USER_MAX_UNIQUE_IPS_WINDOW_SECS: u64 = 30;

// Upstream connect retry policy.
const DEFAULT_UPSTREAM_CONNECT_RETRY_ATTEMPTS: u32 = 2;
const DEFAULT_UPSTREAM_UNHEALTHY_FAIL_THRESHOLD: u32 = 5;
const DEFAULT_UPSTREAM_CONNECT_BUDGET_MS: u64 = 3000;

// Listener / access fallbacks.
const DEFAULT_LISTEN_ADDR_IPV6: &str = "::";
const DEFAULT_ACCESS_USER: &str = "default";
// Placeholder secret; real deployments are expected to replace it.
const DEFAULT_ACCESS_SECRET: &str = "00000000000000000000000000000000";
|
||||
|
||||
pub(crate) fn default_true() -> bool {
|
||||
true
|
||||
}
|
||||
|
||||
pub(crate) fn default_port() -> u16 {
|
||||
443
|
||||
}
|
||||
|
||||
pub(crate) fn default_tls_domain() -> String {
|
||||
"petrovich.ru".to_string()
|
||||
}
|
||||
|
||||
pub(crate) fn default_mask_port() -> u16 {
|
||||
443
|
||||
}
|
||||
|
||||
pub(crate) fn default_fake_cert_len() -> usize {
|
||||
2048
|
||||
}
|
||||
|
||||
pub(crate) fn default_tls_front_dir() -> String {
|
||||
"tlsfront".to_string()
|
||||
}
|
||||
|
||||
pub(crate) fn default_replay_check_len() -> usize {
|
||||
65_536
|
||||
}
|
||||
|
||||
pub(crate) fn default_replay_window_secs() -> u64 {
|
||||
1800
|
||||
}
|
||||
|
||||
pub(crate) fn default_handshake_timeout() -> u64 {
|
||||
30
|
||||
}
|
||||
|
||||
pub(crate) fn default_connect_timeout() -> u64 {
|
||||
10
|
||||
}
|
||||
|
||||
pub(crate) fn default_keepalive() -> u64 {
|
||||
60
|
||||
}
|
||||
|
||||
pub(crate) fn default_ack_timeout() -> u64 {
|
||||
300
|
||||
}
|
||||
pub(crate) fn default_me_one_retry() -> u8 {
|
||||
12
|
||||
}
|
||||
|
||||
pub(crate) fn default_me_one_timeout() -> u64 {
|
||||
1200
|
||||
}
|
||||
|
||||
pub(crate) fn default_listen_addr() -> String {
|
||||
"0.0.0.0".to_string()
|
||||
}
|
||||
|
||||
pub(crate) fn default_listen_addr_ipv4() -> Option<String> {
|
||||
Some(default_listen_addr())
|
||||
}
|
||||
|
||||
pub(crate) fn default_weight() -> u16 {
|
||||
1
|
||||
}
|
||||
|
||||
pub(crate) fn default_metrics_whitelist() -> Vec<IpNetwork> {
|
||||
vec![
|
||||
"127.0.0.1/32".parse().unwrap(),
|
||||
"::1/128".parse().unwrap(),
|
||||
]
|
||||
}
|
||||
|
||||
pub(crate) fn default_api_listen() -> String {
|
||||
"127.0.0.1:9091".to_string()
|
||||
}
|
||||
|
||||
pub(crate) fn default_api_whitelist() -> Vec<IpNetwork> {
|
||||
default_metrics_whitelist()
|
||||
}
|
||||
|
||||
pub(crate) fn default_api_request_body_limit_bytes() -> usize {
|
||||
64 * 1024
|
||||
}
|
||||
|
||||
pub(crate) fn default_api_minimal_runtime_enabled() -> bool {
|
||||
false
|
||||
}
|
||||
|
||||
pub(crate) fn default_api_minimal_runtime_cache_ttl_ms() -> u64 {
|
||||
1000
|
||||
}
|
||||
|
||||
pub(crate) fn default_api_runtime_edge_enabled() -> bool { false }
|
||||
pub(crate) fn default_api_runtime_edge_cache_ttl_ms() -> u64 { 1000 }
|
||||
pub(crate) fn default_api_runtime_edge_top_n() -> usize { 10 }
|
||||
pub(crate) fn default_api_runtime_edge_events_capacity() -> usize { 256 }
|
||||
|
||||
pub(crate) fn default_proxy_protocol_header_timeout_ms() -> u64 {
|
||||
500
|
||||
}
|
||||
|
||||
pub(crate) fn default_prefer_4() -> u8 {
|
||||
4
|
||||
}
|
||||
|
||||
pub(crate) fn default_network_ipv6() -> Option<bool> {
|
||||
DEFAULT_NETWORK_IPV6
|
||||
}
|
||||
|
||||
pub(crate) fn default_stun_tcp_fallback() -> bool {
|
||||
DEFAULT_STUN_TCP_FALLBACK
|
||||
}
|
||||
|
||||
pub(crate) fn default_unknown_dc_log_path() -> Option<String> {
|
||||
Some("unknown-dc.txt".to_string())
|
||||
}
|
||||
|
||||
pub(crate) fn default_unknown_dc_file_log_enabled() -> bool {
|
||||
false
|
||||
}
|
||||
|
||||
pub(crate) fn default_pool_size() -> usize {
|
||||
8
|
||||
}
|
||||
|
||||
pub(crate) fn default_proxy_secret_path() -> Option<String> {
|
||||
Some("proxy-secret".to_string())
|
||||
}
|
||||
|
||||
pub(crate) fn default_proxy_config_v4_cache_path() -> Option<String> {
|
||||
Some("cache/proxy-config-v4.txt".to_string())
|
||||
}
|
||||
|
||||
pub(crate) fn default_proxy_config_v6_cache_path() -> Option<String> {
|
||||
Some("cache/proxy-config-v6.txt".to_string())
|
||||
}
|
||||
|
||||
pub(crate) fn default_middle_proxy_nat_stun() -> Option<String> {
|
||||
None
|
||||
}
|
||||
|
||||
pub(crate) fn default_middle_proxy_nat_stun_servers() -> Vec<String> {
|
||||
Vec::new()
|
||||
}
|
||||
|
||||
pub(crate) fn default_stun_nat_probe_concurrency() -> usize {
|
||||
8
|
||||
}
|
||||
|
||||
pub(crate) fn default_middle_proxy_warm_standby() -> usize {
|
||||
DEFAULT_MIDDLE_PROXY_WARM_STANDBY
|
||||
}
|
||||
|
||||
pub(crate) fn default_me_init_retry_attempts() -> u32 {
|
||||
0
|
||||
}
|
||||
|
||||
pub(crate) fn default_me2dc_fallback() -> bool {
|
||||
true
|
||||
}
|
||||
|
||||
pub(crate) fn default_keepalive_interval() -> u64 {
|
||||
8
|
||||
}
|
||||
|
||||
pub(crate) fn default_keepalive_jitter() -> u64 {
|
||||
2
|
||||
}
|
||||
|
||||
pub(crate) fn default_warmup_step_delay_ms() -> u64 {
|
||||
500
|
||||
}
|
||||
|
||||
pub(crate) fn default_warmup_step_jitter_ms() -> u64 {
|
||||
300
|
||||
}
|
||||
|
||||
pub(crate) fn default_reconnect_backoff_base_ms() -> u64 {
|
||||
500
|
||||
}
|
||||
|
||||
pub(crate) fn default_reconnect_backoff_cap_ms() -> u64 {
|
||||
30_000
|
||||
}
|
||||
|
||||
pub(crate) fn default_me_reconnect_max_concurrent_per_dc() -> u32 {
|
||||
DEFAULT_ME_RECONNECT_MAX_CONCURRENT_PER_DC
|
||||
}
|
||||
|
||||
pub(crate) fn default_me_reconnect_fast_retry_count() -> u32 {
|
||||
DEFAULT_ME_RECONNECT_FAST_RETRY_COUNT
|
||||
}
|
||||
|
||||
pub(crate) fn default_me_single_endpoint_shadow_writers() -> u8 {
|
||||
DEFAULT_ME_SINGLE_ENDPOINT_SHADOW_WRITERS
|
||||
}
|
||||
|
||||
pub(crate) fn default_me_single_endpoint_outage_mode_enabled() -> bool {
|
||||
true
|
||||
}
|
||||
|
||||
pub(crate) fn default_me_single_endpoint_outage_disable_quarantine() -> bool {
|
||||
true
|
||||
}
|
||||
|
||||
pub(crate) fn default_me_single_endpoint_outage_backoff_min_ms() -> u64 {
|
||||
250
|
||||
}
|
||||
|
||||
pub(crate) fn default_me_single_endpoint_outage_backoff_max_ms() -> u64 {
|
||||
3000
|
||||
}
|
||||
|
||||
pub(crate) fn default_me_single_endpoint_shadow_rotate_every_secs() -> u64 {
|
||||
900
|
||||
}
|
||||
|
||||
pub(crate) fn default_me_adaptive_floor_idle_secs() -> u64 {
|
||||
DEFAULT_ME_ADAPTIVE_FLOOR_IDLE_SECS
|
||||
}
|
||||
|
||||
pub(crate) fn default_me_adaptive_floor_min_writers_single_endpoint() -> u8 {
|
||||
DEFAULT_ME_ADAPTIVE_FLOOR_MIN_WRITERS_SINGLE_ENDPOINT
|
||||
}
|
||||
|
||||
pub(crate) fn default_me_adaptive_floor_recover_grace_secs() -> u64 {
|
||||
DEFAULT_ME_ADAPTIVE_FLOOR_RECOVER_GRACE_SECS
|
||||
}
|
||||
|
||||
pub(crate) fn default_upstream_connect_retry_attempts() -> u32 {
|
||||
DEFAULT_UPSTREAM_CONNECT_RETRY_ATTEMPTS
|
||||
}
|
||||
|
||||
pub(crate) fn default_upstream_connect_retry_backoff_ms() -> u64 {
|
||||
100
|
||||
}
|
||||
|
||||
pub(crate) fn default_upstream_unhealthy_fail_threshold() -> u32 {
|
||||
DEFAULT_UPSTREAM_UNHEALTHY_FAIL_THRESHOLD
|
||||
}
|
||||
|
||||
pub(crate) fn default_upstream_connect_budget_ms() -> u64 {
|
||||
DEFAULT_UPSTREAM_CONNECT_BUDGET_MS
|
||||
}
|
||||
|
||||
pub(crate) fn default_upstream_connect_failfast_hard_errors() -> bool {
|
||||
false
|
||||
}
|
||||
|
||||
pub(crate) fn default_rpc_proxy_req_every() -> u64 {
|
||||
0
|
||||
}
|
||||
|
||||
pub(crate) fn default_crypto_pending_buffer() -> usize {
|
||||
256 * 1024
|
||||
}
|
||||
|
||||
pub(crate) fn default_max_client_frame() -> usize {
|
||||
16 * 1024 * 1024
|
||||
}
|
||||
|
||||
pub(crate) fn default_desync_all_full() -> bool {
|
||||
false
|
||||
}
|
||||
|
||||
pub(crate) fn default_me_route_backpressure_base_timeout_ms() -> u64 {
|
||||
25
|
||||
}
|
||||
|
||||
pub(crate) fn default_me_route_backpressure_high_timeout_ms() -> u64 {
|
||||
120
|
||||
}
|
||||
|
||||
pub(crate) fn default_me_route_backpressure_high_watermark_pct() -> u8 {
|
||||
80
|
||||
}
|
||||
|
||||
pub(crate) fn default_me_route_no_writer_wait_ms() -> u64 {
|
||||
250
|
||||
}
|
||||
|
||||
pub(crate) fn default_me_route_inline_recovery_attempts() -> u32 {
|
||||
3
|
||||
}
|
||||
|
||||
pub(crate) fn default_me_route_inline_recovery_wait_ms() -> u64 {
|
||||
3000
|
||||
}
|
||||
|
||||
pub(crate) fn default_beobachten_minutes() -> u64 {
|
||||
10
|
||||
}
|
||||
|
||||
pub(crate) fn default_beobachten_flush_secs() -> u64 {
|
||||
15
|
||||
}
|
||||
|
||||
pub(crate) fn default_beobachten_file() -> String {
|
||||
"cache/beobachten.txt".to_string()
|
||||
}
|
||||
|
||||
pub(crate) fn default_tls_new_session_tickets() -> u8 {
|
||||
0
|
||||
}
|
||||
|
||||
pub(crate) fn default_tls_full_cert_ttl_secs() -> u64 {
|
||||
90
|
||||
}
|
||||
|
||||
pub(crate) fn default_server_hello_delay_min_ms() -> u64 {
|
||||
0
|
||||
}
|
||||
|
||||
pub(crate) fn default_server_hello_delay_max_ms() -> u64 {
|
||||
0
|
||||
}
|
||||
|
||||
pub(crate) fn default_alpn_enforce() -> bool {
|
||||
true
|
||||
}
|
||||
|
||||
pub(crate) fn default_stun_servers() -> Vec<String> {
|
||||
vec![
|
||||
"stun.l.google.com:5349".to_string(),
|
||||
"stun1.l.google.com:3478".to_string(),
|
||||
"stun.gmx.net:3478".to_string(),
|
||||
"stun.l.google.com:19302".to_string(),
|
||||
"stun.1und1.de:3478".to_string(),
|
||||
"stun1.l.google.com:19302".to_string(),
|
||||
"stun2.l.google.com:19302".to_string(),
|
||||
"stun3.l.google.com:19302".to_string(),
|
||||
"stun4.l.google.com:19302".to_string(),
|
||||
"stun.services.mozilla.com:3478".to_string(),
|
||||
"stun.stunprotocol.org:3478".to_string(),
|
||||
"stun.nextcloud.com:3478".to_string(),
|
||||
"stun.voip.eutelia.it:3478".to_string(),
|
||||
]
|
||||
}
|
||||
|
||||
pub(crate) fn default_http_ip_detect_urls() -> Vec<String> {
|
||||
vec![
|
||||
"https://ifconfig.me/ip".to_string(),
|
||||
"https://api.ipify.org".to_string(),
|
||||
]
|
||||
}
|
||||
|
||||
pub(crate) fn default_cache_public_ip_path() -> String {
|
||||
"cache/public_ip.txt".to_string()
|
||||
}
|
||||
|
||||
pub(crate) fn default_proxy_secret_reload_secs() -> u64 {
|
||||
60 * 60
|
||||
}
|
||||
|
||||
pub(crate) fn default_proxy_config_reload_secs() -> u64 {
|
||||
60 * 60
|
||||
}
|
||||
|
||||
pub(crate) fn default_update_every_secs() -> u64 {
|
||||
5 * 60
|
||||
}
|
||||
|
||||
pub(crate) fn default_update_every() -> Option<u64> {
|
||||
Some(default_update_every_secs())
|
||||
}
|
||||
|
||||
pub(crate) fn default_me_reinit_every_secs() -> u64 {
|
||||
15 * 60
|
||||
}
|
||||
|
||||
pub(crate) fn default_me_reinit_singleflight() -> bool {
|
||||
true
|
||||
}
|
||||
|
||||
pub(crate) fn default_me_reinit_trigger_channel() -> usize {
|
||||
64
|
||||
}
|
||||
|
||||
pub(crate) fn default_me_reinit_coalesce_window_ms() -> u64 {
|
||||
200
|
||||
}
|
||||
|
||||
pub(crate) fn default_me_hardswap_warmup_delay_min_ms() -> u64 {
|
||||
1000
|
||||
}
|
||||
|
||||
pub(crate) fn default_me_hardswap_warmup_delay_max_ms() -> u64 {
|
||||
2000
|
||||
}
|
||||
|
||||
pub(crate) fn default_me_hardswap_warmup_extra_passes() -> u8 {
|
||||
3
|
||||
}
|
||||
|
||||
pub(crate) fn default_me_hardswap_warmup_pass_backoff_base_ms() -> u64 {
|
||||
500
|
||||
}
|
||||
|
||||
pub(crate) fn default_me_config_stable_snapshots() -> u8 {
|
||||
2
|
||||
}
|
||||
|
||||
pub(crate) fn default_me_config_apply_cooldown_secs() -> u64 {
|
||||
300
|
||||
}
|
||||
|
||||
pub(crate) fn default_me_snapshot_require_http_2xx() -> bool {
|
||||
true
|
||||
}
|
||||
|
||||
pub(crate) fn default_me_snapshot_reject_empty_map() -> bool {
|
||||
true
|
||||
}
|
||||
|
||||
pub(crate) fn default_me_snapshot_min_proxy_for_lines() -> u32 {
|
||||
1
|
||||
}
|
||||
|
||||
pub(crate) fn default_proxy_secret_stable_snapshots() -> u8 {
|
||||
2
|
||||
}
|
||||
|
||||
pub(crate) fn default_proxy_secret_rotate_runtime() -> bool {
|
||||
true
|
||||
}
|
||||
|
||||
pub(crate) fn default_me_secret_atomic_snapshot() -> bool {
|
||||
true
|
||||
}
|
||||
|
||||
pub(crate) fn default_proxy_secret_len_max() -> usize {
|
||||
256
|
||||
}
|
||||
|
||||
pub(crate) fn default_me_reinit_drain_timeout_secs() -> u64 {
|
||||
120
|
||||
}
|
||||
|
||||
pub(crate) fn default_me_pool_drain_ttl_secs() -> u64 {
|
||||
90
|
||||
}
|
||||
|
||||
pub(crate) fn default_me_bind_stale_ttl_secs() -> u64 {
|
||||
default_me_pool_drain_ttl_secs()
|
||||
}
|
||||
|
||||
pub(crate) fn default_me_pool_min_fresh_ratio() -> f32 {
|
||||
0.8
|
||||
}
|
||||
|
||||
pub(crate) fn default_me_deterministic_writer_sort() -> bool {
|
||||
true
|
||||
}
|
||||
|
||||
pub(crate) fn default_hardswap() -> bool {
|
||||
true
|
||||
}
|
||||
|
||||
pub(crate) fn default_ntp_check() -> bool {
|
||||
true
|
||||
}
|
||||
|
||||
pub(crate) fn default_ntp_servers() -> Vec<String> {
|
||||
vec!["pool.ntp.org".to_string()]
|
||||
}
|
||||
|
||||
pub(crate) fn default_fast_mode_min_tls_record() -> usize {
|
||||
0
|
||||
}
|
||||
|
||||
pub(crate) fn default_degradation_min_unavailable_dc_groups() -> u8 {
|
||||
2
|
||||
}
|
||||
|
||||
pub(crate) fn default_listen_addr_ipv6() -> String {
|
||||
DEFAULT_LISTEN_ADDR_IPV6.to_string()
|
||||
}
|
||||
|
||||
pub(crate) fn default_listen_addr_ipv6_opt() -> Option<String> {
|
||||
Some(default_listen_addr_ipv6())
|
||||
}
|
||||
|
||||
pub(crate) fn default_access_users() -> HashMap<String, String> {
|
||||
HashMap::from([(
|
||||
DEFAULT_ACCESS_USER.to_string(),
|
||||
DEFAULT_ACCESS_SECRET.to_string(),
|
||||
)])
|
||||
}
|
||||
|
||||
pub(crate) fn default_user_max_unique_ips_window_secs() -> u64 {
|
||||
DEFAULT_USER_MAX_UNIQUE_IPS_WINDOW_SECS
|
||||
}
|
||||
|
||||
// Custom deserializer helpers
|
||||
|
||||
#[derive(Deserialize)]
|
||||
#[serde(untagged)]
|
||||
pub(crate) enum OneOrMany {
|
||||
One(String),
|
||||
Many(Vec<String>),
|
||||
}
|
||||
|
||||
pub(crate) fn deserialize_dc_overrides<'de, D>(
|
||||
deserializer: D,
|
||||
) -> std::result::Result<HashMap<String, Vec<String>>, D::Error>
|
||||
where
|
||||
D: serde::de::Deserializer<'de>,
|
||||
{
|
||||
let raw: HashMap<String, OneOrMany> = HashMap::deserialize(deserializer)?;
|
||||
let mut out = HashMap::new();
|
||||
for (dc, val) in raw {
|
||||
let mut addrs = match val {
|
||||
OneOrMany::One(s) => vec![s],
|
||||
OneOrMany::Many(v) => v,
|
||||
};
|
||||
addrs.retain(|s| !s.trim().is_empty());
|
||||
if !addrs.is_empty() {
|
||||
out.insert(dc, addrs);
|
||||
}
|
||||
}
|
||||
Ok(out)
|
||||
}
|
||||
1066
src/config/hot_reload.rs
Normal file
1066
src/config/hot_reload.rs
Normal file
File diff suppressed because it is too large
Load Diff
1808
src/config/load.rs
Normal file
1808
src/config/load.rs
Normal file
File diff suppressed because it is too large
Load Diff
@@ -1,518 +1,9 @@
|
||||
//! Configuration
|
||||
//! Configuration.
|
||||
|
||||
use crate::error::{ProxyError, Result};
|
||||
use chrono::{DateTime, Utc};
|
||||
use serde::{Deserialize, Serialize};
|
||||
use std::collections::HashMap;
|
||||
use std::net::{IpAddr, SocketAddr};
|
||||
use std::path::Path;
|
||||
pub(crate) mod defaults;
|
||||
mod types;
|
||||
mod load;
|
||||
pub mod hot_reload;
|
||||
|
||||
// ============= Helper Defaults =============
|
||||
|
||||
fn default_true() -> bool {
|
||||
true
|
||||
}
|
||||
fn default_port() -> u16 {
|
||||
443
|
||||
}
|
||||
fn default_tls_domain() -> String {
|
||||
"www.google.com".to_string()
|
||||
}
|
||||
fn default_mask_port() -> u16 {
|
||||
443
|
||||
}
|
||||
fn default_replay_check_len() -> usize {
|
||||
65536
|
||||
}
|
||||
fn default_replay_window_secs() -> u64 {
|
||||
1800
|
||||
}
|
||||
fn default_handshake_timeout() -> u64 {
|
||||
15
|
||||
}
|
||||
fn default_connect_timeout() -> u64 {
|
||||
10
|
||||
}
|
||||
fn default_keepalive() -> u64 {
|
||||
60
|
||||
}
|
||||
fn default_ack_timeout() -> u64 {
|
||||
300
|
||||
}
|
||||
fn default_listen_addr() -> String {
|
||||
"0.0.0.0".to_string()
|
||||
}
|
||||
fn default_fake_cert_len() -> usize {
|
||||
2048
|
||||
}
|
||||
fn default_weight() -> u16 {
|
||||
1
|
||||
}
|
||||
fn default_metrics_whitelist() -> Vec<IpAddr> {
|
||||
vec!["127.0.0.1".parse().unwrap(), "::1".parse().unwrap()]
|
||||
}
|
||||
|
||||
// ============= Log Level =============
|
||||
|
||||
/// Logging verbosity level
|
||||
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize, Default)]
|
||||
#[serde(rename_all = "lowercase")]
|
||||
pub enum LogLevel {
|
||||
/// All messages including trace (trace + debug + info + warn + error)
|
||||
Debug,
|
||||
/// Detailed operational logs (debug + info + warn + error)
|
||||
Verbose,
|
||||
/// Standard operational logs (info + warn + error)
|
||||
#[default]
|
||||
Normal,
|
||||
/// Minimal output: only warnings and errors (warn + error).
|
||||
/// Startup messages (config, DC connectivity, proxy links) are always shown
|
||||
/// via info! before the filter is applied.
|
||||
Silent,
|
||||
}
|
||||
|
||||
impl LogLevel {
|
||||
/// Convert to tracing EnvFilter directive string
|
||||
pub fn to_filter_str(&self) -> &'static str {
|
||||
match self {
|
||||
LogLevel::Debug => "trace",
|
||||
LogLevel::Verbose => "debug",
|
||||
LogLevel::Normal => "info",
|
||||
LogLevel::Silent => "warn",
|
||||
}
|
||||
}
|
||||
|
||||
/// Parse from a loose string (CLI argument)
|
||||
pub fn from_str_loose(s: &str) -> Self {
|
||||
match s.to_lowercase().as_str() {
|
||||
"debug" | "trace" => LogLevel::Debug,
|
||||
"verbose" => LogLevel::Verbose,
|
||||
"normal" | "info" => LogLevel::Normal,
|
||||
"silent" | "quiet" | "error" | "warn" => LogLevel::Silent,
|
||||
_ => LogLevel::Normal,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl std::fmt::Display for LogLevel {
|
||||
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
||||
match self {
|
||||
LogLevel::Debug => write!(f, "debug"),
|
||||
LogLevel::Verbose => write!(f, "verbose"),
|
||||
LogLevel::Normal => write!(f, "normal"),
|
||||
LogLevel::Silent => write!(f, "silent"),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// ============= Sub-Configs =============
|
||||
|
||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||
pub struct ProxyModes {
|
||||
#[serde(default)]
|
||||
pub classic: bool,
|
||||
#[serde(default)]
|
||||
pub secure: bool,
|
||||
#[serde(default = "default_true")]
|
||||
pub tls: bool,
|
||||
}
|
||||
|
||||
impl Default for ProxyModes {
|
||||
fn default() -> Self {
|
||||
Self {
|
||||
classic: true,
|
||||
secure: true,
|
||||
tls: true,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||
pub struct GeneralConfig {
|
||||
#[serde(default)]
|
||||
pub modes: ProxyModes,
|
||||
|
||||
#[serde(default)]
|
||||
pub prefer_ipv6: bool,
|
||||
|
||||
#[serde(default = "default_true")]
|
||||
pub fast_mode: bool,
|
||||
|
||||
#[serde(default)]
|
||||
pub use_middle_proxy: bool,
|
||||
|
||||
#[serde(default)]
|
||||
pub ad_tag: Option<String>,
|
||||
|
||||
/// Path to proxy-secret binary file (auto-downloaded if absent).
|
||||
/// Infrastructure secret from https://core.telegram.org/getProxySecret
|
||||
#[serde(default)]
|
||||
pub proxy_secret_path: Option<String>,
|
||||
|
||||
/// Public IP override for middle-proxy NAT environments.
|
||||
/// When set, this IP is used in ME key derivation and RPC_PROXY_REQ "our_addr".
|
||||
#[serde(default)]
|
||||
pub middle_proxy_nat_ip: Option<IpAddr>,
|
||||
|
||||
/// Enable STUN-based NAT probing to discover public IP:port for ME KDF.
|
||||
#[serde(default)]
|
||||
pub middle_proxy_nat_probe: bool,
|
||||
|
||||
/// Optional STUN server address (host:port) for NAT probing.
|
||||
#[serde(default)]
|
||||
pub middle_proxy_nat_stun: Option<String>,
|
||||
|
||||
#[serde(default)]
|
||||
pub log_level: LogLevel,
|
||||
}
|
||||
|
||||
impl Default for GeneralConfig {
|
||||
fn default() -> Self {
|
||||
Self {
|
||||
modes: ProxyModes::default(),
|
||||
prefer_ipv6: false,
|
||||
fast_mode: true,
|
||||
use_middle_proxy: false,
|
||||
ad_tag: None,
|
||||
proxy_secret_path: None,
|
||||
middle_proxy_nat_ip: None,
|
||||
middle_proxy_nat_probe: false,
|
||||
middle_proxy_nat_stun: None,
|
||||
log_level: LogLevel::Normal,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||
pub struct ServerConfig {
|
||||
#[serde(default = "default_port")]
|
||||
pub port: u16,
|
||||
|
||||
#[serde(default = "default_listen_addr")]
|
||||
pub listen_addr_ipv4: String,
|
||||
|
||||
#[serde(default)]
|
||||
pub listen_addr_ipv6: Option<String>,
|
||||
|
||||
#[serde(default)]
|
||||
pub listen_unix_sock: Option<String>,
|
||||
|
||||
#[serde(default)]
|
||||
pub metrics_port: Option<u16>,
|
||||
|
||||
#[serde(default = "default_metrics_whitelist")]
|
||||
pub metrics_whitelist: Vec<IpAddr>,
|
||||
|
||||
#[serde(default)]
|
||||
pub listeners: Vec<ListenerConfig>,
|
||||
}
|
||||
|
||||
impl Default for ServerConfig {
|
||||
fn default() -> Self {
|
||||
Self {
|
||||
port: default_port(),
|
||||
listen_addr_ipv4: default_listen_addr(),
|
||||
listen_addr_ipv6: Some("::".to_string()),
|
||||
listen_unix_sock: None,
|
||||
metrics_port: None,
|
||||
metrics_whitelist: default_metrics_whitelist(),
|
||||
listeners: Vec::new(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||
pub struct TimeoutsConfig {
|
||||
#[serde(default = "default_handshake_timeout")]
|
||||
pub client_handshake: u64,
|
||||
|
||||
#[serde(default = "default_connect_timeout")]
|
||||
pub tg_connect: u64,
|
||||
|
||||
#[serde(default = "default_keepalive")]
|
||||
pub client_keepalive: u64,
|
||||
|
||||
#[serde(default = "default_ack_timeout")]
|
||||
pub client_ack: u64,
|
||||
}
|
||||
|
||||
impl Default for TimeoutsConfig {
|
||||
fn default() -> Self {
|
||||
Self {
|
||||
client_handshake: default_handshake_timeout(),
|
||||
tg_connect: default_connect_timeout(),
|
||||
client_keepalive: default_keepalive(),
|
||||
client_ack: default_ack_timeout(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||
pub struct AntiCensorshipConfig {
|
||||
#[serde(default = "default_tls_domain")]
|
||||
pub tls_domain: String,
|
||||
|
||||
#[serde(default = "default_true")]
|
||||
pub mask: bool,
|
||||
|
||||
#[serde(default)]
|
||||
pub mask_host: Option<String>,
|
||||
|
||||
#[serde(default = "default_mask_port")]
|
||||
pub mask_port: u16,
|
||||
|
||||
#[serde(default)]
|
||||
pub mask_unix_sock: Option<String>,
|
||||
|
||||
#[serde(default = "default_fake_cert_len")]
|
||||
pub fake_cert_len: usize,
|
||||
}
|
||||
|
||||
impl Default for AntiCensorshipConfig {
|
||||
fn default() -> Self {
|
||||
Self {
|
||||
tls_domain: default_tls_domain(),
|
||||
mask: true,
|
||||
mask_host: None,
|
||||
mask_port: default_mask_port(),
|
||||
mask_unix_sock: None,
|
||||
fake_cert_len: default_fake_cert_len(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||
pub struct AccessConfig {
|
||||
#[serde(default)]
|
||||
pub users: HashMap<String, String>,
|
||||
|
||||
#[serde(default)]
|
||||
pub user_max_tcp_conns: HashMap<String, usize>,
|
||||
|
||||
#[serde(default)]
|
||||
pub user_expirations: HashMap<String, DateTime<Utc>>,
|
||||
|
||||
#[serde(default)]
|
||||
pub user_data_quota: HashMap<String, u64>,
|
||||
|
||||
#[serde(default = "default_replay_check_len")]
|
||||
pub replay_check_len: usize,
|
||||
|
||||
#[serde(default = "default_replay_window_secs")]
|
||||
pub replay_window_secs: u64,
|
||||
|
||||
#[serde(default)]
|
||||
pub ignore_time_skew: bool,
|
||||
}
|
||||
|
||||
impl Default for AccessConfig {
|
||||
fn default() -> Self {
|
||||
let mut users = HashMap::new();
|
||||
users.insert(
|
||||
"default".to_string(),
|
||||
"00000000000000000000000000000000".to_string(),
|
||||
);
|
||||
Self {
|
||||
users,
|
||||
user_max_tcp_conns: HashMap::new(),
|
||||
user_expirations: HashMap::new(),
|
||||
user_data_quota: HashMap::new(),
|
||||
replay_check_len: default_replay_check_len(),
|
||||
replay_window_secs: default_replay_window_secs(),
|
||||
ignore_time_skew: false,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// ============= Aux Structures =============
|
||||
|
||||
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
|
||||
#[serde(tag = "type", rename_all = "lowercase")]
|
||||
pub enum UpstreamType {
|
||||
Direct {
|
||||
#[serde(default)]
|
||||
interface: Option<String>,
|
||||
},
|
||||
Socks4 {
|
||||
address: String,
|
||||
#[serde(default)]
|
||||
interface: Option<String>,
|
||||
#[serde(default)]
|
||||
user_id: Option<String>,
|
||||
},
|
||||
Socks5 {
|
||||
address: String,
|
||||
#[serde(default)]
|
||||
interface: Option<String>,
|
||||
#[serde(default)]
|
||||
username: Option<String>,
|
||||
#[serde(default)]
|
||||
password: Option<String>,
|
||||
},
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||
pub struct UpstreamConfig {
|
||||
#[serde(flatten)]
|
||||
pub upstream_type: UpstreamType,
|
||||
#[serde(default = "default_weight")]
|
||||
pub weight: u16,
|
||||
#[serde(default = "default_true")]
|
||||
pub enabled: bool,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||
pub struct ListenerConfig {
|
||||
pub ip: IpAddr,
|
||||
#[serde(default)]
|
||||
pub announce_ip: Option<IpAddr>,
|
||||
}
|
||||
|
||||
// ============= Main Config =============
|
||||
|
||||
#[derive(Debug, Clone, Serialize, Deserialize, Default)]
|
||||
pub struct ProxyConfig {
|
||||
#[serde(default)]
|
||||
pub general: GeneralConfig,
|
||||
|
||||
#[serde(default)]
|
||||
pub server: ServerConfig,
|
||||
|
||||
#[serde(default)]
|
||||
pub timeouts: TimeoutsConfig,
|
||||
|
||||
#[serde(default)]
|
||||
pub censorship: AntiCensorshipConfig,
|
||||
|
||||
#[serde(default)]
|
||||
pub access: AccessConfig,
|
||||
|
||||
#[serde(default)]
|
||||
pub upstreams: Vec<UpstreamConfig>,
|
||||
|
||||
#[serde(default)]
|
||||
pub show_link: Vec<String>,
|
||||
|
||||
/// DC address overrides for non-standard DCs (CDN, media, test, etc.)
|
||||
/// Keys are DC indices as strings, values are "ip:port" addresses.
|
||||
/// Matches the C implementation's `proxy_for <dc_id> <ip>:<port>` config directive.
|
||||
/// Example in config.toml:
|
||||
/// [dc_overrides]
|
||||
/// "203" = "149.154.175.100:443"
|
||||
#[serde(default)]
|
||||
pub dc_overrides: HashMap<String, String>,
|
||||
|
||||
/// Default DC index (1-5) for unmapped non-standard DCs.
|
||||
/// Matches the C implementation's `default <dc_id>` config directive.
|
||||
/// If not set, defaults to 2 (matching Telegram's official `default 2;` in proxy-multi.conf).
|
||||
#[serde(default)]
|
||||
pub default_dc: Option<u8>,
|
||||
}
|
||||
|
||||
impl ProxyConfig {
|
||||
pub fn load<P: AsRef<Path>>(path: P) -> Result<Self> {
|
||||
let content =
|
||||
std::fs::read_to_string(path).map_err(|e| ProxyError::Config(e.to_string()))?;
|
||||
|
||||
let mut config: ProxyConfig =
|
||||
toml::from_str(&content).map_err(|e| ProxyError::Config(e.to_string()))?;
|
||||
|
||||
// Validate secrets
|
||||
for (user, secret) in &config.access.users {
|
||||
if !secret.chars().all(|c| c.is_ascii_hexdigit()) || secret.len() != 32 {
|
||||
return Err(ProxyError::InvalidSecret {
|
||||
user: user.clone(),
|
||||
reason: "Must be 32 hex characters".to_string(),
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
// Validate tls_domain
|
||||
if config.censorship.tls_domain.is_empty() {
|
||||
return Err(ProxyError::Config("tls_domain cannot be empty".to_string()));
|
||||
}
|
||||
|
||||
// Validate mask_unix_sock
|
||||
if let Some(ref sock_path) = config.censorship.mask_unix_sock {
|
||||
if sock_path.is_empty() {
|
||||
return Err(ProxyError::Config(
|
||||
"mask_unix_sock cannot be empty".to_string(),
|
||||
));
|
||||
}
|
||||
#[cfg(unix)]
|
||||
if sock_path.len() > 107 {
|
||||
return Err(ProxyError::Config(format!(
|
||||
"mask_unix_sock path too long: {} bytes (max 107)",
|
||||
sock_path.len()
|
||||
)));
|
||||
}
|
||||
#[cfg(not(unix))]
|
||||
return Err(ProxyError::Config(
|
||||
"mask_unix_sock is only supported on Unix platforms".to_string(),
|
||||
));
|
||||
|
||||
if config.censorship.mask_host.is_some() {
|
||||
return Err(ProxyError::Config(
|
||||
"mask_unix_sock and mask_host are mutually exclusive".to_string(),
|
||||
));
|
||||
}
|
||||
}
|
||||
|
||||
// Default mask_host to tls_domain if not set and no unix socket configured
|
||||
if config.censorship.mask_host.is_none() && config.censorship.mask_unix_sock.is_none() {
|
||||
config.censorship.mask_host = Some(config.censorship.tls_domain.clone());
|
||||
}
|
||||
|
||||
// Random fake_cert_len
|
||||
use rand::Rng;
|
||||
config.censorship.fake_cert_len = rand::rng().gen_range(1024..4096);
|
||||
|
||||
// Migration: Populate listeners if empty
|
||||
if config.server.listeners.is_empty() {
|
||||
if let Ok(ipv4) = config.server.listen_addr_ipv4.parse::<IpAddr>() {
|
||||
config.server.listeners.push(ListenerConfig {
|
||||
ip: ipv4,
|
||||
announce_ip: None,
|
||||
});
|
||||
}
|
||||
if let Some(ipv6_str) = &config.server.listen_addr_ipv6 {
|
||||
if let Ok(ipv6) = ipv6_str.parse::<IpAddr>() {
|
||||
config.server.listeners.push(ListenerConfig {
|
||||
ip: ipv6,
|
||||
announce_ip: None,
|
||||
});
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Migration: Populate upstreams if empty (Default Direct)
|
||||
if config.upstreams.is_empty() {
|
||||
config.upstreams.push(UpstreamConfig {
|
||||
upstream_type: UpstreamType::Direct { interface: None },
|
||||
weight: 1,
|
||||
enabled: true,
|
||||
});
|
||||
}
|
||||
|
||||
Ok(config)
|
||||
}
|
||||
|
||||
pub fn validate(&self) -> Result<()> {
|
||||
if self.access.users.is_empty() {
|
||||
return Err(ProxyError::Config("No users configured".to_string()));
|
||||
}
|
||||
|
||||
if !self.general.modes.classic && !self.general.modes.secure && !self.general.modes.tls {
|
||||
return Err(ProxyError::Config("No modes enabled".to_string()));
|
||||
}
|
||||
|
||||
if self.censorship.tls_domain.contains(' ') || self.censorship.tls_domain.contains('/') {
|
||||
return Err(ProxyError::Config(format!(
|
||||
"Invalid tls_domain: '{}'. Must be a valid domain name",
|
||||
self.censorship.tls_domain
|
||||
)));
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
pub use load::ProxyConfig;
|
||||
pub use types::*;
|
||||
|
||||
1355
src/config/types.rs
Normal file
1355
src/config/types.rs
Normal file
File diff suppressed because it is too large
Load Diff
@@ -11,6 +11,8 @@
|
||||
//! `HandshakeSuccess`, `ObfuscationParams`) are responsible for
|
||||
//! zeroizing their own copies.
|
||||
|
||||
#![allow(dead_code)]
|
||||
|
||||
use aes::Aes256;
|
||||
use ctr::{Ctr128BE, cipher::{KeyIvInit, StreamCipher}};
|
||||
use zeroize::Zeroize;
|
||||
@@ -21,13 +23,13 @@ type Aes256Ctr = Ctr128BE<Aes256>;
|
||||
// ============= AES-256-CTR =============
|
||||
|
||||
/// AES-256-CTR encryptor/decryptor
|
||||
///
|
||||
///
|
||||
/// CTR mode is symmetric — encryption and decryption are the same operation.
|
||||
///
|
||||
/// **Zeroize note:** The inner `Aes256Ctr` cipher state (expanded key schedule
|
||||
/// + counter) is opaque and cannot be zeroized. If you need to protect key
|
||||
/// material, zeroize the `[u8; 32]` key and `u128` IV at the call site
|
||||
/// before dropping them.
|
||||
/// + counter) is opaque and cannot be zeroized. If you need to protect key
|
||||
/// material, zeroize the `[u8; 32]` key and `u128` IV at the call site
|
||||
/// before dropping them.
|
||||
pub struct AesCtr {
|
||||
cipher: Aes256Ctr,
|
||||
}
|
||||
@@ -147,7 +149,7 @@ impl AesCbc {
|
||||
///
|
||||
/// CBC Encryption: C[i] = AES_Encrypt(P[i] XOR C[i-1]), where C[-1] = IV
|
||||
pub fn encrypt(&self, data: &[u8]) -> Result<Vec<u8>> {
|
||||
if data.len() % Self::BLOCK_SIZE != 0 {
|
||||
if !data.len().is_multiple_of(Self::BLOCK_SIZE) {
|
||||
return Err(ProxyError::Crypto(
|
||||
format!("CBC data must be aligned to 16 bytes, got {}", data.len())
|
||||
));
|
||||
@@ -178,7 +180,7 @@ impl AesCbc {
|
||||
///
|
||||
/// CBC Decryption: P[i] = AES_Decrypt(C[i]) XOR C[i-1], where C[-1] = IV
|
||||
pub fn decrypt(&self, data: &[u8]) -> Result<Vec<u8>> {
|
||||
if data.len() % Self::BLOCK_SIZE != 0 {
|
||||
if !data.len().is_multiple_of(Self::BLOCK_SIZE) {
|
||||
return Err(ProxyError::Crypto(
|
||||
format!("CBC data must be aligned to 16 bytes, got {}", data.len())
|
||||
));
|
||||
@@ -207,7 +209,7 @@ impl AesCbc {
|
||||
|
||||
/// Encrypt data in-place
|
||||
pub fn encrypt_in_place(&self, data: &mut [u8]) -> Result<()> {
|
||||
if data.len() % Self::BLOCK_SIZE != 0 {
|
||||
if !data.len().is_multiple_of(Self::BLOCK_SIZE) {
|
||||
return Err(ProxyError::Crypto(
|
||||
format!("CBC data must be aligned to 16 bytes, got {}", data.len())
|
||||
));
|
||||
@@ -240,7 +242,7 @@ impl AesCbc {
|
||||
|
||||
/// Decrypt data in-place
|
||||
pub fn decrypt_in_place(&self, data: &mut [u8]) -> Result<()> {
|
||||
if data.len() % Self::BLOCK_SIZE != 0 {
|
||||
if !data.len().is_multiple_of(Self::BLOCK_SIZE) {
|
||||
return Err(ProxyError::Crypto(
|
||||
format!("CBC data must be aligned to 16 bytes, got {}", data.len())
|
||||
));
|
||||
|
||||
@@ -55,10 +55,16 @@ pub fn crc32(data: &[u8]) -> u32 {
|
||||
crc32fast::hash(data)
|
||||
}
|
||||
|
||||
/// CRC32C (Castagnoli)
|
||||
pub fn crc32c(data: &[u8]) -> u32 {
|
||||
crc32c::crc32c(data)
|
||||
}
|
||||
|
||||
/// Build the exact prekey buffer used by Telegram Middle Proxy KDF.
|
||||
///
|
||||
/// Returned buffer layout (IPv4):
|
||||
/// nonce_srv | nonce_clt | clt_ts | srv_ip | clt_port | purpose | clt_ip | srv_port | secret | nonce_srv | [clt_v6 | srv_v6] | nonce_clt
|
||||
#[allow(clippy::too_many_arguments)]
|
||||
pub fn build_middleproxy_prekey(
|
||||
nonce_srv: &[u8; 16],
|
||||
nonce_clt: &[u8; 16],
|
||||
@@ -103,6 +109,7 @@ pub fn build_middleproxy_prekey(
|
||||
/// Uses MD5 + SHA-1 as mandated by the Telegram Middle Proxy protocol.
|
||||
/// These algorithms are NOT replaceable here — changing them would break
|
||||
/// interoperability with Telegram's middle proxy infrastructure.
|
||||
#[allow(clippy::too_many_arguments)]
|
||||
pub fn derive_middleproxy_keys(
|
||||
nonce_srv: &[u8; 16],
|
||||
nonce_clt: &[u8; 16],
|
||||
@@ -172,7 +179,7 @@ mod tests {
|
||||
let digest = sha256(&prekey);
|
||||
assert_eq!(
|
||||
hex::encode(digest),
|
||||
"a4595b75f1f610f2575ace802ddc65c91b5acef3b0e0d18189e0c7c9f787d15c"
|
||||
"934f5facdafd65a44d5c2df90d2f35ddc81faaaeb337949dfeef817c8a7c1e00"
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -5,5 +5,7 @@ pub mod hash;
|
||||
pub mod random;
|
||||
|
||||
pub use aes::{AesCtr, AesCbc};
|
||||
pub use hash::{sha256, sha256_hmac, sha1, md5, crc32, derive_middleproxy_keys, build_middleproxy_prekey};
|
||||
pub use hash::{
|
||||
build_middleproxy_prekey, crc32, crc32c, derive_middleproxy_keys, sha256, sha256_hmac,
|
||||
};
|
||||
pub use random::SecureRandom;
|
||||
|
||||
@@ -1,5 +1,8 @@
|
||||
//! Pseudorandom
|
||||
|
||||
#![allow(deprecated)]
|
||||
#![allow(dead_code)]
|
||||
|
||||
use rand::{Rng, RngCore, SeedableRng};
|
||||
use rand::rngs::StdRng;
|
||||
use parking_lot::Mutex;
|
||||
@@ -11,10 +14,14 @@ pub struct SecureRandom {
|
||||
inner: Mutex<SecureRandomInner>,
|
||||
}
|
||||
|
||||
unsafe impl Send for SecureRandom {}
|
||||
unsafe impl Sync for SecureRandom {}
|
||||
|
||||
struct SecureRandomInner {
|
||||
rng: StdRng,
|
||||
cipher: AesCtr,
|
||||
buffer: Vec<u8>,
|
||||
buffer_start: usize,
|
||||
}
|
||||
|
||||
impl Drop for SecureRandomInner {
|
||||
@@ -42,23 +49,50 @@ impl SecureRandom {
|
||||
rng,
|
||||
cipher,
|
||||
buffer: Vec::with_capacity(1024),
|
||||
buffer_start: 0,
|
||||
}),
|
||||
}
|
||||
}
|
||||
|
||||
/// Generate random bytes
|
||||
pub fn bytes(&self, len: usize) -> Vec<u8> {
|
||||
/// Fill a caller-provided buffer with random bytes.
|
||||
pub fn fill(&self, out: &mut [u8]) {
|
||||
let mut inner = self.inner.lock();
|
||||
const CHUNK_SIZE: usize = 512;
|
||||
|
||||
while inner.buffer.len() < len {
|
||||
let mut chunk = vec![0u8; CHUNK_SIZE];
|
||||
inner.rng.fill_bytes(&mut chunk);
|
||||
inner.cipher.apply(&mut chunk);
|
||||
inner.buffer.extend_from_slice(&chunk);
|
||||
|
||||
let mut written = 0usize;
|
||||
while written < out.len() {
|
||||
if inner.buffer_start >= inner.buffer.len() {
|
||||
inner.buffer.clear();
|
||||
inner.buffer_start = 0;
|
||||
}
|
||||
|
||||
if inner.buffer.is_empty() {
|
||||
let mut chunk = vec![0u8; CHUNK_SIZE];
|
||||
inner.rng.fill_bytes(&mut chunk);
|
||||
inner.cipher.apply(&mut chunk);
|
||||
inner.buffer.extend_from_slice(&chunk);
|
||||
inner.buffer_start = 0;
|
||||
}
|
||||
|
||||
let available = inner.buffer.len().saturating_sub(inner.buffer_start);
|
||||
let take = (out.len() - written).min(available);
|
||||
let start = inner.buffer_start;
|
||||
let end = start + take;
|
||||
out[written..written + take].copy_from_slice(&inner.buffer[start..end]);
|
||||
inner.buffer_start = end;
|
||||
if inner.buffer_start >= inner.buffer.len() {
|
||||
inner.buffer.clear();
|
||||
inner.buffer_start = 0;
|
||||
}
|
||||
written += take;
|
||||
}
|
||||
|
||||
inner.buffer.drain(..len).collect()
|
||||
}
|
||||
|
||||
/// Generate random bytes
|
||||
pub fn bytes(&self, len: usize) -> Vec<u8> {
|
||||
let mut out = vec![0u8; len];
|
||||
self.fill(&mut out);
|
||||
out
|
||||
}
|
||||
|
||||
/// Generate random number in range [0, max)
|
||||
@@ -76,7 +110,7 @@ impl SecureRandom {
|
||||
return 0;
|
||||
}
|
||||
|
||||
let bytes_needed = (k + 7) / 8;
|
||||
let bytes_needed = k.div_ceil(8);
|
||||
let bytes = self.bytes(bytes_needed.min(8));
|
||||
|
||||
let mut result = 0u64;
|
||||
@@ -211,4 +245,4 @@ mod tests {
|
||||
|
||||
assert_ne!(shuffled, original);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
13
src/error.rs
13
src/error.rs
@@ -1,5 +1,7 @@
|
||||
//! Error Types
|
||||
|
||||
#![allow(dead_code)]
|
||||
|
||||
use std::fmt;
|
||||
use std::net::SocketAddr;
|
||||
use thiserror::Error;
|
||||
@@ -89,7 +91,7 @@ impl From<StreamError> for std::io::Error {
|
||||
std::io::Error::new(std::io::ErrorKind::UnexpectedEof, err)
|
||||
}
|
||||
StreamError::Poisoned { .. } => {
|
||||
std::io::Error::new(std::io::ErrorKind::Other, err)
|
||||
std::io::Error::other(err)
|
||||
}
|
||||
StreamError::BufferOverflow { .. } => {
|
||||
std::io::Error::new(std::io::ErrorKind::OutOfMemory, err)
|
||||
@@ -98,7 +100,7 @@ impl From<StreamError> for std::io::Error {
|
||||
std::io::Error::new(std::io::ErrorKind::InvalidData, err)
|
||||
}
|
||||
StreamError::PartialRead { .. } | StreamError::PartialWrite { .. } => {
|
||||
std::io::Error::new(std::io::ErrorKind::Other, err)
|
||||
std::io::Error::other(err)
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -133,12 +135,7 @@ impl Recoverable for StreamError {
|
||||
}
|
||||
|
||||
fn can_continue(&self) -> bool {
|
||||
match self {
|
||||
Self::Poisoned { .. } => false,
|
||||
Self::UnexpectedEof => false,
|
||||
Self::BufferOverflow { .. } => false,
|
||||
_ => true,
|
||||
}
|
||||
!matches!(self, Self::Poisoned { .. } | Self::UnexpectedEof | Self::BufferOverflow { .. })
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
573
src/ip_tracker.rs
Normal file
573
src/ip_tracker.rs
Normal file
@@ -0,0 +1,573 @@
|
||||
// IP address tracking and per-user unique IP limiting.
|
||||
|
||||
#![allow(dead_code)]
|
||||
|
||||
use std::collections::HashMap;
|
||||
use std::net::IpAddr;
|
||||
use std::sync::Arc;
|
||||
use std::time::{Duration, Instant};
|
||||
|
||||
use tokio::sync::RwLock;
|
||||
|
||||
use crate::config::UserMaxUniqueIpsMode;
|
||||
|
||||
#[derive(Debug, Clone)]
|
||||
pub struct UserIpTracker {
|
||||
active_ips: Arc<RwLock<HashMap<String, HashMap<IpAddr, usize>>>>,
|
||||
recent_ips: Arc<RwLock<HashMap<String, HashMap<IpAddr, Instant>>>>,
|
||||
max_ips: Arc<RwLock<HashMap<String, usize>>>,
|
||||
limit_mode: Arc<RwLock<UserMaxUniqueIpsMode>>,
|
||||
limit_window: Arc<RwLock<Duration>>,
|
||||
}
|
||||
|
||||
impl UserIpTracker {
|
||||
pub fn new() -> Self {
|
||||
Self {
|
||||
active_ips: Arc::new(RwLock::new(HashMap::new())),
|
||||
recent_ips: Arc::new(RwLock::new(HashMap::new())),
|
||||
max_ips: Arc::new(RwLock::new(HashMap::new())),
|
||||
limit_mode: Arc::new(RwLock::new(UserMaxUniqueIpsMode::ActiveWindow)),
|
||||
limit_window: Arc::new(RwLock::new(Duration::from_secs(30))),
|
||||
}
|
||||
}
|
||||
|
||||
pub async fn set_limit_policy(&self, mode: UserMaxUniqueIpsMode, window_secs: u64) {
|
||||
{
|
||||
let mut current_mode = self.limit_mode.write().await;
|
||||
*current_mode = mode;
|
||||
}
|
||||
let mut current_window = self.limit_window.write().await;
|
||||
*current_window = Duration::from_secs(window_secs.max(1));
|
||||
}
|
||||
|
||||
pub async fn set_user_limit(&self, username: &str, max_ips: usize) {
|
||||
let mut limits = self.max_ips.write().await;
|
||||
limits.insert(username.to_string(), max_ips);
|
||||
}
|
||||
|
||||
pub async fn remove_user_limit(&self, username: &str) {
|
||||
let mut limits = self.max_ips.write().await;
|
||||
limits.remove(username);
|
||||
}
|
||||
|
||||
pub async fn load_limits(&self, limits: &HashMap<String, usize>) {
|
||||
let mut max_ips = self.max_ips.write().await;
|
||||
max_ips.clone_from(limits);
|
||||
}
|
||||
|
||||
fn prune_recent(user_recent: &mut HashMap<IpAddr, Instant>, now: Instant, window: Duration) {
|
||||
if user_recent.is_empty() {
|
||||
return;
|
||||
}
|
||||
user_recent.retain(|_, seen_at| now.duration_since(*seen_at) <= window);
|
||||
}
|
||||
|
||||
pub async fn check_and_add(&self, username: &str, ip: IpAddr) -> Result<(), String> {
|
||||
let limit = {
|
||||
let max_ips = self.max_ips.read().await;
|
||||
max_ips.get(username).copied()
|
||||
};
|
||||
let mode = *self.limit_mode.read().await;
|
||||
let window = *self.limit_window.read().await;
|
||||
let now = Instant::now();
|
||||
|
||||
let mut active_ips = self.active_ips.write().await;
|
||||
let user_active = active_ips
|
||||
.entry(username.to_string())
|
||||
.or_insert_with(HashMap::new);
|
||||
|
||||
let mut recent_ips = self.recent_ips.write().await;
|
||||
let user_recent = recent_ips
|
||||
.entry(username.to_string())
|
||||
.or_insert_with(HashMap::new);
|
||||
Self::prune_recent(user_recent, now, window);
|
||||
|
||||
if let Some(count) = user_active.get_mut(&ip) {
|
||||
*count = count.saturating_add(1);
|
||||
user_recent.insert(ip, now);
|
||||
return Ok(());
|
||||
}
|
||||
|
||||
if let Some(limit) = limit {
|
||||
let active_limit_reached = user_active.len() >= limit;
|
||||
let recent_limit_reached = user_recent.len() >= limit;
|
||||
let deny = match mode {
|
||||
UserMaxUniqueIpsMode::ActiveWindow => active_limit_reached,
|
||||
UserMaxUniqueIpsMode::TimeWindow => recent_limit_reached,
|
||||
UserMaxUniqueIpsMode::Combined => active_limit_reached || recent_limit_reached,
|
||||
};
|
||||
|
||||
if deny {
|
||||
return Err(format!(
|
||||
"IP limit reached for user '{}': active={}/{} recent={}/{} mode={:?}",
|
||||
username,
|
||||
user_active.len(),
|
||||
limit,
|
||||
user_recent.len(),
|
||||
limit,
|
||||
mode
|
||||
));
|
||||
}
|
||||
}
|
||||
|
||||
user_active.insert(ip, 1);
|
||||
user_recent.insert(ip, now);
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub async fn remove_ip(&self, username: &str, ip: IpAddr) {
|
||||
let mut active_ips = self.active_ips.write().await;
|
||||
if let Some(user_ips) = active_ips.get_mut(username) {
|
||||
if let Some(count) = user_ips.get_mut(&ip) {
|
||||
if *count > 1 {
|
||||
*count -= 1;
|
||||
} else {
|
||||
user_ips.remove(&ip);
|
||||
}
|
||||
}
|
||||
if user_ips.is_empty() {
|
||||
active_ips.remove(username);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub async fn get_recent_counts_for_users(&self, users: &[String]) -> HashMap<String, usize> {
|
||||
let window = *self.limit_window.read().await;
|
||||
let now = Instant::now();
|
||||
let recent_ips = self.recent_ips.read().await;
|
||||
|
||||
let mut counts = HashMap::with_capacity(users.len());
|
||||
for user in users {
|
||||
let count = if let Some(user_recent) = recent_ips.get(user) {
|
||||
user_recent
|
||||
.values()
|
||||
.filter(|seen_at| now.duration_since(**seen_at) <= window)
|
||||
.count()
|
||||
} else {
|
||||
0
|
||||
};
|
||||
counts.insert(user.clone(), count);
|
||||
}
|
||||
counts
|
||||
}
|
||||
|
||||
pub async fn get_active_ips_for_users(&self, users: &[String]) -> HashMap<String, Vec<IpAddr>> {
|
||||
let active_ips = self.active_ips.read().await;
|
||||
let mut out = HashMap::with_capacity(users.len());
|
||||
for user in users {
|
||||
let mut ips = active_ips
|
||||
.get(user)
|
||||
.map(|per_ip| per_ip.keys().copied().collect::<Vec<_>>())
|
||||
.unwrap_or_else(Vec::new);
|
||||
ips.sort();
|
||||
out.insert(user.clone(), ips);
|
||||
}
|
||||
out
|
||||
}
|
||||
|
||||
pub async fn get_recent_ips_for_users(&self, users: &[String]) -> HashMap<String, Vec<IpAddr>> {
|
||||
let window = *self.limit_window.read().await;
|
||||
let now = Instant::now();
|
||||
let recent_ips = self.recent_ips.read().await;
|
||||
|
||||
let mut out = HashMap::with_capacity(users.len());
|
||||
for user in users {
|
||||
let mut ips = if let Some(user_recent) = recent_ips.get(user) {
|
||||
user_recent
|
||||
.iter()
|
||||
.filter(|(_, seen_at)| now.duration_since(**seen_at) <= window)
|
||||
.map(|(ip, _)| *ip)
|
||||
.collect::<Vec<_>>()
|
||||
} else {
|
||||
Vec::new()
|
||||
};
|
||||
ips.sort();
|
||||
out.insert(user.clone(), ips);
|
||||
}
|
||||
out
|
||||
}
|
||||
|
||||
pub async fn get_active_ip_count(&self, username: &str) -> usize {
|
||||
let active_ips = self.active_ips.read().await;
|
||||
active_ips.get(username).map(|ips| ips.len()).unwrap_or(0)
|
||||
}
|
||||
|
||||
pub async fn get_active_ips(&self, username: &str) -> Vec<IpAddr> {
|
||||
let active_ips = self.active_ips.read().await;
|
||||
active_ips
|
||||
.get(username)
|
||||
.map(|ips| ips.keys().copied().collect())
|
||||
.unwrap_or_else(Vec::new)
|
||||
}
|
||||
|
||||
pub async fn get_stats(&self) -> Vec<(String, usize, usize)> {
|
||||
let active_ips = self.active_ips.read().await;
|
||||
let max_ips = self.max_ips.read().await;
|
||||
|
||||
let mut stats = Vec::new();
|
||||
for (username, user_ips) in active_ips.iter() {
|
||||
let limit = max_ips.get(username).copied().unwrap_or(0);
|
||||
stats.push((username.clone(), user_ips.len(), limit));
|
||||
}
|
||||
|
||||
stats.sort_by(|a, b| a.0.cmp(&b.0));
|
||||
stats
|
||||
}
|
||||
|
||||
pub async fn clear_user_ips(&self, username: &str) {
|
||||
let mut active_ips = self.active_ips.write().await;
|
||||
active_ips.remove(username);
|
||||
drop(active_ips);
|
||||
|
||||
let mut recent_ips = self.recent_ips.write().await;
|
||||
recent_ips.remove(username);
|
||||
}
|
||||
|
||||
pub async fn clear_all(&self) {
|
||||
let mut active_ips = self.active_ips.write().await;
|
||||
active_ips.clear();
|
||||
drop(active_ips);
|
||||
|
||||
let mut recent_ips = self.recent_ips.write().await;
|
||||
recent_ips.clear();
|
||||
}
|
||||
|
||||
pub async fn is_ip_active(&self, username: &str, ip: IpAddr) -> bool {
|
||||
let active_ips = self.active_ips.read().await;
|
||||
active_ips
|
||||
.get(username)
|
||||
.map(|ips| ips.contains_key(&ip))
|
||||
.unwrap_or(false)
|
||||
}
|
||||
|
||||
pub async fn get_user_limit(&self, username: &str) -> Option<usize> {
|
||||
let max_ips = self.max_ips.read().await;
|
||||
max_ips.get(username).copied()
|
||||
}
|
||||
|
||||
pub async fn format_stats(&self) -> String {
|
||||
let stats = self.get_stats().await;
|
||||
|
||||
if stats.is_empty() {
|
||||
return String::from("No active users");
|
||||
}
|
||||
|
||||
let mut output = String::from("User IP Statistics:\n");
|
||||
output.push_str("==================\n");
|
||||
|
||||
for (username, active_count, limit) in stats {
|
||||
output.push_str(&format!(
|
||||
"User: {:<20} Active IPs: {}/{}\n",
|
||||
username,
|
||||
active_count,
|
||||
if limit > 0 {
|
||||
limit.to_string()
|
||||
} else {
|
||||
"unlimited".to_string()
|
||||
}
|
||||
));
|
||||
|
||||
let ips = self.get_active_ips(&username).await;
|
||||
for ip in ips {
|
||||
output.push_str(&format!(" - {}\n", ip));
|
||||
}
|
||||
}
|
||||
|
||||
output
|
||||
}
|
||||
}
|
||||
|
||||
impl Default for UserIpTracker {
|
||||
fn default() -> Self {
|
||||
Self::new()
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
use std::net::{IpAddr, Ipv4Addr, Ipv6Addr};
|
||||
|
||||
fn test_ipv4(oct1: u8, oct2: u8, oct3: u8, oct4: u8) -> IpAddr {
|
||||
IpAddr::V4(Ipv4Addr::new(oct1, oct2, oct3, oct4))
|
||||
}
|
||||
|
||||
fn test_ipv6() -> IpAddr {
|
||||
IpAddr::V6(Ipv6Addr::new(0x2001, 0xdb8, 0, 0, 0, 0, 0, 1))
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_basic_ip_limit() {
|
||||
let tracker = UserIpTracker::new();
|
||||
tracker.set_user_limit("test_user", 2).await;
|
||||
|
||||
let ip1 = test_ipv4(192, 168, 1, 1);
|
||||
let ip2 = test_ipv4(192, 168, 1, 2);
|
||||
let ip3 = test_ipv4(192, 168, 1, 3);
|
||||
|
||||
assert!(tracker.check_and_add("test_user", ip1).await.is_ok());
|
||||
assert!(tracker.check_and_add("test_user", ip2).await.is_ok());
|
||||
assert!(tracker.check_and_add("test_user", ip3).await.is_err());
|
||||
|
||||
assert_eq!(tracker.get_active_ip_count("test_user").await, 2);
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_active_window_rejects_new_ip_and_keeps_existing_session() {
|
||||
let tracker = UserIpTracker::new();
|
||||
tracker.set_user_limit("test_user", 1).await;
|
||||
tracker
|
||||
.set_limit_policy(UserMaxUniqueIpsMode::ActiveWindow, 30)
|
||||
.await;
|
||||
|
||||
let ip1 = test_ipv4(10, 10, 10, 1);
|
||||
let ip2 = test_ipv4(10, 10, 10, 2);
|
||||
|
||||
assert!(tracker.check_and_add("test_user", ip1).await.is_ok());
|
||||
assert!(tracker.is_ip_active("test_user", ip1).await);
|
||||
assert!(tracker.check_and_add("test_user", ip2).await.is_err());
|
||||
|
||||
// Existing session remains active; only new unique IP is denied.
|
||||
assert!(tracker.is_ip_active("test_user", ip1).await);
|
||||
assert_eq!(tracker.get_active_ip_count("test_user").await, 1);
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_reconnection_from_same_ip() {
|
||||
let tracker = UserIpTracker::new();
|
||||
tracker.set_user_limit("test_user", 2).await;
|
||||
|
||||
let ip1 = test_ipv4(192, 168, 1, 1);
|
||||
|
||||
assert!(tracker.check_and_add("test_user", ip1).await.is_ok());
|
||||
assert!(tracker.check_and_add("test_user", ip1).await.is_ok());
|
||||
assert_eq!(tracker.get_active_ip_count("test_user").await, 1);
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_same_ip_disconnect_keeps_active_while_other_session_alive() {
|
||||
let tracker = UserIpTracker::new();
|
||||
tracker.set_user_limit("test_user", 2).await;
|
||||
|
||||
let ip1 = test_ipv4(192, 168, 1, 1);
|
||||
|
||||
assert!(tracker.check_and_add("test_user", ip1).await.is_ok());
|
||||
assert!(tracker.check_and_add("test_user", ip1).await.is_ok());
|
||||
assert_eq!(tracker.get_active_ip_count("test_user").await, 1);
|
||||
|
||||
tracker.remove_ip("test_user", ip1).await;
|
||||
assert_eq!(tracker.get_active_ip_count("test_user").await, 1);
|
||||
|
||||
tracker.remove_ip("test_user", ip1).await;
|
||||
assert_eq!(tracker.get_active_ip_count("test_user").await, 0);
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_ip_removal() {
|
||||
let tracker = UserIpTracker::new();
|
||||
tracker.set_user_limit("test_user", 2).await;
|
||||
|
||||
let ip1 = test_ipv4(192, 168, 1, 1);
|
||||
let ip2 = test_ipv4(192, 168, 1, 2);
|
||||
let ip3 = test_ipv4(192, 168, 1, 3);
|
||||
|
||||
assert!(tracker.check_and_add("test_user", ip1).await.is_ok());
|
||||
assert!(tracker.check_and_add("test_user", ip2).await.is_ok());
|
||||
assert!(tracker.check_and_add("test_user", ip3).await.is_err());
|
||||
|
||||
tracker.remove_ip("test_user", ip1).await;
|
||||
|
||||
assert!(tracker.check_and_add("test_user", ip3).await.is_ok());
|
||||
assert_eq!(tracker.get_active_ip_count("test_user").await, 2);
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_no_limit() {
|
||||
let tracker = UserIpTracker::new();
|
||||
|
||||
let ip1 = test_ipv4(192, 168, 1, 1);
|
||||
let ip2 = test_ipv4(192, 168, 1, 2);
|
||||
let ip3 = test_ipv4(192, 168, 1, 3);
|
||||
|
||||
assert!(tracker.check_and_add("test_user", ip1).await.is_ok());
|
||||
assert!(tracker.check_and_add("test_user", ip2).await.is_ok());
|
||||
assert!(tracker.check_and_add("test_user", ip3).await.is_ok());
|
||||
|
||||
assert_eq!(tracker.get_active_ip_count("test_user").await, 3);
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_multiple_users() {
|
||||
let tracker = UserIpTracker::new();
|
||||
tracker.set_user_limit("user1", 2).await;
|
||||
tracker.set_user_limit("user2", 1).await;
|
||||
|
||||
let ip1 = test_ipv4(192, 168, 1, 1);
|
||||
let ip2 = test_ipv4(192, 168, 1, 2);
|
||||
|
||||
assert!(tracker.check_and_add("user1", ip1).await.is_ok());
|
||||
assert!(tracker.check_and_add("user1", ip2).await.is_ok());
|
||||
|
||||
assert!(tracker.check_and_add("user2", ip1).await.is_ok());
|
||||
assert!(tracker.check_and_add("user2", ip2).await.is_err());
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_ipv6_support() {
|
||||
let tracker = UserIpTracker::new();
|
||||
tracker.set_user_limit("test_user", 2).await;
|
||||
|
||||
let ipv4 = test_ipv4(192, 168, 1, 1);
|
||||
let ipv6 = test_ipv6();
|
||||
|
||||
assert!(tracker.check_and_add("test_user", ipv4).await.is_ok());
|
||||
assert!(tracker.check_and_add("test_user", ipv6).await.is_ok());
|
||||
|
||||
assert_eq!(tracker.get_active_ip_count("test_user").await, 2);
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_get_active_ips() {
|
||||
let tracker = UserIpTracker::new();
|
||||
tracker.set_user_limit("test_user", 3).await;
|
||||
|
||||
let ip1 = test_ipv4(192, 168, 1, 1);
|
||||
let ip2 = test_ipv4(192, 168, 1, 2);
|
||||
|
||||
tracker.check_and_add("test_user", ip1).await.unwrap();
|
||||
tracker.check_and_add("test_user", ip2).await.unwrap();
|
||||
|
||||
let active_ips = tracker.get_active_ips("test_user").await;
|
||||
assert_eq!(active_ips.len(), 2);
|
||||
assert!(active_ips.contains(&ip1));
|
||||
assert!(active_ips.contains(&ip2));
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_stats() {
|
||||
let tracker = UserIpTracker::new();
|
||||
tracker.set_user_limit("user1", 3).await;
|
||||
tracker.set_user_limit("user2", 2).await;
|
||||
|
||||
let ip1 = test_ipv4(192, 168, 1, 1);
|
||||
let ip2 = test_ipv4(192, 168, 1, 2);
|
||||
|
||||
tracker.check_and_add("user1", ip1).await.unwrap();
|
||||
tracker.check_and_add("user2", ip2).await.unwrap();
|
||||
|
||||
let stats = tracker.get_stats().await;
|
||||
assert_eq!(stats.len(), 2);
|
||||
|
||||
assert!(stats.iter().any(|(name, _, _)| name == "user1"));
|
||||
assert!(stats.iter().any(|(name, _, _)| name == "user2"));
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_clear_user_ips() {
|
||||
let tracker = UserIpTracker::new();
|
||||
let ip1 = test_ipv4(192, 168, 1, 1);
|
||||
|
||||
tracker.check_and_add("test_user", ip1).await.unwrap();
|
||||
assert_eq!(tracker.get_active_ip_count("test_user").await, 1);
|
||||
|
||||
tracker.clear_user_ips("test_user").await;
|
||||
assert_eq!(tracker.get_active_ip_count("test_user").await, 0);
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_is_ip_active() {
|
||||
let tracker = UserIpTracker::new();
|
||||
let ip1 = test_ipv4(192, 168, 1, 1);
|
||||
let ip2 = test_ipv4(192, 168, 1, 2);
|
||||
|
||||
tracker.check_and_add("test_user", ip1).await.unwrap();
|
||||
|
||||
assert!(tracker.is_ip_active("test_user", ip1).await);
|
||||
assert!(!tracker.is_ip_active("test_user", ip2).await);
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_load_limits_from_config() {
|
||||
let tracker = UserIpTracker::new();
|
||||
|
||||
let mut config_limits = HashMap::new();
|
||||
config_limits.insert("user1".to_string(), 5);
|
||||
config_limits.insert("user2".to_string(), 3);
|
||||
|
||||
tracker.load_limits(&config_limits).await;
|
||||
|
||||
assert_eq!(tracker.get_user_limit("user1").await, Some(5));
|
||||
assert_eq!(tracker.get_user_limit("user2").await, Some(3));
|
||||
assert_eq!(tracker.get_user_limit("user3").await, None);
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_load_limits_replaces_previous_map() {
|
||||
let tracker = UserIpTracker::new();
|
||||
|
||||
let mut first = HashMap::new();
|
||||
first.insert("user1".to_string(), 2);
|
||||
first.insert("user2".to_string(), 3);
|
||||
tracker.load_limits(&first).await;
|
||||
|
||||
let mut second = HashMap::new();
|
||||
second.insert("user2".to_string(), 5);
|
||||
tracker.load_limits(&second).await;
|
||||
|
||||
assert_eq!(tracker.get_user_limit("user1").await, None);
|
||||
assert_eq!(tracker.get_user_limit("user2").await, Some(5));
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_time_window_mode_blocks_recent_ip_churn() {
|
||||
let tracker = UserIpTracker::new();
|
||||
tracker.set_user_limit("test_user", 1).await;
|
||||
tracker
|
||||
.set_limit_policy(UserMaxUniqueIpsMode::TimeWindow, 30)
|
||||
.await;
|
||||
|
||||
let ip1 = test_ipv4(10, 0, 0, 1);
|
||||
let ip2 = test_ipv4(10, 0, 0, 2);
|
||||
|
||||
assert!(tracker.check_and_add("test_user", ip1).await.is_ok());
|
||||
tracker.remove_ip("test_user", ip1).await;
|
||||
assert!(tracker.check_and_add("test_user", ip2).await.is_err());
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_combined_mode_enforces_active_and_recent_limits() {
|
||||
let tracker = UserIpTracker::new();
|
||||
tracker.set_user_limit("test_user", 1).await;
|
||||
tracker
|
||||
.set_limit_policy(UserMaxUniqueIpsMode::Combined, 30)
|
||||
.await;
|
||||
|
||||
let ip1 = test_ipv4(10, 0, 1, 1);
|
||||
let ip2 = test_ipv4(10, 0, 1, 2);
|
||||
|
||||
assert!(tracker.check_and_add("test_user", ip1).await.is_ok());
|
||||
assert!(tracker.check_and_add("test_user", ip2).await.is_err());
|
||||
|
||||
tracker.remove_ip("test_user", ip1).await;
|
||||
assert!(tracker.check_and_add("test_user", ip2).await.is_err());
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_time_window_expires() {
|
||||
let tracker = UserIpTracker::new();
|
||||
tracker.set_user_limit("test_user", 1).await;
|
||||
tracker
|
||||
.set_limit_policy(UserMaxUniqueIpsMode::TimeWindow, 1)
|
||||
.await;
|
||||
|
||||
let ip1 = test_ipv4(10, 1, 0, 1);
|
||||
let ip2 = test_ipv4(10, 1, 0, 2);
|
||||
|
||||
assert!(tracker.check_and_add("test_user", ip1).await.is_ok());
|
||||
tracker.remove_ip("test_user", ip1).await;
|
||||
assert!(tracker.check_and_add("test_user", ip2).await.is_err());
|
||||
|
||||
tokio::time::sleep(Duration::from_millis(1100)).await;
|
||||
assert!(tracker.check_and_add("test_user", ip2).await.is_ok());
|
||||
}
|
||||
}
|
||||
1563
src/main.rs
1563
src/main.rs
File diff suppressed because it is too large
Load Diff
1551
src/metrics.rs
Normal file
1551
src/metrics.rs
Normal file
File diff suppressed because it is too large
Load Diff
197
src/network/dns_overrides.rs
Normal file
197
src/network/dns_overrides.rs
Normal file
@@ -0,0 +1,197 @@
|
||||
//! Runtime DNS overrides for `host:port` targets.
|
||||
|
||||
use std::collections::HashMap;
|
||||
use std::net::{IpAddr, Ipv6Addr, SocketAddr};
|
||||
use std::sync::{OnceLock, RwLock};
|
||||
|
||||
use crate::error::{ProxyError, Result};
|
||||
|
||||
type OverrideMap = HashMap<(String, u16), IpAddr>;
|
||||
|
||||
static DNS_OVERRIDES: OnceLock<RwLock<OverrideMap>> = OnceLock::new();
|
||||
|
||||
fn overrides_store() -> &'static RwLock<OverrideMap> {
|
||||
DNS_OVERRIDES.get_or_init(|| RwLock::new(HashMap::new()))
|
||||
}
|
||||
|
||||
fn parse_ip_spec(ip_spec: &str) -> Result<IpAddr> {
|
||||
if ip_spec.starts_with('[') && ip_spec.ends_with(']') {
|
||||
let inner = &ip_spec[1..ip_spec.len() - 1];
|
||||
let ipv6 = inner.parse::<Ipv6Addr>().map_err(|_| {
|
||||
ProxyError::Config(format!(
|
||||
"network.dns_overrides IPv6 override is invalid: '{ip_spec}'"
|
||||
))
|
||||
})?;
|
||||
return Ok(IpAddr::V6(ipv6));
|
||||
}
|
||||
|
||||
let ip = ip_spec.parse::<IpAddr>().map_err(|_| {
|
||||
ProxyError::Config(format!(
|
||||
"network.dns_overrides IP is invalid: '{ip_spec}'"
|
||||
))
|
||||
})?;
|
||||
if matches!(ip, IpAddr::V6(_)) {
|
||||
return Err(ProxyError::Config(format!(
|
||||
"network.dns_overrides IPv6 must be bracketed: '{ip_spec}'"
|
||||
)));
|
||||
}
|
||||
Ok(ip)
|
||||
}
|
||||
|
||||
fn parse_entry(entry: &str) -> Result<((String, u16), IpAddr)> {
|
||||
let trimmed = entry.trim();
|
||||
if trimmed.is_empty() {
|
||||
return Err(ProxyError::Config(
|
||||
"network.dns_overrides entry cannot be empty".to_string(),
|
||||
));
|
||||
}
|
||||
|
||||
let first_sep = trimmed.find(':').ok_or_else(|| {
|
||||
ProxyError::Config(format!(
|
||||
"network.dns_overrides entry must use host:port:ip format: '{trimmed}'"
|
||||
))
|
||||
})?;
|
||||
let second_sep = trimmed[first_sep + 1..]
|
||||
.find(':')
|
||||
.map(|idx| first_sep + 1 + idx)
|
||||
.ok_or_else(|| {
|
||||
ProxyError::Config(format!(
|
||||
"network.dns_overrides entry must use host:port:ip format: '{trimmed}'"
|
||||
))
|
||||
})?;
|
||||
|
||||
let host = trimmed[..first_sep].trim();
|
||||
let port_str = trimmed[first_sep + 1..second_sep].trim();
|
||||
let ip_str = trimmed[second_sep + 1..].trim();
|
||||
|
||||
if host.is_empty() {
|
||||
return Err(ProxyError::Config(format!(
|
||||
"network.dns_overrides host cannot be empty: '{trimmed}'"
|
||||
)));
|
||||
}
|
||||
if host.contains(':') {
|
||||
return Err(ProxyError::Config(format!(
|
||||
"network.dns_overrides host must be a domain name without ':' in this format: '{trimmed}'"
|
||||
)));
|
||||
}
|
||||
|
||||
let port = port_str.parse::<u16>().map_err(|_| {
|
||||
ProxyError::Config(format!(
|
||||
"network.dns_overrides port is invalid: '{trimmed}'"
|
||||
))
|
||||
})?;
|
||||
let ip = parse_ip_spec(ip_str)?;
|
||||
|
||||
Ok(((host.to_ascii_lowercase(), port), ip))
|
||||
}
|
||||
|
||||
fn parse_entries(entries: &[String]) -> Result<OverrideMap> {
|
||||
let mut parsed = HashMap::new();
|
||||
for entry in entries {
|
||||
let (key, ip) = parse_entry(entry)?;
|
||||
parsed.insert(key, ip);
|
||||
}
|
||||
Ok(parsed)
|
||||
}
|
||||
|
||||
/// Validate `network.dns_overrides` entries without updating runtime state.
|
||||
pub fn validate_entries(entries: &[String]) -> Result<()> {
|
||||
let _ = parse_entries(entries)?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Replace runtime DNS overrides with a new validated snapshot.
|
||||
pub fn install_entries(entries: &[String]) -> Result<()> {
|
||||
let parsed = parse_entries(entries)?;
|
||||
let mut guard = overrides_store()
|
||||
.write()
|
||||
.map_err(|_| ProxyError::Config("network.dns_overrides runtime lock is poisoned".to_string()))?;
|
||||
*guard = parsed;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Resolve a hostname override for `(host, port)` if present.
|
||||
pub fn resolve(host: &str, port: u16) -> Option<IpAddr> {
|
||||
let key = (host.to_ascii_lowercase(), port);
|
||||
overrides_store()
|
||||
.read()
|
||||
.ok()
|
||||
.and_then(|guard| guard.get(&key).copied())
|
||||
}
|
||||
|
||||
/// Resolve a hostname override and construct a socket address when present.
|
||||
pub fn resolve_socket_addr(host: &str, port: u16) -> Option<SocketAddr> {
|
||||
resolve(host, port).map(|ip| SocketAddr::new(ip, port))
|
||||
}
|
||||
|
||||
/// Parse a runtime endpoint in `host:port` format.
///
/// Supports:
/// - `example.com:443`
/// - `[2001:db8::1]:443`
///
/// The returned host is lowercased. Unbracketed IPv6 (`2001:db8::1:443`)
/// is rejected because the split would be ambiguous.
pub fn split_host_port(endpoint: &str) -> Option<(String, u16)> {
    // Bracketed IPv6 literal: "[addr]:port".
    if let Some(rest) = endpoint.strip_prefix('[') {
        let (raw_host, tail) = rest.split_once(']')?;
        // The closing bracket must be immediately followed by ':'.
        let raw_port = tail.strip_prefix(':')?;
        let host = raw_host.trim();
        if host.is_empty() {
            return None;
        }
        let port = raw_port.trim().parse::<u16>().ok()?;
        return Some((host.to_ascii_lowercase(), port));
    }

    // Plain "host:port" — split on the last colon so the host may not
    // itself contain ':' (that would be an unbracketed IPv6 literal).
    let (raw_host, raw_port) = endpoint.rsplit_once(':')?;
    let host = raw_host.trim();
    let port = raw_port.trim().parse::<u16>().ok()?;
    if host.is_empty() || host.contains(':') {
        return None;
    }

    Some((host.to_ascii_lowercase(), port))
}
|
||||
|
||||
#[cfg(test)]
mod tests {
    use super::*;

    // Both IPv4 and bracketed-IPv6 override values must validate.
    #[test]
    fn validate_accepts_ipv4_and_bracketed_ipv6() {
        let entries = vec![
            "example.com:443:127.0.0.1".to_string(),
            "example.net:8443:[2001:db8::10]".to_string(),
        ];
        assert!(validate_entries(&entries).is_ok());
    }

    // Raw IPv6 values are ambiguous with the host:port:ip entry format,
    // so they must be rejected with a "must be bracketed" error.
    #[test]
    fn validate_rejects_unbracketed_ipv6() {
        let entries = vec!["example.net:443:2001:db8::10".to_string()];
        let err = validate_entries(&entries).unwrap_err().to_string();
        assert!(err.contains("must be bracketed"));
    }

    // NOTE(review): this test mutates the process-wide override store;
    // it assumes no concurrently running test expects different contents.
    #[test]
    fn install_and_resolve_are_case_insensitive_for_host() {
        let entries = vec!["MyPetrovich.ru:8443:127.0.0.1".to_string()];
        install_entries(&entries).unwrap();

        let resolved = resolve("mypetrovich.ru", 8443);
        assert_eq!(resolved, Some("127.0.0.1".parse().unwrap()));
    }

    #[test]
    fn split_host_port_parses_supported_shapes() {
        assert_eq!(
            split_host_port("example.com:443"),
            Some(("example.com".to_string(), 443))
        );
        assert_eq!(
            split_host_port("[2001:db8::1]:443"),
            Some(("2001:db8::1".to_string(), 443))
        );
        // Unbracketed IPv6 cannot be split unambiguously.
        assert_eq!(split_host_port("2001:db8::1:443"), None);
    }
}
|
||||
5
src/network/mod.rs
Normal file
5
src/network/mod.rs
Normal file
@@ -0,0 +1,5 @@
|
||||
pub mod dns_overrides;
|
||||
pub mod probe;
|
||||
pub mod stun;
|
||||
|
||||
pub use stun::IpFamily;
|
||||
370
src/network/probe.rs
Normal file
370
src/network/probe.rs
Normal file
@@ -0,0 +1,370 @@
|
||||
#![allow(dead_code)]
|
||||
|
||||
use std::collections::HashMap;
|
||||
use std::net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr, UdpSocket};
|
||||
use std::time::Duration;
|
||||
|
||||
use tokio::task::JoinSet;
|
||||
use tokio::time::timeout;
|
||||
use tracing::{debug, info, warn};
|
||||
|
||||
use crate::config::NetworkConfig;
|
||||
use crate::error::Result;
|
||||
use crate::network::stun::{stun_probe_dual, DualStunResult, IpFamily, StunProbeResult};
|
||||
|
||||
/// Snapshot of the local/public address situation discovered by `run_probe`.
#[derive(Debug, Clone, Default)]
pub struct NetworkProbe {
    /// Local IPv4 source address chosen by the OS routing table, if any.
    pub detected_ipv4: Option<Ipv4Addr>,
    /// Local IPv6 source address chosen by the OS routing table, if any.
    pub detected_ipv6: Option<Ipv6Addr>,
    /// Public IPv4 address as reflected by STUN (port 0 when it came from
    /// the HTTP fallback instead).
    pub reflected_ipv4: Option<SocketAddr>,
    /// Public IPv6 address as reflected by STUN.
    pub reflected_ipv6: Option<SocketAddr>,
    /// True when the detected IPv4 address is private/reserved (bogon).
    pub ipv4_is_bogon: bool,
    /// True when the detected IPv6 address is private/reserved (bogon).
    pub ipv6_is_bogon: bool,
    /// True when detected and reflected IPv4 addresses differ (NAT suspected).
    pub ipv4_nat_detected: bool,
    /// True when detected and reflected IPv6 addresses differ (NAT suspected).
    pub ipv6_nat_detected: bool,
    /// IPv4 judged usable (enabled, detected, and not an unreflected bogon).
    pub ipv4_usable: bool,
    /// IPv6 judged usable (enabled, detected, and not an unreflected bogon).
    pub ipv6_usable: bool,
}
|
||||
|
||||
/// Capability flags derived from config plus probe results by
/// `decide_network_capabilities`.
#[derive(Debug, Clone, Default)]
pub struct NetworkDecision {
    /// IPv4 may be used to reach datacenters.
    pub ipv4_dc: bool,
    /// IPv6 may be used to reach datacenters.
    pub ipv6_dc: bool,
    /// IPv4 may be used for "me" connections (presumably middle-proxy —
    /// see the RPC middle-proxy constants elsewhere in this crate).
    pub ipv4_me: bool,
    /// IPv6 may be used for "me" connections.
    pub ipv6_me: bool,
    /// Effective preferred IP family: 4 or 6.
    pub effective_prefer: u8,
    /// Multipath enabled only when both families are "me"-capable.
    pub effective_multipath: bool,
}
|
||||
|
||||
impl NetworkDecision {
|
||||
pub fn prefer_ipv6(&self) -> bool {
|
||||
self.effective_prefer == 6
|
||||
}
|
||||
|
||||
pub fn me_families(&self) -> Vec<IpFamily> {
|
||||
let mut res = Vec::new();
|
||||
if self.ipv4_me {
|
||||
res.push(IpFamily::V4);
|
||||
}
|
||||
if self.ipv6_me {
|
||||
res.push(IpFamily::V6);
|
||||
}
|
||||
res
|
||||
}
|
||||
}
|
||||
|
||||
const STUN_BATCH_TIMEOUT: Duration = Duration::from_secs(5);
|
||||
|
||||
/// Probe local and public connectivity for both IP families.
///
/// Steps: detect local source addresses, classify them as bogon or not,
/// optionally reflect the public address via STUN (HTTP fallback for IPv4
/// when STUN yields nothing and the local address is private), then derive
/// NAT and usability flags.
///
/// `nat_probe = false` skips all external probing; `stun_nat_probe_concurrency`
/// bounds how many STUN servers are queried in parallel (clamped to >= 1).
pub async fn run_probe(
    config: &NetworkConfig,
    nat_probe: bool,
    stun_nat_probe_concurrency: usize,
) -> Result<NetworkProbe> {
    let mut probe = NetworkProbe::default();

    // Local source addresses via the UDP-connect routing trick.
    probe.detected_ipv4 = detect_local_ip_v4();
    probe.detected_ipv6 = detect_local_ip_v6();

    probe.ipv4_is_bogon = probe.detected_ipv4.map(is_bogon_v4).unwrap_or(false);
    probe.ipv6_is_bogon = probe.detected_ipv6.map(is_bogon_v6).unwrap_or(false);

    // STUN reflection is gated by both the runtime flag and config.stun_use.
    let stun_res = if nat_probe && config.stun_use {
        let servers = collect_stun_servers(config);
        if servers.is_empty() {
            warn!("STUN probe is enabled but network.stun_servers is empty");
            DualStunResult::default()
        } else {
            probe_stun_servers_parallel(
                &servers,
                stun_nat_probe_concurrency.max(1),
            )
            .await
        }
    } else if nat_probe {
        info!("STUN probe is disabled by network.stun_use=false");
        DualStunResult::default()
    } else {
        DualStunResult::default()
    };
    probe.reflected_ipv4 = stun_res.v4.map(|r| r.reflected_addr);
    probe.reflected_ipv6 = stun_res.v6.map(|r| r.reflected_addr);

    // If STUN is blocked but IPv4 is private, try HTTP public-IP fallback.
    // Port 0 marks the address as HTTP-derived (no reflected port exists).
    if nat_probe
        && probe.reflected_ipv4.is_none()
        && probe.detected_ipv4.map(is_bogon_v4).unwrap_or(false)
    {
        if let Some(public_ip) = detect_public_ipv4_http(&config.http_ip_detect_urls).await {
            probe.reflected_ipv4 = Some(SocketAddr::new(IpAddr::V4(public_ip), 0));
            info!(public_ip = %public_ip, "STUN unavailable, using HTTP public IPv4 fallback");
        }
    }

    // NAT is inferred from a mismatch between local and reflected address;
    // with only one side known, no claim is made (false).
    probe.ipv4_nat_detected = match (probe.detected_ipv4, probe.reflected_ipv4) {
        (Some(det), Some(reflected)) => det != reflected.ip(),
        _ => false,
    };
    probe.ipv6_nat_detected = match (probe.detected_ipv6, probe.reflected_ipv6) {
        (Some(det), Some(reflected)) => det != reflected.ip(),
        _ => false,
    };

    // A family is usable when enabled + detected, and a bogon local address
    // is acceptable only if the reflected address is itself non-bogon.
    probe.ipv4_usable = config.ipv4
        && probe.detected_ipv4.is_some()
        && (!probe.ipv4_is_bogon || probe.reflected_ipv4.map(|r| !is_bogon(r.ip())).unwrap_or(false));

    // config.ipv6 is an Option: None means "auto" (follow detection).
    let ipv6_enabled = config.ipv6.unwrap_or(probe.detected_ipv6.is_some());
    probe.ipv6_usable = ipv6_enabled
        && probe.detected_ipv6.is_some()
        && (!probe.ipv6_is_bogon || probe.reflected_ipv6.map(|r| !is_bogon(r.ip())).unwrap_or(false));

    Ok(probe)
}
|
||||
|
||||
async fn detect_public_ipv4_http(urls: &[String]) -> Option<Ipv4Addr> {
|
||||
let client = reqwest::Client::builder()
|
||||
.timeout(Duration::from_secs(3))
|
||||
.build()
|
||||
.ok()?;
|
||||
|
||||
for url in urls {
|
||||
let response = match client.get(url).send().await {
|
||||
Ok(response) => response,
|
||||
Err(_) => continue,
|
||||
};
|
||||
|
||||
let body = match response.text().await {
|
||||
Ok(body) => body,
|
||||
Err(_) => continue,
|
||||
};
|
||||
|
||||
let Ok(ip) = body.trim().parse::<Ipv4Addr>() else {
|
||||
continue;
|
||||
};
|
||||
if !is_bogon_v4(ip) {
|
||||
return Some(ip);
|
||||
}
|
||||
}
|
||||
|
||||
None
|
||||
}
|
||||
|
||||
fn collect_stun_servers(config: &NetworkConfig) -> Vec<String> {
|
||||
let mut out = Vec::new();
|
||||
for s in &config.stun_servers {
|
||||
if !s.is_empty() && !out.contains(s) {
|
||||
out.push(s.clone());
|
||||
}
|
||||
}
|
||||
out
|
||||
}
|
||||
|
||||
/// Query multiple STUN servers with bounded concurrency and pick, per IP
/// family, the reflected address reported by the most servers (a simple
/// quorum vote).
async fn probe_stun_servers_parallel(
    servers: &[String],
    concurrency: usize,
) -> DualStunResult {
    let mut join_set = JoinSet::new();
    let mut next_idx = 0usize;
    // reflected IP -> (vote count, first result that reported it)
    let mut best_v4_by_ip: HashMap<IpAddr, (usize, StunProbeResult)> = HashMap::new();
    let mut best_v6_by_ip: HashMap<IpAddr, (usize, StunProbeResult)> = HashMap::new();

    // Sliding-window scheduler: keep at most `concurrency` probes in flight,
    // refilling the window each time one completes.
    while next_idx < servers.len() || !join_set.is_empty() {
        while next_idx < servers.len() && join_set.len() < concurrency {
            let stun_addr = servers[next_idx].clone();
            next_idx += 1;
            join_set.spawn(async move {
                // Each probe gets its own hard deadline on top of the
                // retry logic inside stun_probe_dual.
                let res = timeout(STUN_BATCH_TIMEOUT, stun_probe_dual(&stun_addr)).await;
                (stun_addr, res)
            });
        }

        let Some(task) = join_set.join_next().await else {
            break;
        };

        // Nested results: join error / timeout / probe error / success.
        match task {
            Ok((stun_addr, Ok(Ok(result)))) => {
                if let Some(v4) = result.v4 {
                    let entry = best_v4_by_ip.entry(v4.reflected_addr.ip()).or_insert((0, v4));
                    entry.0 += 1;
                }
                if let Some(v6) = result.v6 {
                    let entry = best_v6_by_ip.entry(v6.reflected_addr.ip()).or_insert((0, v6));
                    entry.0 += 1;
                }
                if result.v4.is_some() || result.v6.is_some() {
                    debug!(stun = %stun_addr, "STUN server responded within probe timeout");
                }
            }
            Ok((stun_addr, Ok(Err(e)))) => {
                debug!(error = %e, stun = %stun_addr, "STUN probe failed");
            }
            Ok((stun_addr, Err(_))) => {
                debug!(stun = %stun_addr, "STUN probe timeout");
            }
            Err(e) => {
                debug!(error = %e, "STUN probe task join failed");
            }
        }
    }

    // Majority vote per family: the IP seen by the most servers wins.
    let mut out = DualStunResult::default();
    if let Some((_, best)) = best_v4_by_ip
        .into_values()
        .max_by_key(|(count, _)| *count)
    {
        info!("STUN-Quorum reached, IP: {}", best.reflected_addr.ip());
        out.v4 = Some(best);
    }
    if let Some((_, best)) = best_v6_by_ip
        .into_values()
        .max_by_key(|(count, _)| *count)
    {
        info!("STUN-Quorum reached, IP: {}", best.reflected_addr.ip());
        out.v6 = Some(best);
    }
    out
}
|
||||
|
||||
pub fn decide_network_capabilities(config: &NetworkConfig, probe: &NetworkProbe) -> NetworkDecision {
|
||||
let ipv4_dc = config.ipv4 && probe.detected_ipv4.is_some();
|
||||
let ipv6_dc = config.ipv6.unwrap_or(probe.detected_ipv6.is_some()) && probe.detected_ipv6.is_some();
|
||||
|
||||
let ipv4_me = config.ipv4
|
||||
&& probe.detected_ipv4.is_some()
|
||||
&& (!probe.ipv4_is_bogon || probe.reflected_ipv4.is_some());
|
||||
|
||||
let ipv6_enabled = config.ipv6.unwrap_or(probe.detected_ipv6.is_some());
|
||||
let ipv6_me = ipv6_enabled
|
||||
&& probe.detected_ipv6.is_some()
|
||||
&& (!probe.ipv6_is_bogon || probe.reflected_ipv6.is_some());
|
||||
|
||||
let effective_prefer = match config.prefer {
|
||||
6 if ipv6_me || ipv6_dc => 6,
|
||||
4 if ipv4_me || ipv4_dc => 4,
|
||||
6 => {
|
||||
warn!("prefer=6 requested but IPv6 unavailable; falling back to IPv4");
|
||||
4
|
||||
}
|
||||
_ => 4,
|
||||
};
|
||||
|
||||
let me_families = ipv4_me as u8 + ipv6_me as u8;
|
||||
let effective_multipath = config.multipath && me_families >= 2;
|
||||
|
||||
NetworkDecision {
|
||||
ipv4_dc,
|
||||
ipv6_dc,
|
||||
ipv4_me,
|
||||
ipv6_me,
|
||||
effective_prefer,
|
||||
effective_multipath,
|
||||
}
|
||||
}
|
||||
|
||||
/// Discover the local IPv4 source address the OS would use for internet
/// traffic. UDP connect() only performs a route lookup; no packet is sent.
fn detect_local_ip_v4() -> Option<Ipv4Addr> {
    let socket = UdpSocket::bind("0.0.0.0:0").ok()?;
    socket.connect("8.8.8.8:80").ok()?;
    if let IpAddr::V4(v4) = socket.local_addr().ok()?.ip() {
        Some(v4)
    } else {
        None
    }
}
|
||||
|
||||
/// Discover the local IPv6 source address the OS would use for internet
/// traffic. UDP connect() only performs a route lookup; no packet is sent.
fn detect_local_ip_v6() -> Option<Ipv6Addr> {
    let socket = UdpSocket::bind("[::]:0").ok()?;
    socket.connect("[2001:4860:4860::8888]:80").ok()?;
    if let IpAddr::V6(v6) = socket.local_addr().ok()?.ip() {
        Some(v6)
    } else {
        None
    }
}
|
||||
|
||||
/// True when `ip` is a non-routable/reserved ("bogon") address of either
/// family. Dispatches to the family-specific checks below.
pub fn is_bogon(ip: IpAddr) -> bool {
    match ip {
        IpAddr::V4(v4) => is_bogon_v4(v4),
        IpAddr::V6(v6) => is_bogon_v6(v6),
    }
}
|
||||
|
||||
/// True when `ip` falls in a private, reserved, or otherwise non-routable
/// IPv4 range: RFC 1918 private, loopback, link-local, "this network"
/// (0.0.0.0/8), CGNAT (100.64.0.0/10), IETF protocol assignments
/// (192.0.0.0/24), TEST-NET-1/2/3, benchmarking (198.18.0.0/15),
/// multicast, 240.0.0.0/4, and broadcast.
pub fn is_bogon_v4(ip: Ipv4Addr) -> bool {
    let o = ip.octets();
    ip.is_private()
        || ip.is_loopback()
        || ip.is_link_local()
        || ip.is_multicast()
        || ip.is_broadcast()
        || o[0] == 0                                        // 0.0.0.0/8
        || o[0] >= 240                                      // 240.0.0.0/4
        || (o[0] == 100 && (o[1] & 0xC0) == 64)             // 100.64.0.0/10 CGNAT
        || (o[0] == 192 && o[1] == 0 && (o[2] == 0 || o[2] == 2)) // 192.0.0.0/24, 192.0.2.0/24
        || (o[0] == 198 && (o[1] & 0xFE) == 18)             // 198.18.0.0/15
        || (o[0] == 198 && o[1] == 51 && o[2] == 100)       // 198.51.100.0/24
        || (o[0] == 203 && o[1] == 0 && o[2] == 113)        // 203.0.113.0/24
}
|
||||
|
||||
/// True when `ip` falls in a non-routable/reserved IPv6 range: unspecified,
/// loopback, unique-local (fc00::/7), link-local (fe80::/10), IPv4-mapped
/// (::ffff:0:0/96), discard-only (100::/64), documentation (2001:db8::/32),
/// 6to4 (2002::/16), and multicast.
pub fn is_bogon_v6(ip: Ipv6Addr) -> bool {
    if ip.is_unspecified() || ip.is_loopback() || ip.is_unique_local() || ip.is_multicast() {
        return true;
    }

    let segs = ip.segments();
    let link_local = (segs[0] & 0xFFC0) == 0xFE80;
    let v4_mapped = segs[..6] == [0, 0, 0, 0, 0, 0xFFFF];
    let discard_only = segs[..4] == [0x0100, 0, 0, 0];
    let documentation = segs[0] == 0x2001 && segs[1] == 0x0db8;
    let six_to_four = segs[0] == 0x2002;

    link_local || v4_mapped || discard_only || documentation || six_to_four
}
|
||||
|
||||
pub fn log_probe_result(probe: &NetworkProbe, decision: &NetworkDecision) {
|
||||
info!(
|
||||
ipv4 = probe.detected_ipv4.as_ref().map(|v| v.to_string()).unwrap_or_else(|| "-".into()),
|
||||
ipv6 = probe.detected_ipv6.as_ref().map(|v| v.to_string()).unwrap_or_else(|| "-".into()),
|
||||
reflected_v4 = probe.reflected_ipv4.as_ref().map(|v| v.ip().to_string()).unwrap_or_else(|| "-".into()),
|
||||
reflected_v6 = probe.reflected_ipv6.as_ref().map(|v| v.ip().to_string()).unwrap_or_else(|| "-".into()),
|
||||
ipv4_bogon = probe.ipv4_is_bogon,
|
||||
ipv6_bogon = probe.ipv6_is_bogon,
|
||||
ipv4_me = decision.ipv4_me,
|
||||
ipv6_me = decision.ipv6_me,
|
||||
ipv4_dc = decision.ipv4_dc,
|
||||
ipv6_dc = decision.ipv6_dc,
|
||||
prefer = decision.effective_prefer,
|
||||
multipath = decision.effective_multipath,
|
||||
"Network capabilities resolved"
|
||||
);
|
||||
}
|
||||
234
src/network/stun.rs
Normal file
234
src/network/stun.rs
Normal file
@@ -0,0 +1,234 @@
|
||||
#![allow(unreachable_code)]
|
||||
#![allow(dead_code)]
|
||||
|
||||
use std::net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr};
|
||||
|
||||
use tokio::net::{lookup_host, UdpSocket};
|
||||
use tokio::time::{timeout, Duration, sleep};
|
||||
|
||||
use crate::error::{ProxyError, Result};
|
||||
use crate::network::dns_overrides::{resolve, split_host_port};
|
||||
|
||||
/// IP address family selector used throughout the STUN/probe code.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
pub enum IpFamily {
    // IPv4
    V4,
    // IPv6
    V6,
}
|
||||
|
||||
/// Outcome of a single successful STUN binding exchange.
#[derive(Debug, Clone, Copy)]
pub struct StunProbeResult {
    /// Local address the probe socket ended up bound to.
    pub local_addr: SocketAddr,
    /// Address the STUN server saw us as (MAPPED / XOR-MAPPED-ADDRESS).
    pub reflected_addr: SocketAddr,
    /// Which family this probe was for.
    pub family: IpFamily,
}
|
||||
|
||||
/// Per-family STUN results for one server; either side may be absent.
#[derive(Debug, Default, Clone)]
pub struct DualStunResult {
    /// IPv4 probe result, if the exchange succeeded.
    pub v4: Option<StunProbeResult>,
    /// IPv6 probe result, if the exchange succeeded.
    pub v6: Option<StunProbeResult>,
}
|
||||
|
||||
pub async fn stun_probe_dual(stun_addr: &str) -> Result<DualStunResult> {
|
||||
let (v4, v6) = tokio::join!(
|
||||
stun_probe_family(stun_addr, IpFamily::V4),
|
||||
stun_probe_family(stun_addr, IpFamily::V6),
|
||||
);
|
||||
|
||||
Ok(DualStunResult {
|
||||
v4: v4?,
|
||||
v6: v6?,
|
||||
})
|
||||
}
|
||||
|
||||
/// Probe `stun_addr` for one IP family with an unspecified local bind.
/// Thin wrapper over `stun_probe_family_with_bind` with `bind_ip = None`.
pub async fn stun_probe_family(stun_addr: &str, family: IpFamily) -> Result<Option<StunProbeResult>> {
    stun_probe_family_with_bind(stun_addr, family, None).await
}
|
||||
|
||||
/// Probe one STUN server for the reflected (public) address of `family`,
/// optionally binding the probe socket to `bind_ip`.
///
/// Flow: bind a UDP socket, resolve the server for the requested family,
/// send an RFC 5389-style Binding Request (retried up to 3 times with
/// exponential backoff on receive timeout), then parse the first
/// XOR-MAPPED-ADDRESS or MAPPED-ADDRESS attribute out of the response.
///
/// Returns `Ok(None)` for "family unavailable" conditions (bind family
/// mismatch, bind failure with an explicit `bind_ip`, no resolvable target,
/// IPv6 unreachable, too-short response, exhausted retries) and `Err` for
/// hard I/O failures.
///
/// NOTE(review): the response's message type and transaction ID are never
/// compared against the request, so any datagram on the connected socket is
/// parsed as the answer — consider validating the txid echo.
pub async fn stun_probe_family_with_bind(
    stun_addr: &str,
    family: IpFamily,
    bind_ip: Option<IpAddr>,
) -> Result<Option<StunProbeResult>> {
    use rand::RngCore;

    // Pick the local bind address; a bind_ip of the wrong family means the
    // caller asked for a combination this host cannot provide.
    let bind_addr = match (family, bind_ip) {
        (IpFamily::V4, Some(IpAddr::V4(ip))) => SocketAddr::new(IpAddr::V4(ip), 0),
        (IpFamily::V6, Some(IpAddr::V6(ip))) => SocketAddr::new(IpAddr::V6(ip), 0),
        (IpFamily::V4, Some(IpAddr::V6(_))) | (IpFamily::V6, Some(IpAddr::V4(_))) => {
            return Ok(None);
        }
        (IpFamily::V4, None) => SocketAddr::new(IpAddr::V4(Ipv4Addr::UNSPECIFIED), 0),
        (IpFamily::V6, None) => SocketAddr::new(IpAddr::V6(Ipv6Addr::UNSPECIFIED), 0),
    };

    let socket = match UdpSocket::bind(bind_addr).await {
        Ok(socket) => socket,
        // An explicit bind_ip that cannot be bound is "not available here",
        // not a fatal error.
        Err(_) if bind_ip.is_some() => return Ok(None),
        Err(e) => return Err(ProxyError::Proxy(format!("STUN bind failed: {e}"))),
    };

    let target_addr = resolve_stun_addr(stun_addr, family).await?;
    if let Some(addr) = target_addr {
        match socket.connect(addr).await {
            Ok(()) => {}
            // Hosts without IPv6 routes fail here; treat as "no IPv6".
            Err(e) if family == IpFamily::V6 && matches!(
                e.kind(),
                std::io::ErrorKind::NetworkUnreachable
                | std::io::ErrorKind::HostUnreachable
                | std::io::ErrorKind::Unsupported
                | std::io::ErrorKind::NetworkDown
            ) => return Ok(None),
            Err(e) => return Err(ProxyError::Proxy(format!("STUN connect failed: {e}"))),
        }
    } else {
        return Ok(None);
    }

    // 20-byte STUN header: type, length, magic cookie, 96-bit transaction ID.
    let mut req = [0u8; 20];
    req[0..2].copy_from_slice(&0x0001u16.to_be_bytes()); // Binding Request
    req[2..4].copy_from_slice(&0u16.to_be_bytes()); // length
    req[4..8].copy_from_slice(&0x2112A442u32.to_be_bytes()); // magic cookie
    rand::rng().fill_bytes(&mut req[8..20]); // transaction ID

    let mut buf = [0u8; 256];
    let mut attempt = 0;
    let mut backoff = Duration::from_secs(1);
    // Send/receive loop; only receive timeouts count toward the 3 attempts.
    // If a datagram parses without yielding a mapped address, the loop
    // re-sends (the caller bounds total time with its own timeout).
    loop {
        socket
            .send(&req)
            .await
            .map_err(|e| ProxyError::Proxy(format!("STUN send failed: {e}")))?;

        let recv_res = timeout(Duration::from_secs(3), socket.recv(&mut buf)).await;
        let n = match recv_res {
            Ok(Ok(n)) => n,
            Ok(Err(e)) => return Err(ProxyError::Proxy(format!("STUN recv failed: {e}"))),
            Err(_) => {
                attempt += 1;
                if attempt >= 3 {
                    return Ok(None);
                }
                sleep(backoff).await;
                backoff *= 2;
                continue;
            }
        };

        // Anything shorter than the STUN header cannot be a valid response.
        if n < 20 {
            return Ok(None);
        }

        let magic = 0x2112A442u32.to_be_bytes();
        let txid = &req[8..20];
        // Walk the TLV attribute list that follows the 20-byte header.
        let mut idx = 20;
        while idx + 4 <= n {
            let atype = u16::from_be_bytes(buf[idx..idx + 2].try_into().unwrap());
            let alen = u16::from_be_bytes(buf[idx + 2..idx + 4].try_into().unwrap()) as usize;
            idx += 4;
            if idx + alen > n {
                break;
            }

            match atype {
                0x0020 /* XOR-MAPPED-ADDRESS */ | 0x0001 /* MAPPED-ADDRESS */ => {
                    // Value layout: reserved(1) family(1) port(2) address(4|16).
                    if alen < 8 {
                        break;
                    }
                    let family_byte = buf[idx + 1];
                    let port_bytes = [buf[idx + 2], buf[idx + 3]];
                    let len_check = match family_byte {
                        0x01 => 4,
                        0x02 => 16,
                        _ => 0,
                    };
                    if len_check == 0 || alen < 4 + len_check {
                        break;
                    }

                    let raw_ip = &buf[idx + 4..idx + 4 + len_check];
                    let mut port = u16::from_be_bytes(port_bytes);

                    let reflected_ip = if atype == 0x0020 {
                        // XOR-MAPPED-ADDRESS: port is XORed with the top 16
                        // bits of the magic cookie; IPv4 with the cookie,
                        // IPv6 with cookie || transaction ID (RFC 5389 §15.2).
                        port ^= ((magic[0] as u16) << 8) | magic[1] as u16;
                        match family_byte {
                            0x01 => {
                                let ip = [
                                    raw_ip[0] ^ magic[0],
                                    raw_ip[1] ^ magic[1],
                                    raw_ip[2] ^ magic[2],
                                    raw_ip[3] ^ magic[3],
                                ];
                                IpAddr::V4(Ipv4Addr::new(ip[0], ip[1], ip[2], ip[3]))
                            }
                            0x02 => {
                                let mut ip = [0u8; 16];
                                let xor_key = [magic.as_slice(), txid].concat();
                                for (i, b) in raw_ip.iter().enumerate().take(16) {
                                    ip[i] = *b ^ xor_key[i];
                                }
                                IpAddr::V6(Ipv6Addr::from(ip))
                            }
                            _ => {
                                // Unknown family byte: skip this attribute
                                // (attributes are padded to 4-byte boundaries).
                                idx += (alen + 3) & !3;
                                continue;
                            }
                        }
                    } else {
                        // Plain MAPPED-ADDRESS: address and port are literal.
                        match family_byte {
                            0x01 => IpAddr::V4(Ipv4Addr::new(raw_ip[0], raw_ip[1], raw_ip[2], raw_ip[3])),
                            0x02 => IpAddr::V6(Ipv6Addr::from(<[u8; 16]>::try_from(raw_ip).unwrap())),
                            _ => {
                                idx += (alen + 3) & !3;
                                continue;
                            }
                        }
                    };

                    let reflected_addr = SocketAddr::new(reflected_ip, port);
                    let local_addr = socket
                        .local_addr()
                        .map_err(|e| ProxyError::Proxy(format!("STUN local_addr failed: {e}")))?;

                    return Ok(Some(StunProbeResult {
                        local_addr,
                        reflected_addr,
                        family,
                    }));
                }
                _ => {}
            }

            // Advance to the next attribute, honoring 4-byte padding.
            idx += (alen + 3) & !3;
        }

    }

    // Unreachable: the loop above only exits via `return` (the file-level
    // #![allow(unreachable_code)] covers this).
    Ok(None)
}
|
||||
|
||||
async fn resolve_stun_addr(stun_addr: &str, family: IpFamily) -> Result<Option<SocketAddr>> {
|
||||
if let Ok(addr) = stun_addr.parse::<SocketAddr>() {
|
||||
return Ok(match (addr.is_ipv4(), family) {
|
||||
(true, IpFamily::V4) | (false, IpFamily::V6) => Some(addr),
|
||||
_ => None,
|
||||
});
|
||||
}
|
||||
|
||||
if let Some((host, port)) = split_host_port(stun_addr)
|
||||
&& let Some(ip) = resolve(&host, port)
|
||||
{
|
||||
let addr = SocketAddr::new(ip, port);
|
||||
return Ok(match (addr.is_ipv4(), family) {
|
||||
(true, IpFamily::V4) | (false, IpFamily::V6) => Some(addr),
|
||||
_ => None,
|
||||
});
|
||||
}
|
||||
|
||||
let mut addrs = lookup_host(stun_addr)
|
||||
.await
|
||||
.map_err(|e| ProxyError::Proxy(format!("STUN resolve failed: {e}")))?;
|
||||
|
||||
let target = addrs
|
||||
.find(|a| matches!((a.is_ipv4(), family), (true, IpFamily::V4) | (false, IpFamily::V6)));
|
||||
Ok(target)
|
||||
}
|
||||
@@ -1,6 +1,10 @@
|
||||
//! Protocol constants and datacenter addresses
|
||||
|
||||
use std::net::{IpAddr, Ipv4Addr, Ipv6Addr};
|
||||
#![allow(dead_code)]
|
||||
|
||||
use std::net::{IpAddr, Ipv4Addr};
|
||||
|
||||
use crate::crypto::SecureRandom;
|
||||
use std::sync::LazyLock;
|
||||
|
||||
// ============= Telegram Datacenters =============
|
||||
@@ -151,7 +155,32 @@ pub const TLS_RECORD_ALERT: u8 = 0x15;
|
||||
/// Maximum TLS record size
|
||||
pub const MAX_TLS_RECORD_SIZE: usize = 16384;
|
||||
/// Maximum TLS chunk size (with overhead)
|
||||
pub const MAX_TLS_CHUNK_SIZE: usize = 16384 + 24;
|
||||
/// RFC 8446 §5.2 allows up to 16384 + 256 bytes of ciphertext
|
||||
pub const MAX_TLS_CHUNK_SIZE: usize = 16384 + 256;
|
||||
|
||||
/// Secure Intermediate payload is expected to be 4-byte aligned.
pub fn is_valid_secure_payload_len(data_len: usize) -> bool {
    data_len % 4 == 0
}
|
||||
|
||||
/// Compute Secure Intermediate payload length from wire length.
/// Secure mode strips up to 3 random tail bytes by truncating to a 4-byte
/// boundary; frames shorter than one 4-byte unit carry no payload at all.
pub fn secure_payload_len_from_wire_len(wire_len: usize) -> Option<usize> {
    // Masking the low two bits rounds down to the nearest multiple of 4.
    (wire_len >= 4).then_some(wire_len & !3)
}
|
||||
|
||||
/// Generate padding length for Secure Intermediate protocol.
/// Data must be 4-byte aligned; padding is 1..=3 so total is never divisible by 4.
///
/// Alignment is enforced only in debug builds via `debug_assert!`.
/// NOTE(review): assumes `rng.range(3)` yields a value in 0..=2 — confirm
/// against `SecureRandom::range`'s contract.
pub fn secure_padding_len(data_len: usize, rng: &SecureRandom) -> usize {
    debug_assert!(
        is_valid_secure_payload_len(data_len),
        "Secure payload must be 4-byte aligned, got {data_len}"
    );
    rng.range(3) + 1
}
|
||||
|
||||
// ============= Timeouts =============
|
||||
|
||||
@@ -202,7 +231,6 @@ pub static RESERVED_NONCE_CONTINUES: &[[u8; 4]] = &[
|
||||
// ============= RPC Constants (for Middle Proxy) =============
|
||||
|
||||
/// RPC Proxy Request
|
||||
|
||||
/// RPC Flags (from Erlang mtp_rpc.erl)
|
||||
pub const RPC_FLAG_NOT_ENCRYPTED: u32 = 0x2;
|
||||
pub const RPC_FLAG_HAS_AD_TAG: u32 = 0x8;
|
||||
@@ -284,6 +312,10 @@ pub mod rpc_flags {
|
||||
pub const FLAG_ABRIDGED: u32 = 0x40000000;
|
||||
pub const FLAG_QUICKACK: u32 = 0x80000000;
|
||||
}
|
||||
|
||||
pub mod rpc_crypto_flags {
|
||||
pub const USE_CRC32C: u32 = 0x800;
|
||||
}
|
||||
|
||||
pub const ME_CONNECT_TIMEOUT_SECS: u64 = 5;
|
||||
pub const ME_HANDSHAKE_TIMEOUT_SECS: u64 = 10;
|
||||
@@ -319,4 +351,43 @@ mod tests {
|
||||
assert_eq!(TG_DATACENTERS_V4.len(), 5);
|
||||
assert_eq!(TG_DATACENTERS_V6.len(), 5);
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn secure_padding_never_produces_aligned_total() {
|
||||
let rng = SecureRandom::new();
|
||||
for data_len in (0..1000).step_by(4) {
|
||||
for _ in 0..100 {
|
||||
let padding = secure_padding_len(data_len, &rng);
|
||||
assert!(
|
||||
padding <= 3,
|
||||
"padding out of range: data_len={data_len}, padding={padding}"
|
||||
);
|
||||
assert_ne!(
|
||||
(data_len + padding) % 4,
|
||||
0,
|
||||
"invariant violated: data_len={data_len}, padding={padding}, total={}",
|
||||
data_len + padding
|
||||
);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn secure_wire_len_roundtrip_for_aligned_payload() {
|
||||
for payload_len in (4..4096).step_by(4) {
|
||||
for padding in 0..=3usize {
|
||||
let wire_len = payload_len + padding;
|
||||
let recovered = secure_payload_len_from_wire_len(wire_len);
|
||||
assert_eq!(recovered, Some(payload_len));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn secure_wire_len_rejects_too_short_frames() {
|
||||
assert_eq!(secure_payload_len_from_wire_len(0), None);
|
||||
assert_eq!(secure_payload_len_from_wire_len(1), None);
|
||||
assert_eq!(secure_payload_len_from_wire_len(2), None);
|
||||
assert_eq!(secure_payload_len_from_wire_len(3), None);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,5 +1,7 @@
|
||||
//! MTProto frame types and metadata
|
||||
|
||||
#![allow(dead_code)]
|
||||
|
||||
use std::collections::HashMap;
|
||||
|
||||
/// Extra metadata associated with a frame
|
||||
@@ -83,7 +85,7 @@ impl FrameMode {
|
||||
pub fn validate_message_length(len: usize) -> bool {
|
||||
use super::constants::{MIN_MSG_LEN, MAX_MSG_LEN, PADDING_FILLER};
|
||||
|
||||
len >= MIN_MSG_LEN && len <= MAX_MSG_LEN && len % PADDING_FILLER.len() == 0
|
||||
(MIN_MSG_LEN..=MAX_MSG_LEN).contains(&len) && len.is_multiple_of(PADDING_FILLER.len())
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
|
||||
@@ -5,7 +5,11 @@ pub mod frame;
|
||||
pub mod obfuscation;
|
||||
pub mod tls;
|
||||
|
||||
#[allow(unused_imports)]
|
||||
pub use constants::*;
|
||||
#[allow(unused_imports)]
|
||||
pub use frame::*;
|
||||
#[allow(unused_imports)]
|
||||
pub use obfuscation::*;
|
||||
#[allow(unused_imports)]
|
||||
pub use tls::*;
|
||||
@@ -1,8 +1,9 @@
|
||||
//! MTProto Obfuscation
|
||||
|
||||
#![allow(dead_code)]
|
||||
|
||||
use zeroize::Zeroize;
|
||||
use crate::crypto::{sha256, AesCtr};
|
||||
use crate::error::Result;
|
||||
use super::constants::*;
|
||||
|
||||
/// Obfuscation parameters from handshake
|
||||
@@ -160,6 +161,12 @@ pub fn prepare_tg_nonce(
|
||||
}
|
||||
|
||||
/// Encrypt the outgoing nonce for Telegram
|
||||
/// Legacy helper — **do not use**.
|
||||
/// WARNING: logic diverges from Python/C reference (SHA256 of 48 bytes, IV from head).
|
||||
/// Kept only to avoid breaking external callers; prefer `encrypt_tg_nonce_with_ciphers`.
|
||||
#[deprecated(
|
||||
note = "Incorrect MTProto obfuscation KDF; use proxy::handshake::encrypt_tg_nonce_with_ciphers"
|
||||
)]
|
||||
pub fn encrypt_nonce(nonce: &[u8; HANDSHAKE_LEN]) -> Vec<u8> {
|
||||
let key_iv = &nonce[SKIP_LEN..SKIP_LEN + KEY_LEN + IV_LEN];
|
||||
let enc_key = sha256(key_iv);
|
||||
@@ -208,4 +215,4 @@ mod tests {
|
||||
assert!(is_valid_nonce(&nonce));
|
||||
assert_eq!(nonce.len(), HANDSHAKE_LEN);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -4,10 +4,15 @@
|
||||
//! for domain fronting. The handshake looks like valid TLS 1.3 but
|
||||
//! actually carries MTProto authentication data.
|
||||
|
||||
#![allow(dead_code)]
|
||||
|
||||
use crate::crypto::{sha256_hmac, SecureRandom};
|
||||
use crate::error::{ProxyError, Result};
|
||||
#[cfg(test)]
|
||||
use crate::error::ProxyError;
|
||||
use super::constants::*;
|
||||
use std::time::{SystemTime, UNIX_EPOCH};
|
||||
use num_bigint::BigUint;
|
||||
use num_traits::One;
|
||||
|
||||
// ============= Public Constants =============
|
||||
|
||||
@@ -30,6 +35,7 @@ pub const TIME_SKEW_MAX: i64 = 10 * 60; // 10 minutes after
|
||||
mod extension_type {
|
||||
pub const KEY_SHARE: u16 = 0x0033;
|
||||
pub const SUPPORTED_VERSIONS: u16 = 0x002b;
|
||||
pub const ALPN: u16 = 0x0010;
|
||||
}
|
||||
|
||||
/// TLS Cipher Suites
|
||||
@@ -60,6 +66,7 @@ pub struct TlsValidation {
|
||||
// ============= TLS Extension Builder =============
|
||||
|
||||
/// Builder for TLS extensions with correct length calculation
|
||||
#[derive(Clone)]
|
||||
struct TlsExtensionBuilder {
|
||||
extensions: Vec<u8>,
|
||||
}
|
||||
@@ -106,6 +113,27 @@ impl TlsExtensionBuilder {
|
||||
|
||||
self
|
||||
}
|
||||
|
||||
/// Add ALPN extension with a single selected protocol.
|
||||
fn add_alpn(&mut self, proto: &[u8]) -> &mut Self {
|
||||
// Extension type: ALPN (0x0010)
|
||||
self.extensions.extend_from_slice(&extension_type::ALPN.to_be_bytes());
|
||||
|
||||
// ALPN extension format:
|
||||
// extension_data length (2 bytes)
|
||||
// protocols length (2 bytes)
|
||||
// protocol name length (1 byte)
|
||||
// protocol name bytes
|
||||
let proto_len = proto.len() as u8;
|
||||
let list_len: u16 = 1 + proto_len as u16;
|
||||
let ext_len: u16 = 2 + list_len;
|
||||
|
||||
self.extensions.extend_from_slice(&ext_len.to_be_bytes());
|
||||
self.extensions.extend_from_slice(&list_len.to_be_bytes());
|
||||
self.extensions.push(proto_len);
|
||||
self.extensions.extend_from_slice(proto);
|
||||
self
|
||||
}
|
||||
|
||||
/// Build final extensions with length prefix
|
||||
fn build(self) -> Vec<u8> {
|
||||
@@ -142,6 +170,8 @@ struct ServerHelloBuilder {
|
||||
compression: u8,
|
||||
/// Extensions
|
||||
extensions: TlsExtensionBuilder,
|
||||
/// Selected ALPN protocol (if any)
|
||||
alpn: Option<Vec<u8>>,
|
||||
}
|
||||
|
||||
impl ServerHelloBuilder {
|
||||
@@ -152,6 +182,7 @@ impl ServerHelloBuilder {
|
||||
cipher_suite: cipher_suite::TLS_AES_128_GCM_SHA256,
|
||||
compression: 0x00,
|
||||
extensions: TlsExtensionBuilder::new(),
|
||||
alpn: None,
|
||||
}
|
||||
}
|
||||
|
||||
@@ -165,10 +196,19 @@ impl ServerHelloBuilder {
|
||||
self.extensions.add_supported_versions(0x0304);
|
||||
self
|
||||
}
|
||||
|
||||
fn with_alpn(mut self, proto: Option<Vec<u8>>) -> Self {
|
||||
self.alpn = proto;
|
||||
self
|
||||
}
|
||||
|
||||
/// Build ServerHello message (without record header)
|
||||
fn build_message(&self) -> Vec<u8> {
|
||||
let extensions = self.extensions.extensions.clone();
|
||||
let mut ext_builder = self.extensions.clone();
|
||||
if let Some(ref alpn) = self.alpn {
|
||||
ext_builder.add_alpn(alpn);
|
||||
}
|
||||
let extensions = ext_builder.extensions.clone();
|
||||
let extensions_len = extensions.len() as u16;
|
||||
|
||||
// Calculate total length
|
||||
@@ -295,7 +335,7 @@ pub fn validate_tls_handshake(
|
||||
// This is a quirk in some clients that use uptime instead of real time
|
||||
let is_boot_time = timestamp < 60 * 60 * 24 * 1000; // < ~2.7 years in seconds
|
||||
|
||||
if !is_boot_time && (time_diff < TIME_SKEW_MIN || time_diff > TIME_SKEW_MAX) {
|
||||
if !is_boot_time && !(TIME_SKEW_MIN..=TIME_SKEW_MAX).contains(&time_diff) {
|
||||
continue;
|
||||
}
|
||||
}
|
||||
@@ -311,13 +351,27 @@ pub fn validate_tls_handshake(
|
||||
None
|
||||
}
|
||||
|
||||
fn curve25519_prime() -> BigUint {
|
||||
(BigUint::one() << 255) - BigUint::from(19u32)
|
||||
}
|
||||
|
||||
/// Generate a fake X25519 public key for TLS
|
||||
///
|
||||
/// This generates random bytes that look like a valid X25519 public key.
|
||||
/// Since we're not doing real TLS, the actual cryptographic properties don't matter.
|
||||
/// Produces a quadratic residue mod p = 2^255 - 19 by computing n² mod p,
|
||||
/// which matches Python/C behavior and avoids DPI fingerprinting.
|
||||
pub fn gen_fake_x25519_key(rng: &SecureRandom) -> [u8; 32] {
|
||||
let bytes = rng.bytes(32);
|
||||
bytes.try_into().unwrap()
|
||||
let mut n_bytes = [0u8; 32];
|
||||
n_bytes.copy_from_slice(&rng.bytes(32));
|
||||
|
||||
let n = BigUint::from_bytes_le(&n_bytes);
|
||||
let p = curve25519_prime();
|
||||
let pk = (&n * &n) % &p;
|
||||
|
||||
let mut out = pk.to_bytes_le();
|
||||
out.resize(32, 0);
|
||||
let mut result = [0u8; 32];
|
||||
result.copy_from_slice(&out[..32]);
|
||||
result
|
||||
}
|
||||
|
||||
/// Build TLS ServerHello response
|
||||
@@ -334,13 +388,19 @@ pub fn build_server_hello(
|
||||
session_id: &[u8],
|
||||
fake_cert_len: usize,
|
||||
rng: &SecureRandom,
|
||||
alpn: Option<Vec<u8>>,
|
||||
new_session_tickets: u8,
|
||||
) -> Vec<u8> {
|
||||
const MIN_APP_DATA: usize = 64;
|
||||
const MAX_APP_DATA: usize = 16640; // RFC 8446 §5.2 upper bound
|
||||
let fake_cert_len = fake_cert_len.clamp(MIN_APP_DATA, MAX_APP_DATA);
|
||||
let x25519_key = gen_fake_x25519_key(rng);
|
||||
|
||||
// Build ServerHello
|
||||
let server_hello = ServerHelloBuilder::new(session_id.to_vec())
|
||||
.with_x25519_key(&x25519_key)
|
||||
.with_tls13_version()
|
||||
.with_alpn(alpn)
|
||||
.build_record();
|
||||
|
||||
// Build Change Cipher Spec record
|
||||
@@ -357,15 +417,35 @@ pub fn build_server_hello(
|
||||
app_data_record.push(TLS_RECORD_APPLICATION);
|
||||
app_data_record.extend_from_slice(&TLS_VERSION);
|
||||
app_data_record.extend_from_slice(&(fake_cert_len as u16).to_be_bytes());
|
||||
// Fill ApplicationData with fully random bytes of desired length to avoid
|
||||
// deterministic DPI fingerprints (fixed inner content type markers).
|
||||
app_data_record.extend_from_slice(&fake_cert);
|
||||
|
||||
// Build optional NewSessionTicket records (TLS 1.3 handshake messages are encrypted;
|
||||
// here we mimic with opaque ApplicationData records of plausible size).
|
||||
let mut tickets = Vec::new();
|
||||
if new_session_tickets > 0 {
|
||||
for _ in 0..new_session_tickets {
|
||||
let ticket_len: usize = rng.range(48) + 48; // 48-95 bytes
|
||||
let mut record = Vec::with_capacity(5 + ticket_len);
|
||||
record.push(TLS_RECORD_APPLICATION);
|
||||
record.extend_from_slice(&TLS_VERSION);
|
||||
record.extend_from_slice(&(ticket_len as u16).to_be_bytes());
|
||||
record.extend_from_slice(&rng.bytes(ticket_len));
|
||||
tickets.push(record);
|
||||
}
|
||||
}
|
||||
|
||||
// Combine all records
|
||||
let mut response = Vec::with_capacity(
|
||||
server_hello.len() + change_cipher_spec.len() + app_data_record.len()
|
||||
server_hello.len() + change_cipher_spec.len() + app_data_record.len() + tickets.iter().map(|r| r.len()).sum::<usize>()
|
||||
);
|
||||
response.extend_from_slice(&server_hello);
|
||||
response.extend_from_slice(&change_cipher_spec);
|
||||
response.extend_from_slice(&app_data_record);
|
||||
for t in &tickets {
|
||||
response.extend_from_slice(t);
|
||||
}
|
||||
|
||||
// Compute HMAC for the response
|
||||
let mut hmac_input = Vec::with_capacity(TLS_DIGEST_LEN + response.len());
|
||||
@@ -381,6 +461,131 @@ pub fn build_server_hello(
|
||||
response
|
||||
}
|
||||
|
||||
/// Extract SNI (server_name) from a TLS ClientHello.
|
||||
pub fn extract_sni_from_client_hello(handshake: &[u8]) -> Option<String> {
|
||||
if handshake.len() < 43 || handshake[0] != TLS_RECORD_HANDSHAKE {
|
||||
return None;
|
||||
}
|
||||
|
||||
let mut pos = 5; // after record header
|
||||
if handshake.get(pos).copied()? != 0x01 {
|
||||
return None; // not ClientHello
|
||||
}
|
||||
|
||||
// Handshake length bytes
|
||||
pos += 4; // type + len (3)
|
||||
|
||||
// version (2) + random (32)
|
||||
pos += 2 + 32;
|
||||
if pos + 1 > handshake.len() {
|
||||
return None;
|
||||
}
|
||||
|
||||
let session_id_len = *handshake.get(pos)? as usize;
|
||||
pos += 1 + session_id_len;
|
||||
if pos + 2 > handshake.len() {
|
||||
return None;
|
||||
}
|
||||
|
||||
let cipher_suites_len = u16::from_be_bytes([handshake[pos], handshake[pos + 1]]) as usize;
|
||||
pos += 2 + cipher_suites_len;
|
||||
if pos + 1 > handshake.len() {
|
||||
return None;
|
||||
}
|
||||
|
||||
let comp_len = *handshake.get(pos)? as usize;
|
||||
pos += 1 + comp_len;
|
||||
if pos + 2 > handshake.len() {
|
||||
return None;
|
||||
}
|
||||
|
||||
let ext_len = u16::from_be_bytes([handshake[pos], handshake[pos + 1]]) as usize;
|
||||
pos += 2;
|
||||
let ext_end = pos + ext_len;
|
||||
if ext_end > handshake.len() {
|
||||
return None;
|
||||
}
|
||||
|
||||
while pos + 4 <= ext_end {
|
||||
let etype = u16::from_be_bytes([handshake[pos], handshake[pos + 1]]);
|
||||
let elen = u16::from_be_bytes([handshake[pos + 2], handshake[pos + 3]]) as usize;
|
||||
pos += 4;
|
||||
if pos + elen > ext_end {
|
||||
break;
|
||||
}
|
||||
if etype == 0x0000 && elen >= 5 {
|
||||
// server_name extension
|
||||
let list_len = u16::from_be_bytes([handshake[pos], handshake[pos + 1]]) as usize;
|
||||
let mut sn_pos = pos + 2;
|
||||
let sn_end = std::cmp::min(sn_pos + list_len, pos + elen);
|
||||
while sn_pos + 3 <= sn_end {
|
||||
let name_type = handshake[sn_pos];
|
||||
let name_len = u16::from_be_bytes([handshake[sn_pos + 1], handshake[sn_pos + 2]]) as usize;
|
||||
sn_pos += 3;
|
||||
if sn_pos + name_len > sn_end {
|
||||
break;
|
||||
}
|
||||
if name_type == 0 && name_len > 0
|
||||
&& let Ok(host) = std::str::from_utf8(&handshake[sn_pos..sn_pos + name_len])
|
||||
{
|
||||
return Some(host.to_string());
|
||||
}
|
||||
sn_pos += name_len;
|
||||
}
|
||||
}
|
||||
pos += elen;
|
||||
}
|
||||
|
||||
None
|
||||
}
|
||||
|
||||
/// Extract ALPN protocol list from ClientHello, return in offered order.
|
||||
pub fn extract_alpn_from_client_hello(handshake: &[u8]) -> Vec<Vec<u8>> {
|
||||
let mut pos = 5; // after record header
|
||||
if handshake.get(pos) != Some(&0x01) {
|
||||
return Vec::new();
|
||||
}
|
||||
pos += 4; // type + len
|
||||
pos += 2 + 32; // version + random
|
||||
if pos >= handshake.len() { return Vec::new(); }
|
||||
let session_id_len = *handshake.get(pos).unwrap_or(&0) as usize;
|
||||
pos += 1 + session_id_len;
|
||||
if pos + 2 > handshake.len() { return Vec::new(); }
|
||||
let cipher_len = u16::from_be_bytes([handshake[pos], handshake[pos+1]]) as usize;
|
||||
pos += 2 + cipher_len;
|
||||
if pos >= handshake.len() { return Vec::new(); }
|
||||
let comp_len = *handshake.get(pos).unwrap_or(&0) as usize;
|
||||
pos += 1 + comp_len;
|
||||
if pos + 2 > handshake.len() { return Vec::new(); }
|
||||
let ext_len = u16::from_be_bytes([handshake[pos], handshake[pos+1]]) as usize;
|
||||
pos += 2;
|
||||
let ext_end = pos + ext_len;
|
||||
if ext_end > handshake.len() { return Vec::new(); }
|
||||
let mut out = Vec::new();
|
||||
while pos + 4 <= ext_end {
|
||||
let etype = u16::from_be_bytes([handshake[pos], handshake[pos+1]]);
|
||||
let elen = u16::from_be_bytes([handshake[pos+2], handshake[pos+3]]) as usize;
|
||||
pos += 4;
|
||||
if pos + elen > ext_end { break; }
|
||||
if etype == extension_type::ALPN && elen >= 3 {
|
||||
let list_len = u16::from_be_bytes([handshake[pos], handshake[pos+1]]) as usize;
|
||||
let mut lp = pos + 2;
|
||||
let list_end = (pos + 2).saturating_add(list_len).min(pos + elen);
|
||||
while lp < list_end {
|
||||
let plen = handshake[lp] as usize;
|
||||
lp += 1;
|
||||
if lp + plen > list_end { break; }
|
||||
out.push(handshake[lp..lp+plen].to_vec());
|
||||
lp += plen;
|
||||
}
|
||||
break;
|
||||
}
|
||||
pos += elen;
|
||||
}
|
||||
out
|
||||
}
|
||||
|
||||
|
||||
/// Check if bytes look like a TLS ClientHello
|
||||
pub fn is_tls_handshake(first_bytes: &[u8]) -> bool {
|
||||
if first_bytes.len() < 3 {
|
||||
@@ -411,7 +616,7 @@ pub fn parse_tls_record_header(header: &[u8; 5]) -> Option<(u8, u16)> {
|
||||
///
|
||||
/// This is useful for testing that our ServerHello is well-formed.
|
||||
#[cfg(test)]
|
||||
fn validate_server_hello_structure(data: &[u8]) -> Result<()> {
|
||||
fn validate_server_hello_structure(data: &[u8]) -> Result<(), ProxyError> {
|
||||
if data.len() < 5 {
|
||||
return Err(ProxyError::InvalidTlsRecord {
|
||||
record_type: 0,
|
||||
@@ -498,6 +703,17 @@ mod tests {
|
||||
assert_eq!(key2.len(), 32);
|
||||
assert_ne!(key1, key2); // Should be random
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_fake_x25519_key_is_quadratic_residue() {
|
||||
let rng = SecureRandom::new();
|
||||
let key = gen_fake_x25519_key(&rng);
|
||||
let p = curve25519_prime();
|
||||
let k_num = BigUint::from_bytes_le(&key);
|
||||
let exponent = (&p - BigUint::one()) >> 1;
|
||||
let legendre = k_num.modpow(&exponent, &p);
|
||||
assert_eq!(legendre, BigUint::one());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_tls_extension_builder() {
|
||||
@@ -548,7 +764,7 @@ mod tests {
|
||||
let session_id = vec![0xAA; 32];
|
||||
|
||||
let rng = SecureRandom::new();
|
||||
let response = build_server_hello(secret, &client_digest, &session_id, 2048, &rng);
|
||||
let response = build_server_hello(secret, &client_digest, &session_id, 2048, &rng, None, 0);
|
||||
|
||||
// Should have at least 3 records
|
||||
assert!(response.len() > 100);
|
||||
@@ -581,8 +797,8 @@ mod tests {
|
||||
let session_id = vec![0xAA; 32];
|
||||
|
||||
let rng = SecureRandom::new();
|
||||
let response1 = build_server_hello(secret, &client_digest, &session_id, 1024, &rng);
|
||||
let response2 = build_server_hello(secret, &client_digest, &session_id, 1024, &rng);
|
||||
let response1 = build_server_hello(secret, &client_digest, &session_id, 1024, &rng, None, 0);
|
||||
let response2 = build_server_hello(secret, &client_digest, &session_id, 1024, &rng, None, 0);
|
||||
|
||||
// Digest position should have non-zero data
|
||||
let digest1 = &response1[TLS_DIGEST_POS..TLS_DIGEST_POS + TLS_DIGEST_LEN];
|
||||
@@ -641,4 +857,101 @@ mod tests {
|
||||
// Should return None (no match) but not panic
|
||||
assert!(result.is_none());
|
||||
}
|
||||
}
|
||||
|
||||
fn build_client_hello_with_exts(exts: Vec<(u16, Vec<u8>)>, host: &str) -> Vec<u8> {
|
||||
let mut body = Vec::new();
|
||||
body.extend_from_slice(&TLS_VERSION); // legacy version
|
||||
body.extend_from_slice(&[0u8; 32]); // random
|
||||
body.push(0); // session id len
|
||||
body.extend_from_slice(&2u16.to_be_bytes()); // cipher suites len
|
||||
body.extend_from_slice(&[0x13, 0x01]); // TLS_AES_128_GCM_SHA256
|
||||
body.push(1); // compression len
|
||||
body.push(0); // null compression
|
||||
|
||||
// Build SNI extension
|
||||
let host_bytes = host.as_bytes();
|
||||
let mut sni_ext = Vec::new();
|
||||
sni_ext.extend_from_slice(&(host_bytes.len() as u16 + 3).to_be_bytes());
|
||||
sni_ext.push(0);
|
||||
sni_ext.extend_from_slice(&(host_bytes.len() as u16).to_be_bytes());
|
||||
sni_ext.extend_from_slice(host_bytes);
|
||||
|
||||
let mut ext_blob = Vec::new();
|
||||
for (typ, data) in exts {
|
||||
ext_blob.extend_from_slice(&typ.to_be_bytes());
|
||||
ext_blob.extend_from_slice(&(data.len() as u16).to_be_bytes());
|
||||
ext_blob.extend_from_slice(&data);
|
||||
}
|
||||
// SNI last
|
||||
ext_blob.extend_from_slice(&0x0000u16.to_be_bytes());
|
||||
ext_blob.extend_from_slice(&(sni_ext.len() as u16).to_be_bytes());
|
||||
ext_blob.extend_from_slice(&sni_ext);
|
||||
|
||||
body.extend_from_slice(&(ext_blob.len() as u16).to_be_bytes());
|
||||
body.extend_from_slice(&ext_blob);
|
||||
|
||||
let mut handshake = Vec::new();
|
||||
handshake.push(0x01); // ClientHello
|
||||
let len_bytes = (body.len() as u32).to_be_bytes();
|
||||
handshake.extend_from_slice(&len_bytes[1..4]);
|
||||
handshake.extend_from_slice(&body);
|
||||
|
||||
let mut record = Vec::new();
|
||||
record.push(TLS_RECORD_HANDSHAKE);
|
||||
record.extend_from_slice(&[0x03, 0x01]);
|
||||
record.extend_from_slice(&(handshake.len() as u16).to_be_bytes());
|
||||
record.extend_from_slice(&handshake);
|
||||
record
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_extract_sni_with_grease_extension() {
|
||||
// GREASE type 0x0a0a with zero length before SNI
|
||||
let ch = build_client_hello_with_exts(vec![(0x0a0a, Vec::new())], "example.com");
|
||||
let sni = extract_sni_from_client_hello(&ch);
|
||||
assert_eq!(sni.as_deref(), Some("example.com"));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_extract_sni_tolerates_empty_unknown_extension() {
|
||||
let ch = build_client_hello_with_exts(vec![(0x1234, Vec::new())], "test.local");
|
||||
let sni = extract_sni_from_client_hello(&ch);
|
||||
assert_eq!(sni.as_deref(), Some("test.local"));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_extract_alpn_single() {
|
||||
let mut alpn_data = Vec::new();
|
||||
// list length = 3 (1 length byte + "h2")
|
||||
alpn_data.extend_from_slice(&3u16.to_be_bytes());
|
||||
alpn_data.push(2);
|
||||
alpn_data.extend_from_slice(b"h2");
|
||||
let ch = build_client_hello_with_exts(vec![(0x0010, alpn_data)], "alpn.test");
|
||||
let alpn = extract_alpn_from_client_hello(&ch);
|
||||
let alpn_str: Vec<String> = alpn
|
||||
.iter()
|
||||
.map(|p| std::str::from_utf8(p).unwrap().to_string())
|
||||
.collect();
|
||||
assert_eq!(alpn_str, vec!["h2"]);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_extract_alpn_multiple() {
|
||||
let mut alpn_data = Vec::new();
|
||||
// list length = 11 (sum of per-proto lengths including length bytes)
|
||||
alpn_data.extend_from_slice(&11u16.to_be_bytes());
|
||||
alpn_data.push(2);
|
||||
alpn_data.extend_from_slice(b"h2");
|
||||
alpn_data.push(4);
|
||||
alpn_data.extend_from_slice(b"spdy");
|
||||
alpn_data.push(2);
|
||||
alpn_data.extend_from_slice(b"h3");
|
||||
let ch = build_client_hello_with_exts(vec![(0x0010, alpn_data)], "alpn.test");
|
||||
let alpn = extract_alpn_from_client_hello(&ch);
|
||||
let alpn_str: Vec<String> = alpn
|
||||
.iter()
|
||||
.map(|p| std::str::from_utf8(p).unwrap().to_string())
|
||||
.collect();
|
||||
assert_eq!(alpn_str, vec!["h2", "spdy", "h3"]);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,6 +1,8 @@
|
||||
//! Client Handler
|
||||
|
||||
use std::net::SocketAddr;
|
||||
use std::future::Future;
|
||||
use std::net::{IpAddr, SocketAddr};
|
||||
use std::pin::Pin;
|
||||
use std::sync::Arc;
|
||||
use std::time::Duration;
|
||||
use tokio::io::{AsyncRead, AsyncReadExt, AsyncWrite};
|
||||
@@ -8,21 +10,308 @@ use tokio::net::TcpStream;
|
||||
use tokio::time::timeout;
|
||||
use tracing::{debug, warn};
|
||||
|
||||
/// Post-handshake future (relay phase, runs outside handshake timeout)
|
||||
type PostHandshakeFuture = Pin<Box<dyn Future<Output = Result<()>> + Send>>;
|
||||
|
||||
/// Result of the handshake phase
|
||||
enum HandshakeOutcome {
|
||||
/// Handshake succeeded, relay work to do (outside timeout)
|
||||
NeedsRelay(PostHandshakeFuture),
|
||||
/// Already fully handled (bad client masking, etc.)
|
||||
Handled,
|
||||
}
|
||||
|
||||
use crate::config::ProxyConfig;
|
||||
use crate::crypto::SecureRandom;
|
||||
use crate::error::{HandshakeResult, ProxyError, Result};
|
||||
use crate::ip_tracker::UserIpTracker;
|
||||
use crate::protocol::constants::*;
|
||||
use crate::protocol::tls;
|
||||
use crate::stats::beobachten::BeobachtenStore;
|
||||
use crate::stats::{ReplayChecker, Stats};
|
||||
use crate::stream::{BufferPool, CryptoReader, CryptoWriter};
|
||||
use crate::transport::middle_proxy::MePool;
|
||||
use crate::transport::{UpstreamManager, configure_client_socket};
|
||||
use crate::transport::{UpstreamManager, configure_client_socket, parse_proxy_protocol};
|
||||
use crate::transport::socket::normalize_ip;
|
||||
use crate::tls_front::TlsFrontCache;
|
||||
|
||||
use crate::proxy::direct_relay::handle_via_direct;
|
||||
use crate::proxy::handshake::{HandshakeSuccess, handle_mtproto_handshake, handle_tls_handshake};
|
||||
use crate::proxy::masking::handle_bad_client;
|
||||
use crate::proxy::middle_relay::handle_via_middle_proxy;
|
||||
|
||||
fn beobachten_ttl(config: &ProxyConfig) -> Duration {
|
||||
Duration::from_secs(config.general.beobachten_minutes.saturating_mul(60))
|
||||
}
|
||||
|
||||
fn record_beobachten_class(
|
||||
beobachten: &BeobachtenStore,
|
||||
config: &ProxyConfig,
|
||||
peer_ip: IpAddr,
|
||||
class: &str,
|
||||
) {
|
||||
if !config.general.beobachten {
|
||||
return;
|
||||
}
|
||||
beobachten.record(class, peer_ip, beobachten_ttl(config));
|
||||
}
|
||||
|
||||
fn record_handshake_failure_class(
|
||||
beobachten: &BeobachtenStore,
|
||||
config: &ProxyConfig,
|
||||
peer_ip: IpAddr,
|
||||
error: &ProxyError,
|
||||
) {
|
||||
let class = if error.to_string().contains("expected 64 bytes, got 0") {
|
||||
"expected_64_got_0"
|
||||
} else {
|
||||
"other"
|
||||
};
|
||||
record_beobachten_class(beobachten, config, peer_ip, class);
|
||||
}
|
||||
|
||||
pub async fn handle_client_stream<S>(
|
||||
mut stream: S,
|
||||
peer: SocketAddr,
|
||||
config: Arc<ProxyConfig>,
|
||||
stats: Arc<Stats>,
|
||||
upstream_manager: Arc<UpstreamManager>,
|
||||
replay_checker: Arc<ReplayChecker>,
|
||||
buffer_pool: Arc<BufferPool>,
|
||||
rng: Arc<SecureRandom>,
|
||||
me_pool: Option<Arc<MePool>>,
|
||||
tls_cache: Option<Arc<TlsFrontCache>>,
|
||||
ip_tracker: Arc<UserIpTracker>,
|
||||
beobachten: Arc<BeobachtenStore>,
|
||||
proxy_protocol_enabled: bool,
|
||||
) -> Result<()>
|
||||
where
|
||||
S: AsyncRead + AsyncWrite + Unpin + Send + 'static,
|
||||
{
|
||||
stats.increment_connects_all();
|
||||
let mut real_peer = normalize_ip(peer);
|
||||
|
||||
// For non-TCP streams, use a synthetic local address; may be overridden by PROXY protocol dst
|
||||
let mut local_addr: SocketAddr = format!("0.0.0.0:{}", config.server.port)
|
||||
.parse()
|
||||
.unwrap_or_else(|_| "0.0.0.0:443".parse().unwrap());
|
||||
|
||||
if proxy_protocol_enabled {
|
||||
let proxy_header_timeout = Duration::from_millis(
|
||||
config.server.proxy_protocol_header_timeout_ms.max(1),
|
||||
);
|
||||
match timeout(proxy_header_timeout, parse_proxy_protocol(&mut stream, peer)).await {
|
||||
Ok(Ok(info)) => {
|
||||
debug!(
|
||||
peer = %peer,
|
||||
client = %info.src_addr,
|
||||
version = info.version,
|
||||
"PROXY protocol header parsed"
|
||||
);
|
||||
real_peer = normalize_ip(info.src_addr);
|
||||
if let Some(dst) = info.dst_addr {
|
||||
local_addr = dst;
|
||||
}
|
||||
}
|
||||
Ok(Err(e)) => {
|
||||
stats.increment_connects_bad();
|
||||
warn!(peer = %peer, error = %e, "Invalid PROXY protocol header");
|
||||
record_beobachten_class(&beobachten, &config, peer.ip(), "other");
|
||||
return Err(e);
|
||||
}
|
||||
Err(_) => {
|
||||
stats.increment_connects_bad();
|
||||
warn!(peer = %peer, timeout_ms = proxy_header_timeout.as_millis(), "PROXY protocol header timeout");
|
||||
record_beobachten_class(&beobachten, &config, peer.ip(), "other");
|
||||
return Err(ProxyError::InvalidProxyProtocol);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
debug!(peer = %real_peer, "New connection (generic stream)");
|
||||
|
||||
let handshake_timeout = Duration::from_secs(config.timeouts.client_handshake);
|
||||
let stats_for_timeout = stats.clone();
|
||||
let config_for_timeout = config.clone();
|
||||
let beobachten_for_timeout = beobachten.clone();
|
||||
let peer_for_timeout = real_peer.ip();
|
||||
|
||||
// Phase 1: handshake (with timeout)
|
||||
let outcome = match timeout(handshake_timeout, async {
|
||||
let mut first_bytes = [0u8; 5];
|
||||
stream.read_exact(&mut first_bytes).await?;
|
||||
|
||||
let is_tls = tls::is_tls_handshake(&first_bytes[..3]);
|
||||
debug!(peer = %real_peer, is_tls = is_tls, "Handshake type detected");
|
||||
|
||||
if is_tls {
|
||||
let tls_len = u16::from_be_bytes([first_bytes[3], first_bytes[4]]) as usize;
|
||||
|
||||
if tls_len < 512 {
|
||||
debug!(peer = %real_peer, tls_len = tls_len, "TLS handshake too short");
|
||||
stats.increment_connects_bad();
|
||||
let (reader, writer) = tokio::io::split(stream);
|
||||
handle_bad_client(
|
||||
reader,
|
||||
writer,
|
||||
&first_bytes,
|
||||
real_peer,
|
||||
local_addr,
|
||||
&config,
|
||||
&beobachten,
|
||||
)
|
||||
.await;
|
||||
return Ok(HandshakeOutcome::Handled);
|
||||
}
|
||||
|
||||
let mut handshake = vec![0u8; 5 + tls_len];
|
||||
handshake[..5].copy_from_slice(&first_bytes);
|
||||
stream.read_exact(&mut handshake[5..]).await?;
|
||||
|
||||
let (read_half, write_half) = tokio::io::split(stream);
|
||||
|
||||
let (mut tls_reader, tls_writer, tls_user) = match handle_tls_handshake(
|
||||
&handshake, read_half, write_half, real_peer,
|
||||
&config, &replay_checker, &rng, tls_cache.clone(),
|
||||
).await {
|
||||
HandshakeResult::Success(result) => result,
|
||||
HandshakeResult::BadClient { reader, writer } => {
|
||||
stats.increment_connects_bad();
|
||||
handle_bad_client(
|
||||
reader,
|
||||
writer,
|
||||
&handshake,
|
||||
real_peer,
|
||||
local_addr,
|
||||
&config,
|
||||
&beobachten,
|
||||
)
|
||||
.await;
|
||||
return Ok(HandshakeOutcome::Handled);
|
||||
}
|
||||
HandshakeResult::Error(e) => return Err(e),
|
||||
};
|
||||
|
||||
debug!(peer = %peer, "Reading MTProto handshake through TLS");
|
||||
let mtproto_data = tls_reader.read_exact(HANDSHAKE_LEN).await?;
|
||||
let mtproto_handshake: [u8; HANDSHAKE_LEN] = mtproto_data[..].try_into()
|
||||
.map_err(|_| ProxyError::InvalidHandshake("Short MTProto handshake".into()))?;
|
||||
|
||||
let (crypto_reader, crypto_writer, success) = match handle_mtproto_handshake(
|
||||
&mtproto_handshake, tls_reader, tls_writer, real_peer,
|
||||
&config, &replay_checker, true, Some(tls_user.as_str()),
|
||||
).await {
|
||||
HandshakeResult::Success(result) => result,
|
||||
HandshakeResult::BadClient { reader: _, writer: _ } => {
|
||||
stats.increment_connects_bad();
|
||||
debug!(peer = %peer, "Valid TLS but invalid MTProto handshake");
|
||||
return Ok(HandshakeOutcome::Handled);
|
||||
}
|
||||
HandshakeResult::Error(e) => return Err(e),
|
||||
};
|
||||
|
||||
Ok(HandshakeOutcome::NeedsRelay(Box::pin(
|
||||
RunningClientHandler::handle_authenticated_static(
|
||||
crypto_reader, crypto_writer, success,
|
||||
upstream_manager, stats, config, buffer_pool, rng, me_pool,
|
||||
local_addr, real_peer, ip_tracker.clone(),
|
||||
),
|
||||
)))
|
||||
} else {
|
||||
if !config.general.modes.classic && !config.general.modes.secure {
|
||||
debug!(peer = %real_peer, "Non-TLS modes disabled");
|
||||
stats.increment_connects_bad();
|
||||
let (reader, writer) = tokio::io::split(stream);
|
||||
handle_bad_client(
|
||||
reader,
|
||||
writer,
|
||||
&first_bytes,
|
||||
real_peer,
|
||||
local_addr,
|
||||
&config,
|
||||
&beobachten,
|
||||
)
|
||||
.await;
|
||||
return Ok(HandshakeOutcome::Handled);
|
||||
}
|
||||
|
||||
let mut handshake = [0u8; HANDSHAKE_LEN];
|
||||
handshake[..5].copy_from_slice(&first_bytes);
|
||||
stream.read_exact(&mut handshake[5..]).await?;
|
||||
|
||||
let (read_half, write_half) = tokio::io::split(stream);
|
||||
|
||||
let (crypto_reader, crypto_writer, success) = match handle_mtproto_handshake(
|
||||
&handshake, read_half, write_half, real_peer,
|
||||
&config, &replay_checker, false, None,
|
||||
).await {
|
||||
HandshakeResult::Success(result) => result,
|
||||
HandshakeResult::BadClient { reader, writer } => {
|
||||
stats.increment_connects_bad();
|
||||
handle_bad_client(
|
||||
reader,
|
||||
writer,
|
||||
&handshake,
|
||||
real_peer,
|
||||
local_addr,
|
||||
&config,
|
||||
&beobachten,
|
||||
)
|
||||
.await;
|
||||
return Ok(HandshakeOutcome::Handled);
|
||||
}
|
||||
HandshakeResult::Error(e) => return Err(e),
|
||||
};
|
||||
|
||||
Ok(HandshakeOutcome::NeedsRelay(Box::pin(
|
||||
RunningClientHandler::handle_authenticated_static(
|
||||
crypto_reader,
|
||||
crypto_writer,
|
||||
success,
|
||||
upstream_manager,
|
||||
stats,
|
||||
config,
|
||||
buffer_pool,
|
||||
rng,
|
||||
me_pool,
|
||||
local_addr,
|
||||
real_peer,
|
||||
ip_tracker.clone(),
|
||||
)
|
||||
)))
|
||||
}
|
||||
}).await {
|
||||
Ok(Ok(outcome)) => outcome,
|
||||
Ok(Err(e)) => {
|
||||
debug!(peer = %peer, error = %e, "Handshake failed");
|
||||
record_handshake_failure_class(
|
||||
&beobachten_for_timeout,
|
||||
&config_for_timeout,
|
||||
peer_for_timeout,
|
||||
&e,
|
||||
);
|
||||
return Err(e);
|
||||
}
|
||||
Err(_) => {
|
||||
stats_for_timeout.increment_handshake_timeouts();
|
||||
debug!(peer = %peer, "Handshake timeout");
|
||||
record_beobachten_class(
|
||||
&beobachten_for_timeout,
|
||||
&config_for_timeout,
|
||||
peer_for_timeout,
|
||||
"other",
|
||||
);
|
||||
return Err(ProxyError::TgHandshakeTimeout);
|
||||
}
|
||||
};
|
||||
|
||||
// Phase 2: relay (WITHOUT handshake timeout — relay has its own activity timeouts)
|
||||
match outcome {
|
||||
HandshakeOutcome::NeedsRelay(fut) => fut.await,
|
||||
HandshakeOutcome::Handled => Ok(()),
|
||||
}
|
||||
}
|
||||
|
||||
pub struct ClientHandler;
|
||||
|
||||
pub struct RunningClientHandler {
|
||||
@@ -35,6 +324,10 @@ pub struct RunningClientHandler {
|
||||
buffer_pool: Arc<BufferPool>,
|
||||
rng: Arc<SecureRandom>,
|
||||
me_pool: Option<Arc<MePool>>,
|
||||
tls_cache: Option<Arc<TlsFrontCache>>,
|
||||
ip_tracker: Arc<UserIpTracker>,
|
||||
beobachten: Arc<BeobachtenStore>,
|
||||
proxy_protocol_enabled: bool,
|
||||
}
|
||||
|
||||
impl ClientHandler {
|
||||
@@ -48,6 +341,10 @@ impl ClientHandler {
|
||||
buffer_pool: Arc<BufferPool>,
|
||||
rng: Arc<SecureRandom>,
|
||||
me_pool: Option<Arc<MePool>>,
|
||||
tls_cache: Option<Arc<TlsFrontCache>>,
|
||||
ip_tracker: Arc<UserIpTracker>,
|
||||
beobachten: Arc<BeobachtenStore>,
|
||||
proxy_protocol_enabled: bool,
|
||||
) -> RunningClientHandler {
|
||||
RunningClientHandler {
|
||||
stream,
|
||||
@@ -59,6 +356,10 @@ impl ClientHandler {
|
||||
buffer_pool,
|
||||
rng,
|
||||
me_pool,
|
||||
tls_cache,
|
||||
ip_tracker,
|
||||
beobachten,
|
||||
proxy_protocol_enabled,
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -67,7 +368,9 @@ impl RunningClientHandler {
|
||||
pub async fn run(mut self) -> Result<()> {
|
||||
self.stats.increment_connects_all();
|
||||
|
||||
self.peer = normalize_ip(self.peer);
|
||||
let peer = self.peer;
|
||||
let _ip_tracker = self.ip_tracker.clone();
|
||||
debug!(peer = %peer, "New connection");
|
||||
|
||||
if let Err(e) = configure_client_socket(
|
||||
@@ -80,44 +383,116 @@ impl RunningClientHandler {
|
||||
|
||||
let handshake_timeout = Duration::from_secs(self.config.timeouts.client_handshake);
|
||||
let stats = self.stats.clone();
|
||||
let config_for_timeout = self.config.clone();
|
||||
let beobachten_for_timeout = self.beobachten.clone();
|
||||
let peer_for_timeout = peer.ip();
|
||||
|
||||
let result = timeout(handshake_timeout, self.do_handshake()).await;
|
||||
|
||||
match result {
|
||||
Ok(Ok(())) => {
|
||||
debug!(peer = %peer, "Connection handled successfully");
|
||||
Ok(())
|
||||
}
|
||||
// Phase 1: handshake (with timeout)
|
||||
let outcome = match timeout(handshake_timeout, self.do_handshake()).await {
|
||||
Ok(Ok(outcome)) => outcome,
|
||||
Ok(Err(e)) => {
|
||||
debug!(peer = %peer, error = %e, "Handshake failed");
|
||||
Err(e)
|
||||
record_handshake_failure_class(
|
||||
&beobachten_for_timeout,
|
||||
&config_for_timeout,
|
||||
peer_for_timeout,
|
||||
&e,
|
||||
);
|
||||
return Err(e);
|
||||
}
|
||||
Err(_) => {
|
||||
stats.increment_handshake_timeouts();
|
||||
debug!(peer = %peer, "Handshake timeout");
|
||||
Err(ProxyError::TgHandshakeTimeout)
|
||||
record_beobachten_class(
|
||||
&beobachten_for_timeout,
|
||||
&config_for_timeout,
|
||||
peer_for_timeout,
|
||||
"other",
|
||||
);
|
||||
return Err(ProxyError::TgHandshakeTimeout);
|
||||
}
|
||||
};
|
||||
|
||||
// Phase 2: relay (WITHOUT handshake timeout — relay has its own activity timeouts)
|
||||
match outcome {
|
||||
HandshakeOutcome::NeedsRelay(fut) => fut.await,
|
||||
HandshakeOutcome::Handled => Ok(()),
|
||||
}
|
||||
}
|
||||
|
||||
async fn do_handshake(mut self) -> Result<()> {
|
||||
async fn do_handshake(mut self) -> Result<HandshakeOutcome> {
|
||||
let mut local_addr = self.stream.local_addr().map_err(ProxyError::Io)?;
|
||||
|
||||
if self.proxy_protocol_enabled {
|
||||
let proxy_header_timeout = Duration::from_millis(
|
||||
self.config.server.proxy_protocol_header_timeout_ms.max(1),
|
||||
);
|
||||
match timeout(
|
||||
proxy_header_timeout,
|
||||
parse_proxy_protocol(&mut self.stream, self.peer),
|
||||
)
|
||||
.await
|
||||
{
|
||||
Ok(Ok(info)) => {
|
||||
debug!(
|
||||
peer = %self.peer,
|
||||
client = %info.src_addr,
|
||||
version = info.version,
|
||||
"PROXY protocol header parsed"
|
||||
);
|
||||
self.peer = normalize_ip(info.src_addr);
|
||||
if let Some(dst) = info.dst_addr {
|
||||
local_addr = dst;
|
||||
}
|
||||
}
|
||||
Ok(Err(e)) => {
|
||||
self.stats.increment_connects_bad();
|
||||
warn!(peer = %self.peer, error = %e, "Invalid PROXY protocol header");
|
||||
record_beobachten_class(
|
||||
&self.beobachten,
|
||||
&self.config,
|
||||
self.peer.ip(),
|
||||
"other",
|
||||
);
|
||||
return Err(e);
|
||||
}
|
||||
Err(_) => {
|
||||
self.stats.increment_connects_bad();
|
||||
warn!(
|
||||
peer = %self.peer,
|
||||
timeout_ms = proxy_header_timeout.as_millis(),
|
||||
"PROXY protocol header timeout"
|
||||
);
|
||||
record_beobachten_class(
|
||||
&self.beobachten,
|
||||
&self.config,
|
||||
self.peer.ip(),
|
||||
"other",
|
||||
);
|
||||
return Err(ProxyError::InvalidProxyProtocol);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
let mut first_bytes = [0u8; 5];
|
||||
self.stream.read_exact(&mut first_bytes).await?;
|
||||
|
||||
let is_tls = tls::is_tls_handshake(&first_bytes[..3]);
|
||||
let peer = self.peer;
|
||||
let _ip_tracker = self.ip_tracker.clone();
|
||||
|
||||
debug!(peer = %peer, is_tls = is_tls, "Handshake type detected");
|
||||
|
||||
if is_tls {
|
||||
self.handle_tls_client(first_bytes).await
|
||||
self.handle_tls_client(first_bytes, local_addr).await
|
||||
} else {
|
||||
self.handle_direct_client(first_bytes).await
|
||||
self.handle_direct_client(first_bytes, local_addr).await
|
||||
}
|
||||
}
|
||||
|
||||
async fn handle_tls_client(mut self, first_bytes: [u8; 5]) -> Result<()> {
|
||||
async fn handle_tls_client(mut self, first_bytes: [u8; 5], local_addr: SocketAddr) -> Result<HandshakeOutcome> {
|
||||
let peer = self.peer;
|
||||
let _ip_tracker = self.ip_tracker.clone();
|
||||
|
||||
let tls_len = u16::from_be_bytes([first_bytes[3], first_bytes[4]]) as usize;
|
||||
|
||||
@@ -127,8 +502,17 @@ impl RunningClientHandler {
|
||||
debug!(peer = %peer, tls_len = tls_len, "TLS handshake too short");
|
||||
self.stats.increment_connects_bad();
|
||||
let (reader, writer) = self.stream.into_split();
|
||||
handle_bad_client(reader, writer, &first_bytes, &self.config).await;
|
||||
return Ok(());
|
||||
handle_bad_client(
|
||||
reader,
|
||||
writer,
|
||||
&first_bytes,
|
||||
peer,
|
||||
local_addr,
|
||||
&self.config,
|
||||
&self.beobachten,
|
||||
)
|
||||
.await;
|
||||
return Ok(HandshakeOutcome::Handled);
|
||||
}
|
||||
|
||||
let mut handshake = vec![0u8; 5 + tls_len];
|
||||
@@ -140,10 +524,9 @@ impl RunningClientHandler {
|
||||
let stats = self.stats.clone();
|
||||
let buffer_pool = self.buffer_pool.clone();
|
||||
|
||||
let local_addr = self.stream.local_addr().map_err(ProxyError::Io)?;
|
||||
let (read_half, write_half) = self.stream.into_split();
|
||||
|
||||
let (mut tls_reader, tls_writer, _tls_user) = match handle_tls_handshake(
|
||||
let (mut tls_reader, tls_writer, tls_user) = match handle_tls_handshake(
|
||||
&handshake,
|
||||
read_half,
|
||||
write_half,
|
||||
@@ -151,14 +534,24 @@ impl RunningClientHandler {
|
||||
&config,
|
||||
&replay_checker,
|
||||
&self.rng,
|
||||
self.tls_cache.clone(),
|
||||
)
|
||||
.await
|
||||
{
|
||||
HandshakeResult::Success(result) => result,
|
||||
HandshakeResult::BadClient { reader, writer } => {
|
||||
stats.increment_connects_bad();
|
||||
handle_bad_client(reader, writer, &handshake, &config).await;
|
||||
return Ok(());
|
||||
handle_bad_client(
|
||||
reader,
|
||||
writer,
|
||||
&handshake,
|
||||
peer,
|
||||
local_addr,
|
||||
&config,
|
||||
&self.beobachten,
|
||||
)
|
||||
.await;
|
||||
return Ok(HandshakeOutcome::Handled);
|
||||
}
|
||||
HandshakeResult::Error(e) => return Err(e),
|
||||
};
|
||||
@@ -177,6 +570,7 @@ impl RunningClientHandler {
|
||||
&config,
|
||||
&replay_checker,
|
||||
true,
|
||||
Some(tls_user.as_str()),
|
||||
)
|
||||
.await
|
||||
{
|
||||
@@ -187,35 +581,48 @@ impl RunningClientHandler {
|
||||
} => {
|
||||
stats.increment_connects_bad();
|
||||
debug!(peer = %peer, "Valid TLS but invalid MTProto handshake");
|
||||
return Ok(());
|
||||
return Ok(HandshakeOutcome::Handled);
|
||||
}
|
||||
HandshakeResult::Error(e) => return Err(e),
|
||||
};
|
||||
|
||||
Self::handle_authenticated_static(
|
||||
crypto_reader,
|
||||
crypto_writer,
|
||||
success,
|
||||
self.upstream_manager,
|
||||
self.stats,
|
||||
self.config,
|
||||
buffer_pool,
|
||||
self.rng,
|
||||
self.me_pool,
|
||||
local_addr,
|
||||
)
|
||||
.await
|
||||
Ok(HandshakeOutcome::NeedsRelay(Box::pin(
|
||||
Self::handle_authenticated_static(
|
||||
crypto_reader,
|
||||
crypto_writer,
|
||||
success,
|
||||
self.upstream_manager,
|
||||
self.stats,
|
||||
self.config,
|
||||
buffer_pool,
|
||||
self.rng,
|
||||
self.me_pool,
|
||||
local_addr,
|
||||
peer,
|
||||
self.ip_tracker,
|
||||
),
|
||||
)))
|
||||
}
|
||||
|
||||
async fn handle_direct_client(mut self, first_bytes: [u8; 5]) -> Result<()> {
|
||||
async fn handle_direct_client(mut self, first_bytes: [u8; 5], local_addr: SocketAddr) -> Result<HandshakeOutcome> {
|
||||
let peer = self.peer;
|
||||
let _ip_tracker = self.ip_tracker.clone();
|
||||
|
||||
if !self.config.general.modes.classic && !self.config.general.modes.secure {
|
||||
debug!(peer = %peer, "Non-TLS modes disabled");
|
||||
self.stats.increment_connects_bad();
|
||||
let (reader, writer) = self.stream.into_split();
|
||||
handle_bad_client(reader, writer, &first_bytes, &self.config).await;
|
||||
return Ok(());
|
||||
handle_bad_client(
|
||||
reader,
|
||||
writer,
|
||||
&first_bytes,
|
||||
peer,
|
||||
local_addr,
|
||||
&self.config,
|
||||
&self.beobachten,
|
||||
)
|
||||
.await;
|
||||
return Ok(HandshakeOutcome::Handled);
|
||||
}
|
||||
|
||||
let mut handshake = [0u8; HANDSHAKE_LEN];
|
||||
@@ -227,7 +634,6 @@ impl RunningClientHandler {
|
||||
let stats = self.stats.clone();
|
||||
let buffer_pool = self.buffer_pool.clone();
|
||||
|
||||
let local_addr = self.stream.local_addr().map_err(ProxyError::Io)?;
|
||||
let (read_half, write_half) = self.stream.into_split();
|
||||
|
||||
let (crypto_reader, crypto_writer, success) = match handle_mtproto_handshake(
|
||||
@@ -238,31 +644,44 @@ impl RunningClientHandler {
|
||||
&config,
|
||||
&replay_checker,
|
||||
false,
|
||||
None,
|
||||
)
|
||||
.await
|
||||
{
|
||||
HandshakeResult::Success(result) => result,
|
||||
HandshakeResult::BadClient { reader, writer } => {
|
||||
stats.increment_connects_bad();
|
||||
handle_bad_client(reader, writer, &handshake, &config).await;
|
||||
return Ok(());
|
||||
handle_bad_client(
|
||||
reader,
|
||||
writer,
|
||||
&handshake,
|
||||
peer,
|
||||
local_addr,
|
||||
&config,
|
||||
&self.beobachten,
|
||||
)
|
||||
.await;
|
||||
return Ok(HandshakeOutcome::Handled);
|
||||
}
|
||||
HandshakeResult::Error(e) => return Err(e),
|
||||
};
|
||||
|
||||
Self::handle_authenticated_static(
|
||||
crypto_reader,
|
||||
crypto_writer,
|
||||
success,
|
||||
self.upstream_manager,
|
||||
self.stats,
|
||||
self.config,
|
||||
buffer_pool,
|
||||
self.rng,
|
||||
self.me_pool,
|
||||
local_addr,
|
||||
)
|
||||
.await
|
||||
Ok(HandshakeOutcome::NeedsRelay(Box::pin(
|
||||
Self::handle_authenticated_static(
|
||||
crypto_reader,
|
||||
crypto_writer,
|
||||
success,
|
||||
self.upstream_manager,
|
||||
self.stats,
|
||||
self.config,
|
||||
buffer_pool,
|
||||
self.rng,
|
||||
self.me_pool,
|
||||
local_addr,
|
||||
peer,
|
||||
self.ip_tracker,
|
||||
),
|
||||
)))
|
||||
}
|
||||
|
||||
/// Main dispatch after successful handshake.
|
||||
@@ -280,22 +699,23 @@ impl RunningClientHandler {
|
||||
rng: Arc<SecureRandom>,
|
||||
me_pool: Option<Arc<MePool>>,
|
||||
local_addr: SocketAddr,
|
||||
peer_addr: SocketAddr,
|
||||
ip_tracker: Arc<UserIpTracker>,
|
||||
) -> Result<()>
|
||||
where
|
||||
R: AsyncRead + Unpin + Send + 'static,
|
||||
W: AsyncWrite + Unpin + Send + 'static,
|
||||
{
|
||||
let user = &success.user;
|
||||
let user = success.user.clone();
|
||||
|
||||
if let Err(e) = Self::check_user_limits_static(user, &config, &stats) {
|
||||
if let Err(e) = Self::check_user_limits_static(&user, &config, &stats, peer_addr, &ip_tracker).await {
|
||||
warn!(user = %user, error = %e, "User limit exceeded");
|
||||
return Err(e);
|
||||
}
|
||||
|
||||
// Decide: middle proxy or direct
|
||||
if config.general.use_middle_proxy {
|
||||
let relay_result = if config.general.use_middle_proxy {
|
||||
if let Some(ref pool) = me_pool {
|
||||
return handle_via_middle_proxy(
|
||||
handle_via_middle_proxy(
|
||||
client_reader,
|
||||
client_writer,
|
||||
success,
|
||||
@@ -304,49 +724,98 @@ impl RunningClientHandler {
|
||||
config,
|
||||
buffer_pool,
|
||||
local_addr,
|
||||
rng,
|
||||
)
|
||||
.await;
|
||||
.await
|
||||
} else {
|
||||
warn!("use_middle_proxy=true but MePool not initialized, falling back to direct");
|
||||
handle_via_direct(
|
||||
client_reader,
|
||||
client_writer,
|
||||
success,
|
||||
upstream_manager,
|
||||
stats,
|
||||
config,
|
||||
buffer_pool,
|
||||
rng,
|
||||
)
|
||||
.await
|
||||
}
|
||||
warn!("use_middle_proxy=true but MePool not initialized, falling back to direct");
|
||||
}
|
||||
} else {
|
||||
// Direct mode (original behavior)
|
||||
handle_via_direct(
|
||||
client_reader,
|
||||
client_writer,
|
||||
success,
|
||||
upstream_manager,
|
||||
stats,
|
||||
config,
|
||||
buffer_pool,
|
||||
rng,
|
||||
)
|
||||
.await
|
||||
};
|
||||
|
||||
// Direct mode (original behavior)
|
||||
handle_via_direct(
|
||||
client_reader,
|
||||
client_writer,
|
||||
success,
|
||||
upstream_manager,
|
||||
stats,
|
||||
config,
|
||||
buffer_pool,
|
||||
rng,
|
||||
)
|
||||
.await
|
||||
ip_tracker.remove_ip(&user, peer_addr.ip()).await;
|
||||
relay_result
|
||||
}
|
||||
|
||||
fn check_user_limits_static(user: &str, config: &ProxyConfig, stats: &Stats) -> Result<()> {
|
||||
if let Some(expiration) = config.access.user_expirations.get(user) {
|
||||
if chrono::Utc::now() > *expiration {
|
||||
return Err(ProxyError::UserExpired {
|
||||
user: user.to_string(),
|
||||
});
|
||||
}
|
||||
async fn check_user_limits_static(
|
||||
user: &str,
|
||||
config: &ProxyConfig,
|
||||
stats: &Stats,
|
||||
peer_addr: SocketAddr,
|
||||
ip_tracker: &UserIpTracker,
|
||||
) -> Result<()> {
|
||||
if let Some(expiration) = config.access.user_expirations.get(user)
|
||||
&& chrono::Utc::now() > *expiration
|
||||
{
|
||||
return Err(ProxyError::UserExpired {
|
||||
user: user.to_string(),
|
||||
});
|
||||
}
|
||||
|
||||
if let Some(limit) = config.access.user_max_tcp_conns.get(user) {
|
||||
if stats.get_user_curr_connects(user) >= *limit as u64 {
|
||||
let mut ip_reserved = false;
|
||||
// IP limit check
|
||||
match ip_tracker.check_and_add(user, peer_addr.ip()).await {
|
||||
Ok(()) => {
|
||||
ip_reserved = true;
|
||||
}
|
||||
Err(reason) => {
|
||||
warn!(
|
||||
user = %user,
|
||||
ip = %peer_addr.ip(),
|
||||
reason = %reason,
|
||||
"IP limit exceeded"
|
||||
);
|
||||
return Err(ProxyError::ConnectionLimitExceeded {
|
||||
user: user.to_string(),
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
if let Some(quota) = config.access.user_data_quota.get(user) {
|
||||
if stats.get_user_total_octets(user) >= *quota {
|
||||
return Err(ProxyError::DataQuotaExceeded {
|
||||
user: user.to_string(),
|
||||
});
|
||||
if let Some(limit) = config.access.user_max_tcp_conns.get(user)
|
||||
&& stats.get_user_curr_connects(user) >= *limit as u64
|
||||
{
|
||||
if ip_reserved {
|
||||
ip_tracker.remove_ip(user, peer_addr.ip()).await;
|
||||
stats.increment_ip_reservation_rollback_tcp_limit_total();
|
||||
}
|
||||
return Err(ProxyError::ConnectionLimitExceeded {
|
||||
user: user.to_string(),
|
||||
});
|
||||
}
|
||||
|
||||
if let Some(quota) = config.access.user_data_quota.get(user)
|
||||
&& stats.get_user_total_octets(user) >= *quota
|
||||
{
|
||||
if ip_reserved {
|
||||
ip_tracker.remove_ip(user, peer_addr.ip()).await;
|
||||
stats.increment_ip_reservation_rollback_quota_limit_total();
|
||||
}
|
||||
return Err(ProxyError::DataQuotaExceeded {
|
||||
user: user.to_string(),
|
||||
});
|
||||
}
|
||||
|
||||
Ok(())
|
||||
|
||||
@@ -1,3 +1,5 @@
|
||||
use std::fs::OpenOptions;
|
||||
use std::io::Write;
|
||||
use std::net::SocketAddr;
|
||||
use std::sync::Arc;
|
||||
|
||||
@@ -32,7 +34,7 @@ where
|
||||
let user = &success.user;
|
||||
let dc_addr = get_dc_addr_static(success.dc_idx, &config)?;
|
||||
|
||||
info!(
|
||||
debug!(
|
||||
user = %user,
|
||||
peer = %success.peer,
|
||||
dc = success.dc_idx,
|
||||
@@ -43,7 +45,7 @@ where
|
||||
);
|
||||
|
||||
let tg_stream = upstream_manager
|
||||
.connect(dc_addr, Some(success.dc_idx))
|
||||
.connect(dc_addr, Some(success.dc_idx), user.strip_prefix("scope_").filter(|s| !s.is_empty()))
|
||||
.await?;
|
||||
|
||||
debug!(peer = %success.peer, dc_addr = %dc_addr, "Connected, performing TG handshake");
|
||||
@@ -55,6 +57,7 @@ where
|
||||
|
||||
stats.increment_user_connects(user);
|
||||
stats.increment_user_curr_connects(user);
|
||||
stats.increment_current_connections_direct();
|
||||
|
||||
let relay_result = relay_bidirectional(
|
||||
client_reader,
|
||||
@@ -67,6 +70,7 @@ where
|
||||
)
|
||||
.await;
|
||||
|
||||
stats.decrement_current_connections_direct();
|
||||
stats.decrement_user_curr_connects(user);
|
||||
|
||||
match &relay_result {
|
||||
@@ -78,7 +82,8 @@ where
|
||||
}
|
||||
|
||||
fn get_dc_addr_static(dc_idx: i16, config: &ProxyConfig) -> Result<SocketAddr> {
|
||||
let datacenters = if config.general.prefer_ipv6 {
|
||||
let prefer_v6 = config.network.prefer == 6 && config.network.ipv6.unwrap_or(true);
|
||||
let datacenters = if prefer_v6 {
|
||||
&*TG_DATACENTERS_V6
|
||||
} else {
|
||||
&*TG_DATACENTERS_V4
|
||||
@@ -87,17 +92,24 @@ fn get_dc_addr_static(dc_idx: i16, config: &ProxyConfig) -> Result<SocketAddr> {
|
||||
let num_dcs = datacenters.len();
|
||||
|
||||
let dc_key = dc_idx.to_string();
|
||||
if let Some(addr_str) = config.dc_overrides.get(&dc_key) {
|
||||
match addr_str.parse::<SocketAddr>() {
|
||||
Ok(addr) => {
|
||||
debug!(dc_idx = dc_idx, addr = %addr, "Using DC override from config");
|
||||
return Ok(addr);
|
||||
}
|
||||
Err(_) => {
|
||||
warn!(dc_idx = dc_idx, addr_str = %addr_str,
|
||||
"Invalid DC override address in config, ignoring");
|
||||
if let Some(addrs) = config.dc_overrides.get(&dc_key) {
|
||||
let mut parsed = Vec::new();
|
||||
for addr_str in addrs {
|
||||
match addr_str.parse::<SocketAddr>() {
|
||||
Ok(addr) => parsed.push(addr),
|
||||
Err(_) => warn!(dc_idx = dc_idx, addr_str = %addr_str, "Invalid DC override address in config, ignoring"),
|
||||
}
|
||||
}
|
||||
|
||||
if let Some(addr) = parsed
|
||||
.iter()
|
||||
.find(|a| a.is_ipv6() == prefer_v6)
|
||||
.or_else(|| parsed.first())
|
||||
.copied()
|
||||
{
|
||||
debug!(dc_idx = dc_idx, addr = %addr, count = parsed.len(), "Using DC override from config");
|
||||
return Ok(addr);
|
||||
}
|
||||
}
|
||||
|
||||
let abs_dc = dc_idx.unsigned_abs() as usize;
|
||||
@@ -105,6 +117,22 @@ fn get_dc_addr_static(dc_idx: i16, config: &ProxyConfig) -> Result<SocketAddr> {
|
||||
return Ok(SocketAddr::new(datacenters[abs_dc - 1], TG_DATACENTER_PORT));
|
||||
}
|
||||
|
||||
// Unknown DC requested by client without override: log and fall back.
|
||||
if !config.dc_overrides.contains_key(&dc_key) {
|
||||
warn!(dc_idx = dc_idx, "Requested non-standard DC with no override; falling back to default cluster");
|
||||
if config.general.unknown_dc_file_log_enabled
|
||||
&& let Some(path) = &config.general.unknown_dc_log_path
|
||||
&& let Ok(handle) = tokio::runtime::Handle::try_current()
|
||||
{
|
||||
let path = path.clone();
|
||||
handle.spawn_blocking(move || {
|
||||
if let Ok(mut file) = OpenOptions::new().create(true).append(true).open(path) {
|
||||
let _ = writeln!(file, "dc_idx={dc_idx}");
|
||||
}
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
let default_dc = config.default_dc.unwrap_or(2) as usize;
|
||||
let fallback_idx = if default_dc >= 1 && default_dc <= num_dcs {
|
||||
default_dc - 1
|
||||
@@ -139,6 +167,8 @@ async fn do_tg_handshake_static(
|
||||
success.dc_idx,
|
||||
&success.dec_key,
|
||||
success.dec_iv,
|
||||
&success.enc_key,
|
||||
success.enc_iv,
|
||||
rng,
|
||||
config.general.fast_mode,
|
||||
);
|
||||
@@ -156,8 +186,9 @@ async fn do_tg_handshake_static(
|
||||
|
||||
let (read_half, write_half) = stream.into_split();
|
||||
|
||||
let max_pending = config.general.crypto_pending_buffer;
|
||||
Ok((
|
||||
CryptoReader::new(read_half, tg_decryptor),
|
||||
CryptoWriter::new(write_half, tg_encryptor),
|
||||
CryptoWriter::new(write_half, tg_encryptor, max_pending),
|
||||
))
|
||||
}
|
||||
|
||||
@@ -1,17 +1,48 @@
|
||||
//! MTProto Handshake
|
||||
|
||||
#![allow(dead_code)]
|
||||
|
||||
use std::net::SocketAddr;
|
||||
use std::sync::Arc;
|
||||
use std::time::Duration;
|
||||
use tokio::io::{AsyncRead, AsyncWrite, AsyncWriteExt};
|
||||
use tracing::{debug, warn, trace, info};
|
||||
use tracing::{debug, warn, trace};
|
||||
use zeroize::Zeroize;
|
||||
|
||||
use crate::crypto::{sha256, AesCtr, SecureRandom};
|
||||
use rand::Rng;
|
||||
use crate::protocol::constants::*;
|
||||
use crate::protocol::tls;
|
||||
use crate::stream::{FakeTlsReader, FakeTlsWriter, CryptoReader, CryptoWriter};
|
||||
use crate::error::{ProxyError, HandshakeResult};
|
||||
use crate::stats::ReplayChecker;
|
||||
use crate::config::ProxyConfig;
|
||||
use crate::tls_front::{TlsFrontCache, emulator};
|
||||
|
||||
fn decode_user_secrets(
|
||||
config: &ProxyConfig,
|
||||
preferred_user: Option<&str>,
|
||||
) -> Vec<(String, Vec<u8>)> {
|
||||
let mut secrets = Vec::with_capacity(config.access.users.len());
|
||||
|
||||
if let Some(preferred) = preferred_user
|
||||
&& let Some(secret_hex) = config.access.users.get(preferred)
|
||||
&& let Ok(bytes) = hex::decode(secret_hex)
|
||||
{
|
||||
secrets.push((preferred.to_string(), bytes));
|
||||
}
|
||||
|
||||
for (name, secret_hex) in &config.access.users {
|
||||
if preferred_user.is_some_and(|preferred| preferred == name.as_str()) {
|
||||
continue;
|
||||
}
|
||||
if let Ok(bytes) = hex::decode(secret_hex) {
|
||||
secrets.push((name.clone(), bytes));
|
||||
}
|
||||
}
|
||||
|
||||
secrets
|
||||
}
|
||||
|
||||
/// Result of successful handshake
|
||||
///
|
||||
@@ -55,6 +86,7 @@ pub async fn handle_tls_handshake<R, W>(
|
||||
config: &ProxyConfig,
|
||||
replay_checker: &ReplayChecker,
|
||||
rng: &SecureRandom,
|
||||
tls_cache: Option<Arc<TlsFrontCache>>,
|
||||
) -> HandshakeResult<(FakeTlsReader<R>, FakeTlsWriter<W>, String), R, W>
|
||||
where
|
||||
R: AsyncRead + Unpin,
|
||||
@@ -70,16 +102,12 @@ where
|
||||
let digest = &handshake[tls::TLS_DIGEST_POS..tls::TLS_DIGEST_POS + tls::TLS_DIGEST_LEN];
|
||||
let digest_half = &digest[..tls::TLS_DIGEST_HALF_LEN];
|
||||
|
||||
if replay_checker.check_tls_digest(digest_half) {
|
||||
if replay_checker.check_and_add_tls_digest(digest_half) {
|
||||
warn!(peer = %peer, "TLS replay attack detected (duplicate digest)");
|
||||
return HandshakeResult::BadClient { reader, writer };
|
||||
}
|
||||
|
||||
let secrets: Vec<(String, Vec<u8>)> = config.access.users.iter()
|
||||
.filter_map(|(name, hex)| {
|
||||
hex::decode(hex).ok().map(|bytes| (name.clone(), bytes))
|
||||
})
|
||||
.collect();
|
||||
let secrets = decode_user_secrets(config, None);
|
||||
|
||||
let validation = match tls::validate_tls_handshake(
|
||||
handshake,
|
||||
@@ -102,13 +130,85 @@ where
|
||||
None => return HandshakeResult::BadClient { reader, writer },
|
||||
};
|
||||
|
||||
let response = tls::build_server_hello(
|
||||
secret,
|
||||
&validation.digest,
|
||||
&validation.session_id,
|
||||
config.censorship.fake_cert_len,
|
||||
rng,
|
||||
);
|
||||
let cached = if config.censorship.tls_emulation {
|
||||
if let Some(cache) = tls_cache.as_ref() {
|
||||
let selected_domain = if let Some(sni) = tls::extract_sni_from_client_hello(handshake) {
|
||||
if cache.contains_domain(&sni).await {
|
||||
sni
|
||||
} else {
|
||||
config.censorship.tls_domain.clone()
|
||||
}
|
||||
} else {
|
||||
config.censorship.tls_domain.clone()
|
||||
};
|
||||
let cached_entry = cache.get(&selected_domain).await;
|
||||
let use_full_cert_payload = cache
|
||||
.take_full_cert_budget_for_ip(
|
||||
peer.ip(),
|
||||
Duration::from_secs(config.censorship.tls_full_cert_ttl_secs),
|
||||
)
|
||||
.await;
|
||||
Some((cached_entry, use_full_cert_payload))
|
||||
} else {
|
||||
None
|
||||
}
|
||||
} else {
|
||||
None
|
||||
};
|
||||
|
||||
let alpn_list = if config.censorship.alpn_enforce {
|
||||
tls::extract_alpn_from_client_hello(handshake)
|
||||
} else {
|
||||
Vec::new()
|
||||
};
|
||||
let selected_alpn = if config.censorship.alpn_enforce {
|
||||
if alpn_list.iter().any(|p| p == b"h2") {
|
||||
Some(b"h2".to_vec())
|
||||
} else if alpn_list.iter().any(|p| p == b"http/1.1") {
|
||||
Some(b"http/1.1".to_vec())
|
||||
} else {
|
||||
None
|
||||
}
|
||||
} else {
|
||||
None
|
||||
};
|
||||
|
||||
let response = if let Some((cached_entry, use_full_cert_payload)) = cached {
|
||||
emulator::build_emulated_server_hello(
|
||||
secret,
|
||||
&validation.digest,
|
||||
&validation.session_id,
|
||||
&cached_entry,
|
||||
use_full_cert_payload,
|
||||
rng,
|
||||
selected_alpn.clone(),
|
||||
config.censorship.tls_new_session_tickets,
|
||||
)
|
||||
} else {
|
||||
tls::build_server_hello(
|
||||
secret,
|
||||
&validation.digest,
|
||||
&validation.session_id,
|
||||
config.censorship.fake_cert_len,
|
||||
rng,
|
||||
selected_alpn.clone(),
|
||||
config.censorship.tls_new_session_tickets,
|
||||
)
|
||||
};
|
||||
|
||||
// Optional anti-fingerprint delay before sending ServerHello.
|
||||
if config.censorship.server_hello_delay_max_ms > 0 {
|
||||
let min = config.censorship.server_hello_delay_min_ms;
|
||||
let max = config.censorship.server_hello_delay_max_ms.max(min);
|
||||
let delay_ms = if max == min {
|
||||
max
|
||||
} else {
|
||||
rand::rng().random_range(min..=max)
|
||||
};
|
||||
if delay_ms > 0 {
|
||||
tokio::time::sleep(std::time::Duration::from_millis(delay_ms)).await;
|
||||
}
|
||||
}
|
||||
|
||||
debug!(peer = %peer, response_len = response.len(), "Sending TLS ServerHello");
|
||||
|
||||
@@ -122,9 +222,7 @@ where
|
||||
return HandshakeResult::Error(ProxyError::Io(e));
|
||||
}
|
||||
|
||||
replay_checker.add_tls_digest(digest_half);
|
||||
|
||||
info!(
|
||||
debug!(
|
||||
peer = %peer,
|
||||
user = %validation.user,
|
||||
"TLS handshake successful"
|
||||
@@ -146,6 +244,7 @@ pub async fn handle_mtproto_handshake<R, W>(
|
||||
config: &ProxyConfig,
|
||||
replay_checker: &ReplayChecker,
|
||||
is_tls: bool,
|
||||
preferred_user: Option<&str>,
|
||||
) -> HandshakeResult<(CryptoReader<R>, CryptoWriter<W>, HandshakeSuccess), R, W>
|
||||
where
|
||||
R: AsyncRead + Unpin + Send,
|
||||
@@ -155,18 +254,16 @@ where
|
||||
|
||||
let dec_prekey_iv = &handshake[SKIP_LEN..SKIP_LEN + PREKEY_LEN + IV_LEN];
|
||||
|
||||
if replay_checker.check_handshake(dec_prekey_iv) {
|
||||
if replay_checker.check_and_add_handshake(dec_prekey_iv) {
|
||||
warn!(peer = %peer, "MTProto replay attack detected");
|
||||
return HandshakeResult::BadClient { reader, writer };
|
||||
}
|
||||
|
||||
let enc_prekey_iv: Vec<u8> = dec_prekey_iv.iter().rev().copied().collect();
|
||||
|
||||
for (user, secret_hex) in &config.access.users {
|
||||
let secret = match hex::decode(secret_hex) {
|
||||
Ok(s) => s,
|
||||
Err(_) => continue,
|
||||
};
|
||||
let decoded_users = decode_user_secrets(config, preferred_user);
|
||||
|
||||
for (user, secret) in decoded_users {
|
||||
|
||||
let dec_prekey = &dec_prekey_iv[..PREKEY_LEN];
|
||||
let dec_iv_bytes = &dec_prekey_iv[PREKEY_LEN..];
|
||||
@@ -192,7 +289,11 @@ where
|
||||
|
||||
let mode_ok = match proto_tag {
|
||||
ProtoTag::Secure => {
|
||||
if is_tls { config.general.modes.tls } else { config.general.modes.secure }
|
||||
if is_tls {
|
||||
config.general.modes.tls || config.general.modes.secure
|
||||
} else {
|
||||
config.general.modes.secure || config.general.modes.tls
|
||||
}
|
||||
}
|
||||
ProtoTag::Intermediate | ProtoTag::Abridged => config.general.modes.classic,
|
||||
};
|
||||
@@ -216,8 +317,6 @@ where
|
||||
|
||||
let enc_iv = u128::from_be_bytes(enc_iv_bytes.try_into().unwrap());
|
||||
|
||||
replay_checker.add_handshake(dec_prekey_iv);
|
||||
|
||||
let encryptor = AesCtr::new(&enc_key, enc_iv);
|
||||
|
||||
let success = HandshakeSuccess {
|
||||
@@ -232,7 +331,7 @@ where
|
||||
is_tls,
|
||||
};
|
||||
|
||||
info!(
|
||||
debug!(
|
||||
peer = %peer,
|
||||
user = %user,
|
||||
dc = dc_idx,
|
||||
@@ -241,9 +340,10 @@ where
|
||||
"MTProto handshake successful"
|
||||
);
|
||||
|
||||
let max_pending = config.general.crypto_pending_buffer;
|
||||
return HandshakeResult::Success((
|
||||
CryptoReader::new(reader, decryptor),
|
||||
CryptoWriter::new(writer, encryptor),
|
||||
CryptoWriter::new(writer, encryptor, max_pending),
|
||||
success,
|
||||
));
|
||||
}
|
||||
@@ -256,8 +356,10 @@ where
|
||||
pub fn generate_tg_nonce(
|
||||
proto_tag: ProtoTag,
|
||||
dc_idx: i16,
|
||||
client_dec_key: &[u8; 32],
|
||||
client_dec_iv: u128,
|
||||
_client_dec_key: &[u8; 32],
|
||||
_client_dec_iv: u128,
|
||||
client_enc_key: &[u8; 32],
|
||||
client_enc_iv: u128,
|
||||
rng: &SecureRandom,
|
||||
fast_mode: bool,
|
||||
) -> ([u8; HANDSHAKE_LEN], [u8; 32], u128, [u8; 32], u128) {
|
||||
@@ -278,9 +380,11 @@ pub fn generate_tg_nonce(
|
||||
nonce[DC_IDX_POS..DC_IDX_POS + 2].copy_from_slice(&dc_idx.to_le_bytes());
|
||||
|
||||
if fast_mode {
|
||||
nonce[SKIP_LEN..SKIP_LEN + KEY_LEN].copy_from_slice(client_dec_key);
|
||||
nonce[SKIP_LEN + KEY_LEN..SKIP_LEN + KEY_LEN + IV_LEN]
|
||||
.copy_from_slice(&client_dec_iv.to_be_bytes());
|
||||
let mut key_iv = Vec::with_capacity(KEY_LEN + IV_LEN);
|
||||
key_iv.extend_from_slice(client_enc_key);
|
||||
key_iv.extend_from_slice(&client_enc_iv.to_be_bytes());
|
||||
key_iv.reverse(); // Python/C behavior: reversed enc_key+enc_iv in nonce
|
||||
nonce[SKIP_LEN..SKIP_LEN + KEY_LEN + IV_LEN].copy_from_slice(&key_iv);
|
||||
}
|
||||
|
||||
let enc_key_iv = &nonce[SKIP_LEN..SKIP_LEN + KEY_LEN + IV_LEN];
|
||||
@@ -332,10 +436,21 @@ mod tests {
|
||||
fn test_generate_tg_nonce() {
|
||||
let client_dec_key = [0x42u8; 32];
|
||||
let client_dec_iv = 12345u128;
|
||||
let client_enc_key = [0x24u8; 32];
|
||||
let client_enc_iv = 54321u128;
|
||||
|
||||
let rng = SecureRandom::new();
|
||||
let (nonce, _tg_enc_key, _tg_enc_iv, _tg_dec_key, _tg_dec_iv) =
|
||||
generate_tg_nonce(ProtoTag::Secure, 2, &client_dec_key, client_dec_iv, &rng, false);
|
||||
generate_tg_nonce(
|
||||
ProtoTag::Secure,
|
||||
2,
|
||||
&client_dec_key,
|
||||
client_dec_iv,
|
||||
&client_enc_key,
|
||||
client_enc_iv,
|
||||
&rng,
|
||||
false,
|
||||
);
|
||||
|
||||
assert_eq!(nonce.len(), HANDSHAKE_LEN);
|
||||
|
||||
@@ -347,10 +462,21 @@ mod tests {
|
||||
fn test_encrypt_tg_nonce() {
|
||||
let client_dec_key = [0x42u8; 32];
|
||||
let client_dec_iv = 12345u128;
|
||||
let client_enc_key = [0x24u8; 32];
|
||||
let client_enc_iv = 54321u128;
|
||||
|
||||
let rng = SecureRandom::new();
|
||||
let (nonce, _, _, _, _) =
|
||||
generate_tg_nonce(ProtoTag::Secure, 2, &client_dec_key, client_dec_iv, &rng, false);
|
||||
generate_tg_nonce(
|
||||
ProtoTag::Secure,
|
||||
2,
|
||||
&client_dec_key,
|
||||
client_dec_iv,
|
||||
&client_enc_key,
|
||||
client_enc_iv,
|
||||
&rng,
|
||||
false,
|
||||
);
|
||||
|
||||
let encrypted = encrypt_tg_nonce(&nonce);
|
||||
|
||||
@@ -379,4 +505,4 @@ mod tests {
|
||||
drop(success);
|
||||
// Drop impl zeroizes key material without panic
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,7 +1,8 @@
|
||||
//! Masking - forward unrecognized traffic to mask host
|
||||
|
||||
use std::time::Duration;
|
||||
use std::str;
|
||||
use std::net::SocketAddr;
|
||||
use std::time::Duration;
|
||||
use tokio::net::TcpStream;
|
||||
#[cfg(unix)]
|
||||
use tokio::net::UnixStream;
|
||||
@@ -9,22 +10,25 @@ use tokio::io::{AsyncRead, AsyncWrite, AsyncReadExt, AsyncWriteExt};
|
||||
use tokio::time::timeout;
|
||||
use tracing::debug;
|
||||
use crate::config::ProxyConfig;
|
||||
use crate::network::dns_overrides::resolve_socket_addr;
|
||||
use crate::stats::beobachten::BeobachtenStore;
|
||||
use crate::transport::proxy_protocol::{ProxyProtocolV1Builder, ProxyProtocolV2Builder};
|
||||
|
||||
const MASK_TIMEOUT: Duration = Duration::from_secs(5);
|
||||
/// Maximum duration for the entire masking relay.
|
||||
/// Limits resource consumption from slow-loris attacks and port scanners.
|
||||
const MASK_RELAY_TIMEOUT: Duration = Duration::from_secs(60);
|
||||
/// Maximum duration for the entire masking relay.
|
||||
/// Limits resource consumption from slow-loris attacks and port scanners.
|
||||
const MASK_RELAY_TIMEOUT: Duration = Duration::from_secs(60);
|
||||
const MASK_BUFFER_SIZE: usize = 8192;
|
||||
|
||||
/// Detect client type based on initial data
|
||||
fn detect_client_type(data: &[u8]) -> &'static str {
|
||||
// Check for HTTP request
|
||||
if data.len() > 4 {
|
||||
if data.starts_with(b"GET ") || data.starts_with(b"POST") ||
|
||||
if data.len() > 4
|
||||
&& (data.starts_with(b"GET ") || data.starts_with(b"POST") ||
|
||||
data.starts_with(b"HEAD") || data.starts_with(b"PUT ") ||
|
||||
data.starts_with(b"DELETE") || data.starts_with(b"OPTIONS") {
|
||||
return "HTTP";
|
||||
}
|
||||
data.starts_with(b"DELETE") || data.starts_with(b"OPTIONS"))
|
||||
{
|
||||
return "HTTP";
|
||||
}
|
||||
|
||||
// Check for TLS ClientHello (0x16 = handshake, 0x03 0x01-0x03 = TLS version)
|
||||
@@ -50,20 +54,27 @@ pub async fn handle_bad_client<R, W>(
|
||||
reader: R,
|
||||
writer: W,
|
||||
initial_data: &[u8],
|
||||
peer: SocketAddr,
|
||||
local_addr: SocketAddr,
|
||||
config: &ProxyConfig,
|
||||
beobachten: &BeobachtenStore,
|
||||
)
|
||||
where
|
||||
R: AsyncRead + Unpin + Send + 'static,
|
||||
W: AsyncWrite + Unpin + Send + 'static,
|
||||
{
|
||||
let client_type = detect_client_type(initial_data);
|
||||
if config.general.beobachten {
|
||||
let ttl = Duration::from_secs(config.general.beobachten_minutes.saturating_mul(60));
|
||||
beobachten.record(client_type, peer.ip(), ttl);
|
||||
}
|
||||
|
||||
if !config.censorship.mask {
|
||||
// Masking disabled, just consume data
|
||||
consume_client_data(reader).await;
|
||||
return;
|
||||
}
|
||||
|
||||
let client_type = detect_client_type(initial_data);
|
||||
|
||||
// Connect via Unix socket or TCP
|
||||
#[cfg(unix)]
|
||||
if let Some(ref sock_path) = config.censorship.mask_unix_sock {
|
||||
@@ -77,8 +88,32 @@ where
|
||||
let connect_result = timeout(MASK_TIMEOUT, UnixStream::connect(sock_path)).await;
|
||||
match connect_result {
|
||||
Ok(Ok(stream)) => {
|
||||
let (mask_read, mask_write) = stream.into_split();
|
||||
relay_to_mask(reader, writer, mask_read, mask_write, initial_data).await;
|
||||
let (mask_read, mut mask_write) = stream.into_split();
|
||||
let proxy_header: Option<Vec<u8>> = match config.censorship.mask_proxy_protocol {
|
||||
0 => None,
|
||||
version => {
|
||||
let header = match version {
|
||||
2 => ProxyProtocolV2Builder::new().with_addrs(peer, local_addr).build(),
|
||||
_ => match (peer, local_addr) {
|
||||
(SocketAddr::V4(src), SocketAddr::V4(dst)) =>
|
||||
ProxyProtocolV1Builder::new().tcp4(src.into(), dst.into()).build(),
|
||||
(SocketAddr::V6(src), SocketAddr::V6(dst)) =>
|
||||
ProxyProtocolV1Builder::new().tcp6(src.into(), dst.into()).build(),
|
||||
_ =>
|
||||
ProxyProtocolV1Builder::new().build(),
|
||||
},
|
||||
};
|
||||
Some(header)
|
||||
}
|
||||
};
|
||||
if let Some(header) = proxy_header {
|
||||
if mask_write.write_all(&header).await.is_err() {
|
||||
return;
|
||||
}
|
||||
}
|
||||
if timeout(MASK_RELAY_TIMEOUT, relay_to_mask(reader, writer, mask_read, mask_write, initial_data)).await.is_err() {
|
||||
debug!("Mask relay timed out (unix socket)");
|
||||
}
|
||||
}
|
||||
Ok(Err(e)) => {
|
||||
debug!(error = %e, "Failed to connect to mask unix socket");
|
||||
@@ -104,13 +139,40 @@ where
|
||||
"Forwarding bad client to mask host"
|
||||
);
|
||||
|
||||
// Connect to mask host
|
||||
let mask_addr = format!("{}:{}", mask_host, mask_port);
|
||||
// Apply runtime DNS override for mask target when configured.
|
||||
let mask_addr = resolve_socket_addr(mask_host, mask_port)
|
||||
.map(|addr| addr.to_string())
|
||||
.unwrap_or_else(|| format!("{}:{}", mask_host, mask_port));
|
||||
let connect_result = timeout(MASK_TIMEOUT, TcpStream::connect(&mask_addr)).await;
|
||||
match connect_result {
|
||||
Ok(Ok(stream)) => {
|
||||
let (mask_read, mask_write) = stream.into_split();
|
||||
relay_to_mask(reader, writer, mask_read, mask_write, initial_data).await;
|
||||
let proxy_header: Option<Vec<u8>> = match config.censorship.mask_proxy_protocol {
|
||||
0 => None,
|
||||
version => {
|
||||
let header = match version {
|
||||
2 => ProxyProtocolV2Builder::new().with_addrs(peer, local_addr).build(),
|
||||
_ => match (peer, local_addr) {
|
||||
(SocketAddr::V4(src), SocketAddr::V4(dst)) =>
|
||||
ProxyProtocolV1Builder::new().tcp4(src.into(), dst.into()).build(),
|
||||
(SocketAddr::V6(src), SocketAddr::V6(dst)) =>
|
||||
ProxyProtocolV1Builder::new().tcp6(src.into(), dst.into()).build(),
|
||||
_ =>
|
||||
ProxyProtocolV1Builder::new().build(),
|
||||
},
|
||||
};
|
||||
Some(header)
|
||||
}
|
||||
};
|
||||
|
||||
let (mask_read, mut mask_write) = stream.into_split();
|
||||
if let Some(header) = proxy_header {
|
||||
if mask_write.write_all(&header).await.is_err() {
|
||||
return;
|
||||
}
|
||||
}
|
||||
if timeout(MASK_RELAY_TIMEOUT, relay_to_mask(reader, writer, mask_read, mask_write, initial_data)).await.is_err() {
|
||||
debug!("Mask relay timed out");
|
||||
}
|
||||
}
|
||||
Ok(Err(e)) => {
|
||||
debug!(error = %e, "Failed to connect to mask host");
|
||||
|
||||
@@ -1,26 +1,205 @@
|
||||
use std::net::SocketAddr;
|
||||
use std::sync::Arc;
|
||||
use std::collections::HashMap;
|
||||
use std::collections::hash_map::DefaultHasher;
|
||||
use std::hash::{Hash, Hasher};
|
||||
use std::net::{IpAddr, SocketAddr};
|
||||
use std::sync::atomic::{AtomicU64, Ordering};
|
||||
use std::sync::{Arc, Mutex, OnceLock};
|
||||
use std::time::{Duration, Instant};
|
||||
|
||||
use tokio::io::{AsyncRead, AsyncReadExt, AsyncWrite, AsyncWriteExt};
|
||||
use tracing::{debug, info, trace};
|
||||
use tokio::sync::{mpsc, oneshot};
|
||||
use tracing::{debug, trace, warn};
|
||||
|
||||
use crate::config::ProxyConfig;
|
||||
use crate::crypto::SecureRandom;
|
||||
use crate::error::{ProxyError, Result};
|
||||
use crate::protocol::constants::*;
|
||||
use crate::protocol::constants::{*, secure_padding_len};
|
||||
use crate::proxy::handshake::HandshakeSuccess;
|
||||
use crate::stats::Stats;
|
||||
use crate::stream::{BufferPool, CryptoReader, CryptoWriter};
|
||||
use crate::transport::middle_proxy::{MePool, MeResponse, proto_flags_for_tag};
|
||||
|
||||
enum C2MeCommand {
|
||||
Data { payload: Vec<u8>, flags: u32 },
|
||||
Close,
|
||||
}
|
||||
|
||||
const DESYNC_DEDUP_WINDOW: Duration = Duration::from_secs(60);
|
||||
const DESYNC_ERROR_CLASS: &str = "frame_too_large_crypto_desync";
|
||||
const C2ME_CHANNEL_CAPACITY: usize = 1024;
|
||||
const C2ME_SOFT_PRESSURE_MIN_FREE_SLOTS: usize = 64;
|
||||
const C2ME_SENDER_FAIRNESS_BUDGET: usize = 32;
|
||||
static DESYNC_DEDUP: OnceLock<Mutex<HashMap<u64, Instant>>> = OnceLock::new();
|
||||
|
||||
struct RelayForensicsState {
|
||||
trace_id: u64,
|
||||
conn_id: u64,
|
||||
user: String,
|
||||
peer: SocketAddr,
|
||||
peer_hash: u64,
|
||||
started_at: Instant,
|
||||
bytes_c2me: u64,
|
||||
bytes_me2c: Arc<AtomicU64>,
|
||||
desync_all_full: bool,
|
||||
}
|
||||
|
||||
fn hash_value<T: Hash>(value: &T) -> u64 {
|
||||
let mut hasher = DefaultHasher::new();
|
||||
value.hash(&mut hasher);
|
||||
hasher.finish()
|
||||
}
|
||||
|
||||
fn hash_ip(ip: IpAddr) -> u64 {
|
||||
hash_value(&ip)
|
||||
}
|
||||
|
||||
fn should_emit_full_desync(key: u64, all_full: bool, now: Instant) -> bool {
|
||||
if all_full {
|
||||
return true;
|
||||
}
|
||||
|
||||
let dedup = DESYNC_DEDUP.get_or_init(|| Mutex::new(HashMap::new()));
|
||||
let mut guard = dedup.lock().expect("desync dedup mutex poisoned");
|
||||
guard.retain(|_, seen_at| now.duration_since(*seen_at) < DESYNC_DEDUP_WINDOW);
|
||||
|
||||
match guard.get_mut(&key) {
|
||||
Some(seen_at) => {
|
||||
if now.duration_since(*seen_at) >= DESYNC_DEDUP_WINDOW {
|
||||
*seen_at = now;
|
||||
true
|
||||
} else {
|
||||
false
|
||||
}
|
||||
}
|
||||
None => {
|
||||
guard.insert(key, now);
|
||||
true
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
fn report_desync_frame_too_large(
|
||||
state: &RelayForensicsState,
|
||||
proto_tag: ProtoTag,
|
||||
frame_counter: u64,
|
||||
max_frame: usize,
|
||||
len: usize,
|
||||
raw_len_bytes: Option<[u8; 4]>,
|
||||
stats: &Stats,
|
||||
) -> ProxyError {
|
||||
let len_buf = raw_len_bytes.unwrap_or((len as u32).to_le_bytes());
|
||||
let looks_like_tls = raw_len_bytes
|
||||
.map(|b| b[0] == 0x16 && b[1] == 0x03)
|
||||
.unwrap_or(false);
|
||||
let looks_like_http = raw_len_bytes
|
||||
.map(|b| matches!(b[0], b'G' | b'P' | b'H' | b'C' | b'D'))
|
||||
.unwrap_or(false);
|
||||
let now = Instant::now();
|
||||
let dedup_key = hash_value(&(
|
||||
state.user.as_str(),
|
||||
state.peer_hash,
|
||||
proto_tag,
|
||||
DESYNC_ERROR_CLASS,
|
||||
));
|
||||
let emit_full = should_emit_full_desync(dedup_key, state.desync_all_full, now);
|
||||
let duration_ms = state.started_at.elapsed().as_millis() as u64;
|
||||
let bytes_me2c = state.bytes_me2c.load(Ordering::Relaxed);
|
||||
|
||||
stats.increment_desync_total();
|
||||
stats.observe_desync_frames_ok(frame_counter);
|
||||
if emit_full {
|
||||
stats.increment_desync_full_logged();
|
||||
warn!(
|
||||
trace_id = format_args!("0x{:016x}", state.trace_id),
|
||||
conn_id = state.conn_id,
|
||||
user = %state.user,
|
||||
peer_hash = format_args!("0x{:016x}", state.peer_hash),
|
||||
proto = ?proto_tag,
|
||||
mode = "middle_proxy",
|
||||
is_tls = true,
|
||||
duration_ms,
|
||||
bytes_c2me = state.bytes_c2me,
|
||||
bytes_me2c,
|
||||
raw_len = len,
|
||||
raw_len_hex = format_args!("0x{:08x}", len),
|
||||
raw_bytes = format_args!(
|
||||
"{:02x} {:02x} {:02x} {:02x}",
|
||||
len_buf[0], len_buf[1], len_buf[2], len_buf[3]
|
||||
),
|
||||
max_frame,
|
||||
tls_like = looks_like_tls,
|
||||
http_like = looks_like_http,
|
||||
frames_ok = frame_counter,
|
||||
dedup_window_secs = DESYNC_DEDUP_WINDOW.as_secs(),
|
||||
desync_all_full = state.desync_all_full,
|
||||
full_reason = if state.desync_all_full { "desync_all_full" } else { "first_in_dedup_window" },
|
||||
error_class = DESYNC_ERROR_CLASS,
|
||||
"Frame too large — crypto desync forensics"
|
||||
);
|
||||
debug!(
|
||||
trace_id = format_args!("0x{:016x}", state.trace_id),
|
||||
conn_id = state.conn_id,
|
||||
user = %state.user,
|
||||
peer = %state.peer,
|
||||
"Frame too large forensic peer detail"
|
||||
);
|
||||
} else {
|
||||
stats.increment_desync_suppressed();
|
||||
debug!(
|
||||
trace_id = format_args!("0x{:016x}", state.trace_id),
|
||||
conn_id = state.conn_id,
|
||||
user = %state.user,
|
||||
peer_hash = format_args!("0x{:016x}", state.peer_hash),
|
||||
proto = ?proto_tag,
|
||||
duration_ms,
|
||||
bytes_c2me = state.bytes_c2me,
|
||||
bytes_me2c,
|
||||
raw_len = len,
|
||||
frames_ok = frame_counter,
|
||||
dedup_window_secs = DESYNC_DEDUP_WINDOW.as_secs(),
|
||||
error_class = DESYNC_ERROR_CLASS,
|
||||
"Frame too large — crypto desync forensic suppressed"
|
||||
);
|
||||
}
|
||||
|
||||
ProxyError::Proxy(format!(
|
||||
"Frame too large: {len} (max {max_frame}), frames_ok={frame_counter}, conn_id={}, trace_id=0x{:016x}",
|
||||
state.conn_id,
|
||||
state.trace_id
|
||||
))
|
||||
}
|
||||
|
||||
fn should_yield_c2me_sender(sent_since_yield: usize, has_backlog: bool) -> bool {
|
||||
has_backlog && sent_since_yield >= C2ME_SENDER_FAIRNESS_BUDGET
|
||||
}
|
||||
|
||||
async fn enqueue_c2me_command(
|
||||
tx: &mpsc::Sender<C2MeCommand>,
|
||||
cmd: C2MeCommand,
|
||||
) -> std::result::Result<(), mpsc::error::SendError<C2MeCommand>> {
|
||||
match tx.try_send(cmd) {
|
||||
Ok(()) => Ok(()),
|
||||
Err(mpsc::error::TrySendError::Closed(cmd)) => Err(mpsc::error::SendError(cmd)),
|
||||
Err(mpsc::error::TrySendError::Full(cmd)) => {
|
||||
// Cooperative yield reduces burst catch-up when the per-conn queue is near saturation.
|
||||
if tx.capacity() <= C2ME_SOFT_PRESSURE_MIN_FREE_SLOTS {
|
||||
tokio::task::yield_now().await;
|
||||
}
|
||||
tx.send(cmd).await
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub(crate) async fn handle_via_middle_proxy<R, W>(
|
||||
mut crypto_reader: CryptoReader<R>,
|
||||
mut crypto_writer: CryptoWriter<W>,
|
||||
crypto_writer: CryptoWriter<W>,
|
||||
success: HandshakeSuccess,
|
||||
me_pool: Arc<MePool>,
|
||||
stats: Arc<Stats>,
|
||||
_config: Arc<ProxyConfig>,
|
||||
config: Arc<ProxyConfig>,
|
||||
_buffer_pool: Arc<BufferPool>,
|
||||
local_addr: SocketAddr,
|
||||
rng: Arc<SecureRandom>,
|
||||
) -> Result<()>
|
||||
where
|
||||
R: AsyncRead + Unpin + Send + 'static,
|
||||
@@ -29,81 +208,266 @@ where
|
||||
let user = success.user.clone();
|
||||
let peer = success.peer;
|
||||
let proto_tag = success.proto_tag;
|
||||
let pool_generation = me_pool.current_generation();
|
||||
|
||||
info!(
|
||||
debug!(
|
||||
user = %user,
|
||||
peer = %peer,
|
||||
dc = success.dc_idx,
|
||||
proto = ?proto_tag,
|
||||
mode = "middle_proxy",
|
||||
pool_generation,
|
||||
"Routing via Middle-End"
|
||||
);
|
||||
|
||||
let (conn_id, mut me_rx) = me_pool.registry().register().await;
|
||||
let (conn_id, me_rx) = me_pool.registry().register().await;
|
||||
let trace_id = conn_id;
|
||||
let bytes_me2c = Arc::new(AtomicU64::new(0));
|
||||
let mut forensics = RelayForensicsState {
|
||||
trace_id,
|
||||
conn_id,
|
||||
user: user.clone(),
|
||||
peer,
|
||||
peer_hash: hash_ip(peer.ip()),
|
||||
started_at: Instant::now(),
|
||||
bytes_c2me: 0,
|
||||
bytes_me2c: bytes_me2c.clone(),
|
||||
desync_all_full: config.general.desync_all_full,
|
||||
};
|
||||
|
||||
stats.increment_user_connects(&user);
|
||||
stats.increment_user_curr_connects(&user);
|
||||
stats.increment_current_connections_me();
|
||||
|
||||
let proto_flags = proto_flags_for_tag(proto_tag, me_pool.has_proxy_tag());
|
||||
// Per-user ad_tag from access.user_ad_tags; fallback to general.ad_tag (hot-reloadable)
|
||||
let user_tag: Option<Vec<u8>> = config
|
||||
.access
|
||||
.user_ad_tags
|
||||
.get(&user)
|
||||
.and_then(|s| hex::decode(s).ok())
|
||||
.filter(|v| v.len() == 16);
|
||||
let global_tag: Option<Vec<u8>> = config
|
||||
.general
|
||||
.ad_tag
|
||||
.as_ref()
|
||||
.and_then(|s| hex::decode(s).ok())
|
||||
.filter(|v| v.len() == 16);
|
||||
let effective_tag = user_tag.or(global_tag);
|
||||
|
||||
let proto_flags = proto_flags_for_tag(proto_tag, effective_tag.is_some());
|
||||
debug!(
|
||||
trace_id = format_args!("0x{:016x}", trace_id),
|
||||
user = %user,
|
||||
conn_id,
|
||||
peer_hash = format_args!("0x{:016x}", forensics.peer_hash),
|
||||
desync_all_full = forensics.desync_all_full,
|
||||
proto_flags = format_args!("0x{:08x}", proto_flags),
|
||||
pool_generation,
|
||||
"ME relay started"
|
||||
);
|
||||
|
||||
let translated_local_addr = me_pool.translate_our_addr(local_addr);
|
||||
|
||||
let result: Result<()> = loop {
|
||||
tokio::select! {
|
||||
client_frame = read_client_payload(&mut crypto_reader, proto_tag) => {
|
||||
match client_frame {
|
||||
Ok(Some(payload)) => {
|
||||
trace!(conn_id, bytes = payload.len(), "C->ME frame");
|
||||
stats.add_user_octets_from(&user, payload.len() as u64);
|
||||
me_pool.send_proxy_req(
|
||||
conn_id,
|
||||
success.dc_idx,
|
||||
peer,
|
||||
translated_local_addr,
|
||||
&payload,
|
||||
proto_flags,
|
||||
).await?;
|
||||
let frame_limit = config.general.max_client_frame;
|
||||
|
||||
let (c2me_tx, mut c2me_rx) = mpsc::channel::<C2MeCommand>(C2ME_CHANNEL_CAPACITY);
|
||||
let me_pool_c2me = me_pool.clone();
|
||||
let effective_tag = effective_tag;
|
||||
let c2me_sender = tokio::spawn(async move {
|
||||
let mut sent_since_yield = 0usize;
|
||||
while let Some(cmd) = c2me_rx.recv().await {
|
||||
match cmd {
|
||||
C2MeCommand::Data { payload, flags } => {
|
||||
me_pool_c2me.send_proxy_req(
|
||||
conn_id,
|
||||
success.dc_idx,
|
||||
peer,
|
||||
translated_local_addr,
|
||||
&payload,
|
||||
flags,
|
||||
effective_tag.as_deref(),
|
||||
).await?;
|
||||
sent_since_yield = sent_since_yield.saturating_add(1);
|
||||
if should_yield_c2me_sender(sent_since_yield, !c2me_rx.is_empty()) {
|
||||
sent_since_yield = 0;
|
||||
tokio::task::yield_now().await;
|
||||
}
|
||||
Ok(None) => {
|
||||
debug!(conn_id, "Client EOF");
|
||||
let _ = me_pool.send_close(conn_id).await;
|
||||
break Ok(());
|
||||
}
|
||||
Err(e) => break Err(e),
|
||||
}
|
||||
}
|
||||
me_msg = me_rx.recv() => {
|
||||
match me_msg {
|
||||
Some(MeResponse::Data { flags, data }) => {
|
||||
trace!(conn_id, bytes = data.len(), flags, "ME->C data");
|
||||
stats.add_user_octets_to(&user, data.len() as u64);
|
||||
write_client_payload(&mut crypto_writer, proto_tag, flags, &data).await?;
|
||||
}
|
||||
Some(MeResponse::Ack(confirm)) => {
|
||||
trace!(conn_id, confirm, "ME->C quickack");
|
||||
write_client_ack(&mut crypto_writer, proto_tag, confirm).await?;
|
||||
}
|
||||
Some(MeResponse::Close) => {
|
||||
debug!(conn_id, "ME sent close");
|
||||
break Ok(());
|
||||
}
|
||||
None => {
|
||||
debug!(conn_id, "ME channel closed");
|
||||
break Err(ProxyError::Proxy("ME connection lost".into()));
|
||||
}
|
||||
C2MeCommand::Close => {
|
||||
let _ = me_pool_c2me.send_close(conn_id).await;
|
||||
return Ok(());
|
||||
}
|
||||
}
|
||||
}
|
||||
Ok(())
|
||||
});
|
||||
|
||||
let (stop_tx, mut stop_rx) = oneshot::channel::<()>();
|
||||
let mut me_rx_task = me_rx;
|
||||
let stats_clone = stats.clone();
|
||||
let rng_clone = rng.clone();
|
||||
let user_clone = user.clone();
|
||||
let bytes_me2c_clone = bytes_me2c.clone();
|
||||
let me_writer = tokio::spawn(async move {
|
||||
let mut writer = crypto_writer;
|
||||
let mut frame_buf = Vec::with_capacity(16 * 1024);
|
||||
loop {
|
||||
tokio::select! {
|
||||
msg = me_rx_task.recv() => {
|
||||
match msg {
|
||||
Some(MeResponse::Data { flags, data }) => {
|
||||
trace!(conn_id, bytes = data.len(), flags, "ME->C data");
|
||||
bytes_me2c_clone.fetch_add(data.len() as u64, Ordering::Relaxed);
|
||||
stats_clone.add_user_octets_to(&user_clone, data.len() as u64);
|
||||
write_client_payload(
|
||||
&mut writer,
|
||||
proto_tag,
|
||||
flags,
|
||||
&data,
|
||||
rng_clone.as_ref(),
|
||||
&mut frame_buf,
|
||||
)
|
||||
.await?;
|
||||
|
||||
// Drain all immediately queued ME responses and flush once.
|
||||
while let Ok(next) = me_rx_task.try_recv() {
|
||||
match next {
|
||||
MeResponse::Data { flags, data } => {
|
||||
trace!(conn_id, bytes = data.len(), flags, "ME->C data (batched)");
|
||||
bytes_me2c_clone.fetch_add(data.len() as u64, Ordering::Relaxed);
|
||||
stats_clone.add_user_octets_to(&user_clone, data.len() as u64);
|
||||
write_client_payload(
|
||||
&mut writer,
|
||||
proto_tag,
|
||||
flags,
|
||||
&data,
|
||||
rng_clone.as_ref(),
|
||||
&mut frame_buf,
|
||||
).await?;
|
||||
}
|
||||
MeResponse::Ack(confirm) => {
|
||||
trace!(conn_id, confirm, "ME->C quickack (batched)");
|
||||
write_client_ack(&mut writer, proto_tag, confirm).await?;
|
||||
}
|
||||
MeResponse::Close => {
|
||||
debug!(conn_id, "ME sent close (batched)");
|
||||
let _ = writer.flush().await;
|
||||
return Ok(());
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
writer.flush().await.map_err(ProxyError::Io)?;
|
||||
}
|
||||
Some(MeResponse::Ack(confirm)) => {
|
||||
trace!(conn_id, confirm, "ME->C quickack");
|
||||
write_client_ack(&mut writer, proto_tag, confirm).await?;
|
||||
}
|
||||
Some(MeResponse::Close) => {
|
||||
debug!(conn_id, "ME sent close");
|
||||
let _ = writer.flush().await;
|
||||
return Ok(());
|
||||
}
|
||||
None => {
|
||||
debug!(conn_id, "ME channel closed");
|
||||
return Err(ProxyError::Proxy("ME connection lost".into()));
|
||||
}
|
||||
}
|
||||
}
|
||||
_ = &mut stop_rx => {
|
||||
debug!(conn_id, "ME writer stop signal");
|
||||
return Ok(());
|
||||
}
|
||||
}
|
||||
}
|
||||
});
|
||||
|
||||
let mut main_result: Result<()> = Ok(());
|
||||
let mut client_closed = false;
|
||||
let mut frame_counter: u64 = 0;
|
||||
loop {
|
||||
match read_client_payload(
|
||||
&mut crypto_reader,
|
||||
proto_tag,
|
||||
frame_limit,
|
||||
&forensics,
|
||||
&mut frame_counter,
|
||||
&stats,
|
||||
).await {
|
||||
Ok(Some((payload, quickack))) => {
|
||||
trace!(conn_id, bytes = payload.len(), "C->ME frame");
|
||||
forensics.bytes_c2me = forensics
|
||||
.bytes_c2me
|
||||
.saturating_add(payload.len() as u64);
|
||||
stats.add_user_octets_from(&user, payload.len() as u64);
|
||||
let mut flags = proto_flags;
|
||||
if quickack {
|
||||
flags |= RPC_FLAG_QUICKACK;
|
||||
}
|
||||
if payload.len() >= 8 && payload[..8].iter().all(|b| *b == 0) {
|
||||
flags |= RPC_FLAG_NOT_ENCRYPTED;
|
||||
}
|
||||
// Keep client read loop lightweight: route heavy ME send path via a dedicated task.
|
||||
if enqueue_c2me_command(&c2me_tx, C2MeCommand::Data { payload, flags })
|
||||
.await
|
||||
.is_err()
|
||||
{
|
||||
main_result = Err(ProxyError::Proxy("ME sender channel closed".into()));
|
||||
break;
|
||||
}
|
||||
}
|
||||
Ok(None) => {
|
||||
debug!(conn_id, "Client EOF");
|
||||
client_closed = true;
|
||||
let _ = enqueue_c2me_command(&c2me_tx, C2MeCommand::Close).await;
|
||||
break;
|
||||
}
|
||||
Err(e) => {
|
||||
main_result = Err(e);
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
drop(c2me_tx);
|
||||
let c2me_result = c2me_sender
|
||||
.await
|
||||
.unwrap_or_else(|e| Err(ProxyError::Proxy(format!("ME sender join error: {e}"))));
|
||||
|
||||
let _ = stop_tx.send(());
|
||||
let mut writer_result = me_writer
|
||||
.await
|
||||
.unwrap_or_else(|e| Err(ProxyError::Proxy(format!("ME writer join error: {e}"))));
|
||||
|
||||
// When client closes, but ME channel stopped as unregistered - it isnt error
|
||||
if client_closed
|
||||
&& matches!(
|
||||
writer_result,
|
||||
Err(ProxyError::Proxy(ref msg)) if msg == "ME connection lost"
|
||||
)
|
||||
{
|
||||
writer_result = Ok(());
|
||||
}
|
||||
|
||||
let result = match (main_result, c2me_result, writer_result) {
|
||||
(Ok(()), Ok(()), Ok(())) => Ok(()),
|
||||
(Err(e), _, _) => Err(e),
|
||||
(_, Err(e), _) => Err(e),
|
||||
(_, _, Err(e)) => Err(e),
|
||||
};
|
||||
|
||||
debug!(user = %user, conn_id, "ME relay cleanup");
|
||||
debug!(
|
||||
user = %user,
|
||||
conn_id,
|
||||
trace_id = format_args!("0x{:016x}", trace_id),
|
||||
duration_ms = forensics.started_at.elapsed().as_millis() as u64,
|
||||
bytes_c2me = forensics.bytes_c2me,
|
||||
bytes_me2c = forensics.bytes_me2c.load(Ordering::Relaxed),
|
||||
frames_ok = frame_counter,
|
||||
"ME relay cleanup"
|
||||
);
|
||||
me_pool.registry().unregister(conn_id).await;
|
||||
stats.decrement_current_connections_me();
|
||||
stats.decrement_user_curr_connects(&user);
|
||||
result
|
||||
}
|
||||
@@ -111,55 +475,111 @@ where
|
||||
async fn read_client_payload<R>(
|
||||
client_reader: &mut CryptoReader<R>,
|
||||
proto_tag: ProtoTag,
|
||||
) -> Result<Option<Vec<u8>>>
|
||||
max_frame: usize,
|
||||
forensics: &RelayForensicsState,
|
||||
frame_counter: &mut u64,
|
||||
stats: &Stats,
|
||||
) -> Result<Option<(Vec<u8>, bool)>>
|
||||
where
|
||||
R: AsyncRead + Unpin + Send + 'static,
|
||||
{
|
||||
let len = match proto_tag {
|
||||
ProtoTag::Abridged => {
|
||||
let mut first = [0u8; 1];
|
||||
match client_reader.read_exact(&mut first).await {
|
||||
Ok(_) => {}
|
||||
Err(e) if e.kind() == std::io::ErrorKind::UnexpectedEof => return Ok(None),
|
||||
Err(e) => return Err(ProxyError::Io(e)),
|
||||
loop {
|
||||
let (len, quickack, raw_len_bytes) = match proto_tag {
|
||||
ProtoTag::Abridged => {
|
||||
let mut first = [0u8; 1];
|
||||
match client_reader.read_exact(&mut first).await {
|
||||
Ok(_) => {}
|
||||
Err(e) if e.kind() == std::io::ErrorKind::UnexpectedEof => return Ok(None),
|
||||
Err(e) => return Err(ProxyError::Io(e)),
|
||||
}
|
||||
|
||||
let quickack = (first[0] & 0x80) != 0;
|
||||
let len_words = if (first[0] & 0x7f) == 0x7f {
|
||||
let mut ext = [0u8; 3];
|
||||
client_reader
|
||||
.read_exact(&mut ext)
|
||||
.await
|
||||
.map_err(ProxyError::Io)?;
|
||||
u32::from_le_bytes([ext[0], ext[1], ext[2], 0]) as usize
|
||||
} else {
|
||||
(first[0] & 0x7f) as usize
|
||||
};
|
||||
|
||||
let len = len_words
|
||||
.checked_mul(4)
|
||||
.ok_or_else(|| ProxyError::Proxy("Abridged frame length overflow".into()))?;
|
||||
(len, quickack, None)
|
||||
}
|
||||
|
||||
let len_words = if (first[0] & 0x7f) == 0x7f {
|
||||
let mut ext = [0u8; 3];
|
||||
client_reader
|
||||
.read_exact(&mut ext)
|
||||
.await
|
||||
.map_err(ProxyError::Io)?;
|
||||
u32::from_le_bytes([ext[0], ext[1], ext[2], 0]) as usize
|
||||
} else {
|
||||
(first[0] & 0x7f) as usize
|
||||
};
|
||||
|
||||
len_words
|
||||
.checked_mul(4)
|
||||
.ok_or_else(|| ProxyError::Proxy("Abridged frame length overflow".into()))?
|
||||
}
|
||||
ProtoTag::Intermediate | ProtoTag::Secure => {
|
||||
let mut len_buf = [0u8; 4];
|
||||
match client_reader.read_exact(&mut len_buf).await {
|
||||
Ok(_) => {}
|
||||
Err(e) if e.kind() == std::io::ErrorKind::UnexpectedEof => return Ok(None),
|
||||
Err(e) => return Err(ProxyError::Io(e)),
|
||||
ProtoTag::Intermediate | ProtoTag::Secure => {
|
||||
let mut len_buf = [0u8; 4];
|
||||
match client_reader.read_exact(&mut len_buf).await {
|
||||
Ok(_) => {}
|
||||
Err(e) if e.kind() == std::io::ErrorKind::UnexpectedEof => return Ok(None),
|
||||
Err(e) => return Err(ProxyError::Io(e)),
|
||||
}
|
||||
let quickack = (len_buf[3] & 0x80) != 0;
|
||||
(
|
||||
(u32::from_le_bytes(len_buf) & 0x7fff_ffff) as usize,
|
||||
quickack,
|
||||
Some(len_buf),
|
||||
)
|
||||
}
|
||||
(u32::from_le_bytes(len_buf) & 0x7fff_ffff) as usize
|
||||
}
|
||||
};
|
||||
};
|
||||
|
||||
if len > 16 * 1024 * 1024 {
|
||||
return Err(ProxyError::Proxy(format!("Frame too large: {len}")));
|
||||
if len == 0 {
|
||||
continue;
|
||||
}
|
||||
if len < 4 && proto_tag != ProtoTag::Abridged {
|
||||
warn!(
|
||||
trace_id = format_args!("0x{:016x}", forensics.trace_id),
|
||||
conn_id = forensics.conn_id,
|
||||
user = %forensics.user,
|
||||
len,
|
||||
proto = ?proto_tag,
|
||||
"Frame too small — corrupt or probe"
|
||||
);
|
||||
return Err(ProxyError::Proxy(format!("Frame too small: {len}")));
|
||||
}
|
||||
|
||||
if len > max_frame {
|
||||
return Err(report_desync_frame_too_large(
|
||||
forensics,
|
||||
proto_tag,
|
||||
*frame_counter,
|
||||
max_frame,
|
||||
len,
|
||||
raw_len_bytes,
|
||||
stats,
|
||||
));
|
||||
}
|
||||
|
||||
let secure_payload_len = if proto_tag == ProtoTag::Secure {
|
||||
match secure_payload_len_from_wire_len(len) {
|
||||
Some(payload_len) => payload_len,
|
||||
None => {
|
||||
stats.increment_secure_padding_invalid();
|
||||
return Err(ProxyError::Proxy(format!(
|
||||
"Invalid secure frame length: {len}"
|
||||
)));
|
||||
}
|
||||
}
|
||||
} else {
|
||||
len
|
||||
};
|
||||
|
||||
let mut payload = vec![0u8; len];
|
||||
client_reader
|
||||
.read_exact(&mut payload)
|
||||
.await
|
||||
.map_err(ProxyError::Io)?;
|
||||
|
||||
// Secure Intermediate: strip validated trailing padding bytes.
|
||||
if proto_tag == ProtoTag::Secure {
|
||||
payload.truncate(secure_payload_len);
|
||||
}
|
||||
*frame_counter += 1;
|
||||
return Ok(Some((payload, quickack)));
|
||||
}
|
||||
|
||||
let mut payload = vec![0u8; len];
|
||||
client_reader
|
||||
.read_exact(&mut payload)
|
||||
.await
|
||||
.map_err(ProxyError::Io)?;
|
||||
Ok(Some(payload))
|
||||
}
|
||||
|
||||
async fn write_client_payload<W>(
|
||||
@@ -167,6 +587,8 @@ async fn write_client_payload<W>(
|
||||
proto_tag: ProtoTag,
|
||||
flags: u32,
|
||||
data: &[u8],
|
||||
rng: &SecureRandom,
|
||||
frame_buf: &mut Vec<u8>,
|
||||
) -> Result<()>
|
||||
where
|
||||
W: AsyncWrite + Unpin + Send + 'static,
|
||||
@@ -175,7 +597,7 @@ where
|
||||
|
||||
match proto_tag {
|
||||
ProtoTag::Abridged => {
|
||||
if data.len() % 4 != 0 {
|
||||
if !data.len().is_multiple_of(4) {
|
||||
return Err(ProxyError::Proxy(format!(
|
||||
"Abridged payload must be 4-byte aligned, got {}",
|
||||
data.len()
|
||||
@@ -188,8 +610,12 @@ where
|
||||
if quickack {
|
||||
first |= 0x80;
|
||||
}
|
||||
frame_buf.clear();
|
||||
frame_buf.reserve(1 + data.len());
|
||||
frame_buf.push(first);
|
||||
frame_buf.extend_from_slice(data);
|
||||
client_writer
|
||||
.write_all(&[first])
|
||||
.write_all(frame_buf)
|
||||
.await
|
||||
.map_err(ProxyError::Io)?;
|
||||
} else if len_words < (1 << 24) {
|
||||
@@ -198,8 +624,12 @@ where
|
||||
first |= 0x80;
|
||||
}
|
||||
let lw = (len_words as u32).to_le_bytes();
|
||||
frame_buf.clear();
|
||||
frame_buf.reserve(4 + data.len());
|
||||
frame_buf.extend_from_slice(&[first, lw[0], lw[1], lw[2]]);
|
||||
frame_buf.extend_from_slice(data);
|
||||
client_writer
|
||||
.write_all(&[first, lw[0], lw[1], lw[2]])
|
||||
.write_all(frame_buf)
|
||||
.await
|
||||
.map_err(ProxyError::Io)?;
|
||||
} else {
|
||||
@@ -208,29 +638,41 @@ where
|
||||
data.len()
|
||||
)));
|
||||
}
|
||||
|
||||
client_writer
|
||||
.write_all(data)
|
||||
.await
|
||||
.map_err(ProxyError::Io)?;
|
||||
}
|
||||
ProtoTag::Intermediate | ProtoTag::Secure => {
|
||||
let mut len = data.len() as u32;
|
||||
let padding_len = if proto_tag == ProtoTag::Secure {
|
||||
if !is_valid_secure_payload_len(data.len()) {
|
||||
return Err(ProxyError::Proxy(format!(
|
||||
"Secure payload must be 4-byte aligned, got {}",
|
||||
data.len()
|
||||
)));
|
||||
}
|
||||
secure_padding_len(data.len(), rng)
|
||||
} else {
|
||||
0
|
||||
};
|
||||
let mut len_val = (data.len() + padding_len) as u32;
|
||||
if quickack {
|
||||
len |= 0x8000_0000;
|
||||
len_val |= 0x8000_0000;
|
||||
}
|
||||
let total = 4 + data.len() + padding_len;
|
||||
frame_buf.clear();
|
||||
frame_buf.reserve(total);
|
||||
frame_buf.extend_from_slice(&len_val.to_le_bytes());
|
||||
frame_buf.extend_from_slice(data);
|
||||
if padding_len > 0 {
|
||||
let start = frame_buf.len();
|
||||
frame_buf.resize(start + padding_len, 0);
|
||||
rng.fill(&mut frame_buf[start..]);
|
||||
}
|
||||
client_writer
|
||||
.write_all(&len.to_le_bytes())
|
||||
.await
|
||||
.map_err(ProxyError::Io)?;
|
||||
client_writer
|
||||
.write_all(data)
|
||||
.write_all(frame_buf)
|
||||
.await
|
||||
.map_err(ProxyError::Io)?;
|
||||
}
|
||||
}
|
||||
|
||||
client_writer.flush().await.map_err(ProxyError::Io)
|
||||
Ok(())
|
||||
}
|
||||
|
||||
async fn write_client_ack<W>(
|
||||
@@ -250,5 +692,87 @@ where
|
||||
.write_all(&bytes)
|
||||
.await
|
||||
.map_err(ProxyError::Io)?;
|
||||
// ACK should remain low-latency.
|
||||
client_writer.flush().await.map_err(ProxyError::Io)
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
use tokio::time::{Duration as TokioDuration, timeout};
|
||||
|
||||
#[test]
|
||||
fn should_yield_sender_only_on_budget_with_backlog() {
|
||||
assert!(!should_yield_c2me_sender(0, true));
|
||||
assert!(!should_yield_c2me_sender(C2ME_SENDER_FAIRNESS_BUDGET - 1, true));
|
||||
assert!(!should_yield_c2me_sender(C2ME_SENDER_FAIRNESS_BUDGET, false));
|
||||
assert!(should_yield_c2me_sender(C2ME_SENDER_FAIRNESS_BUDGET, true));
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn enqueue_c2me_command_uses_try_send_fast_path() {
|
||||
let (tx, mut rx) = mpsc::channel::<C2MeCommand>(2);
|
||||
enqueue_c2me_command(
|
||||
&tx,
|
||||
C2MeCommand::Data {
|
||||
payload: vec![1, 2, 3],
|
||||
flags: 0,
|
||||
},
|
||||
)
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
let recv = timeout(TokioDuration::from_millis(50), rx.recv())
|
||||
.await
|
||||
.unwrap()
|
||||
.unwrap();
|
||||
match recv {
|
||||
C2MeCommand::Data { payload, flags } => {
|
||||
assert_eq!(payload, vec![1, 2, 3]);
|
||||
assert_eq!(flags, 0);
|
||||
}
|
||||
C2MeCommand::Close => panic!("unexpected close command"),
|
||||
}
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn enqueue_c2me_command_falls_back_to_send_when_queue_is_full() {
|
||||
let (tx, mut rx) = mpsc::channel::<C2MeCommand>(1);
|
||||
tx.send(C2MeCommand::Data {
|
||||
payload: vec![9],
|
||||
flags: 9,
|
||||
})
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
let tx2 = tx.clone();
|
||||
let producer = tokio::spawn(async move {
|
||||
enqueue_c2me_command(
|
||||
&tx2,
|
||||
C2MeCommand::Data {
|
||||
payload: vec![7, 7],
|
||||
flags: 7,
|
||||
},
|
||||
)
|
||||
.await
|
||||
.unwrap();
|
||||
});
|
||||
|
||||
let _ = timeout(TokioDuration::from_millis(100), rx.recv())
|
||||
.await
|
||||
.unwrap();
|
||||
producer.await.unwrap();
|
||||
|
||||
let recv = timeout(TokioDuration::from_millis(100), rx.recv())
|
||||
.await
|
||||
.unwrap()
|
||||
.unwrap();
|
||||
match recv {
|
||||
C2MeCommand::Data { payload, flags } => {
|
||||
assert_eq!(payload, vec![7, 7]);
|
||||
assert_eq!(flags, 7);
|
||||
}
|
||||
C2MeCommand::Close => panic!("unexpected close command"),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -8,6 +8,9 @@ pub mod middle_relay;
|
||||
pub mod relay;
|
||||
|
||||
pub use client::ClientHandler;
|
||||
#[allow(unused_imports)]
|
||||
pub use handshake::*;
|
||||
#[allow(unused_imports)]
|
||||
pub use masking::*;
|
||||
#[allow(unused_imports)]
|
||||
pub use relay::*;
|
||||
|
||||
117
src/stats/beobachten.rs
Normal file
117
src/stats/beobachten.rs
Normal file
@@ -0,0 +1,117 @@
|
||||
//! Per-IP forensic buckets for scanner and handshake failure observation.
|
||||
|
||||
use std::collections::{BTreeMap, HashMap};
|
||||
use std::net::IpAddr;
|
||||
use std::time::{Duration, Instant};
|
||||
|
||||
use parking_lot::Mutex;
|
||||
|
||||
const CLEANUP_INTERVAL: Duration = Duration::from_secs(30);
|
||||
|
||||
#[derive(Default)]
|
||||
struct BeobachtenInner {
|
||||
entries: HashMap<(String, IpAddr), BeobachtenEntry>,
|
||||
last_cleanup: Option<Instant>,
|
||||
}
|
||||
|
||||
#[derive(Clone, Copy)]
|
||||
struct BeobachtenEntry {
|
||||
tries: u64,
|
||||
last_seen: Instant,
|
||||
}
|
||||
|
||||
/// In-memory, TTL-scoped per-IP counters keyed by source class.
|
||||
pub struct BeobachtenStore {
|
||||
inner: Mutex<BeobachtenInner>,
|
||||
}
|
||||
|
||||
impl Default for BeobachtenStore {
|
||||
fn default() -> Self {
|
||||
Self::new()
|
||||
}
|
||||
}
|
||||
|
||||
impl BeobachtenStore {
|
||||
pub fn new() -> Self {
|
||||
Self {
|
||||
inner: Mutex::new(BeobachtenInner::default()),
|
||||
}
|
||||
}
|
||||
|
||||
pub fn record(&self, class: &str, ip: IpAddr, ttl: Duration) {
|
||||
if class.is_empty() || ttl.is_zero() {
|
||||
return;
|
||||
}
|
||||
|
||||
let now = Instant::now();
|
||||
let mut guard = self.inner.lock();
|
||||
Self::cleanup_if_needed(&mut guard, now, ttl);
|
||||
|
||||
let key = (class.to_string(), ip);
|
||||
let entry = guard.entries.entry(key).or_insert(BeobachtenEntry {
|
||||
tries: 0,
|
||||
last_seen: now,
|
||||
});
|
||||
entry.tries = entry.tries.saturating_add(1);
|
||||
entry.last_seen = now;
|
||||
}
|
||||
|
||||
pub fn snapshot_text(&self, ttl: Duration) -> String {
|
||||
if ttl.is_zero() {
|
||||
return "beobachten disabled\n".to_string();
|
||||
}
|
||||
|
||||
let now = Instant::now();
|
||||
let mut guard = self.inner.lock();
|
||||
Self::cleanup(&mut guard, now, ttl);
|
||||
guard.last_cleanup = Some(now);
|
||||
|
||||
let mut grouped = BTreeMap::<String, Vec<(IpAddr, u64)>>::new();
|
||||
for ((class, ip), entry) in &guard.entries {
|
||||
grouped
|
||||
.entry(class.clone())
|
||||
.or_default()
|
||||
.push((*ip, entry.tries));
|
||||
}
|
||||
|
||||
if grouped.is_empty() {
|
||||
return "empty\n".to_string();
|
||||
}
|
||||
|
||||
let mut out = String::with_capacity(grouped.len() * 64);
|
||||
for (class, entries) in &mut grouped {
|
||||
out.push('[');
|
||||
out.push_str(class);
|
||||
out.push_str("]\n");
|
||||
|
||||
entries.sort_by(|(ip_a, tries_a), (ip_b, tries_b)| {
|
||||
tries_b
|
||||
.cmp(tries_a)
|
||||
.then_with(|| ip_a.to_string().cmp(&ip_b.to_string()))
|
||||
});
|
||||
|
||||
for (ip, tries) in entries {
|
||||
out.push_str(&format!("{ip}-{tries}\n"));
|
||||
}
|
||||
}
|
||||
|
||||
out
|
||||
}
|
||||
|
||||
fn cleanup_if_needed(inner: &mut BeobachtenInner, now: Instant, ttl: Duration) {
|
||||
let should_cleanup = match inner.last_cleanup {
|
||||
Some(last) => now.saturating_duration_since(last) >= CLEANUP_INTERVAL,
|
||||
None => true,
|
||||
};
|
||||
if should_cleanup {
|
||||
Self::cleanup(inner, now, ttl);
|
||||
inner.last_cleanup = Some(now);
|
||||
}
|
||||
}
|
||||
|
||||
fn cleanup(inner: &mut BeobachtenInner, now: Instant, ttl: Duration) {
|
||||
inner.entries.retain(|_, entry| {
|
||||
now.saturating_duration_since(entry.last_seen) <= ttl
|
||||
});
|
||||
}
|
||||
}
|
||||
1114
src/stats/mod.rs
1114
src/stats/mod.rs
File diff suppressed because it is too large
Load Diff
29
src/stats/telemetry.rs
Normal file
29
src/stats/telemetry.rs
Normal file
@@ -0,0 +1,29 @@
|
||||
use crate::config::{MeTelemetryLevel, TelemetryConfig};
|
||||
|
||||
/// Runtime telemetry policy used by hot-path counters.
|
||||
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
|
||||
pub struct TelemetryPolicy {
|
||||
pub core_enabled: bool,
|
||||
pub user_enabled: bool,
|
||||
pub me_level: MeTelemetryLevel,
|
||||
}
|
||||
|
||||
impl Default for TelemetryPolicy {
|
||||
fn default() -> Self {
|
||||
Self {
|
||||
core_enabled: true,
|
||||
user_enabled: true,
|
||||
me_level: MeTelemetryLevel::Normal,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl TelemetryPolicy {
|
||||
pub fn from_config(cfg: &TelemetryConfig) -> Self {
|
||||
Self {
|
||||
core_enabled: cfg.core_enabled,
|
||||
user_enabled: cfg.user_enabled,
|
||||
me_level: cfg.me_level,
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -3,6 +3,8 @@
|
||||
//! This module provides a thread-safe pool of BytesMut buffers
|
||||
//! that can be reused across connections to reduce allocation pressure.
|
||||
|
||||
#![allow(dead_code)]
|
||||
|
||||
use bytes::BytesMut;
|
||||
use crossbeam_queue::ArrayQueue;
|
||||
use std::ops::{Deref, DerefMut};
|
||||
@@ -381,9 +383,14 @@ mod tests {
|
||||
// Add a buffer to pool
|
||||
pool.preallocate(1);
|
||||
|
||||
// Now try_get should succeed
|
||||
assert!(pool.try_get().is_some());
|
||||
// Now try_get should succeed once while the buffer is held
|
||||
let buf = pool.try_get();
|
||||
assert!(buf.is_some());
|
||||
// While buffer is held, pool is empty
|
||||
assert!(pool.try_get().is_none());
|
||||
// Drop buffer -> returns to pool, should be obtainable again
|
||||
drop(buf);
|
||||
assert!(pool.try_get().is_some());
|
||||
}
|
||||
|
||||
#[test]
|
||||
@@ -448,4 +455,4 @@ mod tests {
|
||||
// All buffers should be returned
|
||||
assert!(stats.pooled > 0);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -18,6 +18,8 @@
|
||||
//! is either written to upstream or stored in our pending buffer
|
||||
//! - when upstream is pending -> ciphertext is buffered/bounded and backpressure is applied
|
||||
//!
|
||||
|
||||
#![allow(dead_code)]
|
||||
//! =======================
|
||||
//! Writer state machine
|
||||
//! =======================
|
||||
@@ -34,7 +36,7 @@
|
||||
//! └────────────────────────────────────────┘
|
||||
//!
|
||||
//! Backpressure
|
||||
//! - pending ciphertext buffer is bounded (MAX_PENDING_WRITE)
|
||||
//! - pending ciphertext buffer is bounded (configurable per connection)
|
||||
//! - pending is full and upstream is pending
|
||||
//! -> poll_write returns Poll::Pending
|
||||
//! -> do not accept any plaintext
|
||||
@@ -45,7 +47,7 @@
|
||||
//! - when upstream is Pending but pending still has room: accept `to_accept` bytes and
|
||||
//! encrypt+append ciphertext directly into pending (in-place encryption of appended range)
|
||||
|
||||
//! Encrypted stream wrappers using AES-CTR
|
||||
//! Encrypted stream wrappers using AES-CTR
|
||||
//!
|
||||
//! This module provides stateful async stream wrappers that handle
|
||||
//! encryption/decryption with proper partial read/write handling.
|
||||
@@ -55,17 +57,16 @@ use std::io::{self, ErrorKind, Result};
|
||||
use std::pin::Pin;
|
||||
use std::task::{Context, Poll};
|
||||
use tokio::io::{AsyncRead, AsyncWrite, ReadBuf};
|
||||
use tracing::{debug, trace, warn};
|
||||
use tracing::{debug, trace};
|
||||
|
||||
use crate::crypto::AesCtr;
|
||||
use super::state::{StreamState, YieldBuffer};
|
||||
|
||||
// ============= Constants =============
|
||||
|
||||
/// Maximum size for pending ciphertext buffer (bounded backpressure).
|
||||
/// Reduced to 64KB to prevent bufferbloat on mobile networks.
|
||||
/// 512KB was causing high latency on 3G/LTE connections.
|
||||
const MAX_PENDING_WRITE: usize = 64 * 1024;
|
||||
/// Default size for pending ciphertext buffer (bounded backpressure).
|
||||
/// Actual limit is supplied at runtime from configuration.
|
||||
const DEFAULT_MAX_PENDING_WRITE: usize = 64 * 1024;
|
||||
|
||||
/// Default read buffer capacity (reader mostly decrypts in-place into caller buffer).
|
||||
const DEFAULT_READ_CAPACITY: usize = 16 * 1024;
|
||||
@@ -152,9 +153,9 @@ impl<R> CryptoReader<R> {
|
||||
fn take_poison_error(&mut self) -> io::Error {
|
||||
match &mut self.state {
|
||||
CryptoReaderState::Poisoned { error } => error.take().unwrap_or_else(|| {
|
||||
io::Error::new(ErrorKind::Other, "stream previously poisoned")
|
||||
io::Error::other("stream previously poisoned")
|
||||
}),
|
||||
_ => io::Error::new(ErrorKind::Other, "stream not poisoned"),
|
||||
_ => io::Error::other("stream not poisoned"),
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -167,6 +168,7 @@ impl<R: AsyncRead + Unpin> AsyncRead for CryptoReader<R> {
|
||||
) -> Poll<Result<()>> {
|
||||
let this = self.get_mut();
|
||||
|
||||
#[allow(clippy::never_loop)]
|
||||
loop {
|
||||
match &mut this.state {
|
||||
CryptoReaderState::Poisoned { .. } => {
|
||||
@@ -334,22 +336,35 @@ impl PendingCiphertext {
|
||||
}
|
||||
|
||||
fn remaining_capacity(&self) -> usize {
|
||||
self.max_len.saturating_sub(self.buf.len())
|
||||
self.max_len.saturating_sub(self.pending_len())
|
||||
}
|
||||
|
||||
fn compact_consumed_prefix(&mut self) {
|
||||
if self.pos == 0 {
|
||||
return;
|
||||
}
|
||||
|
||||
if self.pos >= self.buf.len() {
|
||||
self.buf.clear();
|
||||
self.pos = 0;
|
||||
return;
|
||||
}
|
||||
|
||||
let _ = self.buf.split_to(self.pos);
|
||||
self.pos = 0;
|
||||
}
|
||||
|
||||
fn advance(&mut self, n: usize) {
|
||||
self.pos = (self.pos + n).min(self.buf.len());
|
||||
|
||||
if self.pos == self.buf.len() {
|
||||
self.buf.clear();
|
||||
self.pos = 0;
|
||||
self.compact_consumed_prefix();
|
||||
return;
|
||||
}
|
||||
|
||||
// Compact when a large prefix was consumed.
|
||||
if self.pos >= 16 * 1024 {
|
||||
let _ = self.buf.split_to(self.pos);
|
||||
self.pos = 0;
|
||||
self.compact_consumed_prefix();
|
||||
}
|
||||
}
|
||||
|
||||
@@ -377,6 +392,11 @@ impl PendingCiphertext {
|
||||
));
|
||||
}
|
||||
|
||||
// Reclaim consumed prefix when physical storage is the only limiter.
|
||||
if self.pos > 0 && self.buf.len() + plaintext.len() > self.max_len {
|
||||
self.compact_consumed_prefix();
|
||||
}
|
||||
|
||||
let start = self.buf.len();
|
||||
self.buf.reserve(plaintext.len());
|
||||
self.buf.extend_from_slice(plaintext);
|
||||
@@ -427,15 +447,22 @@ pub struct CryptoWriter<W> {
|
||||
encryptor: AesCtr,
|
||||
state: CryptoWriterState,
|
||||
scratch: BytesMut,
|
||||
max_pending_write: usize,
|
||||
}
|
||||
|
||||
impl<W> CryptoWriter<W> {
|
||||
pub fn new(upstream: W, encryptor: AesCtr) -> Self {
|
||||
pub fn new(upstream: W, encryptor: AesCtr, max_pending_write: usize) -> Self {
|
||||
let max_pending = if max_pending_write == 0 {
|
||||
DEFAULT_MAX_PENDING_WRITE
|
||||
} else {
|
||||
max_pending_write
|
||||
};
|
||||
Self {
|
||||
upstream,
|
||||
encryptor,
|
||||
state: CryptoWriterState::Idle,
|
||||
scratch: BytesMut::with_capacity(16 * 1024),
|
||||
max_pending_write: max_pending.max(4 * 1024),
|
||||
}
|
||||
}
|
||||
|
||||
@@ -477,17 +504,17 @@ impl<W> CryptoWriter<W> {
|
||||
fn take_poison_error(&mut self) -> io::Error {
|
||||
match &mut self.state {
|
||||
CryptoWriterState::Poisoned { error } => error.take().unwrap_or_else(|| {
|
||||
io::Error::new(ErrorKind::Other, "stream previously poisoned")
|
||||
io::Error::other("stream previously poisoned")
|
||||
}),
|
||||
_ => io::Error::new(ErrorKind::Other, "stream not poisoned"),
|
||||
_ => io::Error::other("stream not poisoned"),
|
||||
}
|
||||
}
|
||||
|
||||
/// Ensure we are in Flushing state and return mutable pending buffer.
|
||||
fn ensure_pending<'a>(state: &'a mut CryptoWriterState) -> &'a mut PendingCiphertext {
|
||||
fn ensure_pending(state: &mut CryptoWriterState, max_pending: usize) -> &mut PendingCiphertext {
|
||||
if matches!(state, CryptoWriterState::Idle) {
|
||||
*state = CryptoWriterState::Flushing {
|
||||
pending: PendingCiphertext::new(MAX_PENDING_WRITE),
|
||||
pending: PendingCiphertext::new(max_pending),
|
||||
};
|
||||
}
|
||||
|
||||
@@ -498,14 +525,14 @@ impl<W> CryptoWriter<W> {
|
||||
}
|
||||
|
||||
/// Select how many plaintext bytes can be accepted in buffering path
|
||||
fn select_to_accept_for_buffering(state: &CryptoWriterState, buf_len: usize) -> usize {
|
||||
fn select_to_accept_for_buffering(state: &CryptoWriterState, buf_len: usize, max_pending: usize) -> usize {
|
||||
if buf_len == 0 {
|
||||
return 0;
|
||||
}
|
||||
|
||||
match state {
|
||||
CryptoWriterState::Flushing { pending } => buf_len.min(pending.remaining_capacity()),
|
||||
CryptoWriterState::Idle => buf_len.min(MAX_PENDING_WRITE),
|
||||
CryptoWriterState::Idle => buf_len.min(max_pending),
|
||||
CryptoWriterState::Poisoned { .. } => 0,
|
||||
}
|
||||
}
|
||||
@@ -603,7 +630,7 @@ impl<W: AsyncWrite + Unpin> AsyncWrite for CryptoWriter<W> {
|
||||
Poll::Pending => {
|
||||
// Upstream blocked. Apply ideal backpressure
|
||||
let to_accept =
|
||||
Self::select_to_accept_for_buffering(&this.state, buf.len());
|
||||
Self::select_to_accept_for_buffering(&this.state, buf.len(), this.max_pending_write);
|
||||
|
||||
if to_accept == 0 {
|
||||
trace!(
|
||||
@@ -618,7 +645,7 @@ impl<W: AsyncWrite + Unpin> AsyncWrite for CryptoWriter<W> {
|
||||
|
||||
// Disjoint borrows
|
||||
let encryptor = &mut this.encryptor;
|
||||
let pending = Self::ensure_pending(&mut this.state);
|
||||
let pending = Self::ensure_pending(&mut this.state, this.max_pending_write);
|
||||
|
||||
if let Err(e) = pending.push_encrypted(encryptor, plaintext) {
|
||||
if e.kind() == ErrorKind::WouldBlock {
|
||||
@@ -635,7 +662,7 @@ impl<W: AsyncWrite + Unpin> AsyncWrite for CryptoWriter<W> {
|
||||
// 2) Fast path: pending empty -> write-through
|
||||
debug_assert!(matches!(this.state, CryptoWriterState::Idle));
|
||||
|
||||
let to_accept = buf.len().min(MAX_PENDING_WRITE);
|
||||
let to_accept = buf.len().min(this.max_pending_write);
|
||||
let plaintext = &buf[..to_accept];
|
||||
|
||||
Self::encrypt_into_scratch(&mut this.encryptor, &mut this.scratch, plaintext);
|
||||
@@ -645,7 +672,7 @@ impl<W: AsyncWrite + Unpin> AsyncWrite for CryptoWriter<W> {
|
||||
// Upstream blocked: buffer FULL ciphertext for accepted bytes.
|
||||
let ciphertext = std::mem::take(&mut this.scratch);
|
||||
|
||||
let pending = Self::ensure_pending(&mut this.state);
|
||||
let pending = Self::ensure_pending(&mut this.state, this.max_pending_write);
|
||||
pending.replace_with(ciphertext);
|
||||
|
||||
Poll::Ready(Ok(to_accept))
|
||||
@@ -672,7 +699,7 @@ impl<W: AsyncWrite + Unpin> AsyncWrite for CryptoWriter<W> {
|
||||
let remainder = this.scratch.split_off(n);
|
||||
this.scratch.clear();
|
||||
|
||||
let pending = Self::ensure_pending(&mut this.state);
|
||||
let pending = Self::ensure_pending(&mut this.state, this.max_pending_write);
|
||||
pending.replace_with(remainder);
|
||||
|
||||
Poll::Ready(Ok(to_accept))
|
||||
@@ -767,4 +794,71 @@ impl<S: AsyncWrite + Unpin> AsyncWrite for PassthroughStream<S> {
|
||||
fn poll_shutdown(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Result<()>> {
|
||||
Pin::new(&mut self.inner).poll_shutdown(cx)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
|
||||
fn test_ctr() -> AesCtr {
|
||||
AesCtr::new(&[0x11; 32], 0x0102_0304_0506_0708_1112_1314_1516_1718)
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn pending_capacity_reclaims_after_partial_advance_without_compaction_threshold() {
|
||||
let mut pending = PendingCiphertext::new(1024);
|
||||
let mut ctr = test_ctr();
|
||||
let payload = vec![0x41; 900];
|
||||
pending.push_encrypted(&mut ctr, &payload).unwrap();
|
||||
|
||||
// Keep position below compaction threshold to validate logical-capacity accounting.
|
||||
pending.advance(800);
|
||||
assert_eq!(pending.pending_len(), 100);
|
||||
assert_eq!(pending.remaining_capacity(), 924);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn push_encrypted_respects_pending_limit() {
|
||||
let mut pending = PendingCiphertext::new(64);
|
||||
let mut ctr = test_ctr();
|
||||
|
||||
pending.push_encrypted(&mut ctr, &[0x10; 64]).unwrap();
|
||||
let err = pending.push_encrypted(&mut ctr, &[0x20]).unwrap_err();
|
||||
assert_eq!(err.kind(), ErrorKind::WouldBlock);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn push_encrypted_compacts_prefix_when_physical_buffer_would_overflow() {
|
||||
let mut pending = PendingCiphertext::new(64);
|
||||
let mut ctr = test_ctr();
|
||||
|
||||
pending.push_encrypted(&mut ctr, &[0x22; 60]).unwrap();
|
||||
pending.advance(30);
|
||||
pending.push_encrypted(&mut ctr, &[0x33; 30]).unwrap();
|
||||
|
||||
assert_eq!(pending.pending_len(), 60);
|
||||
assert!(pending.buf.len() <= 64);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn pending_ciphertext_preserves_stream_order_across_drain_and_append() {
|
||||
let mut pending = PendingCiphertext::new(128);
|
||||
let mut ctr = test_ctr();
|
||||
|
||||
let first = vec![0xA1; 80];
|
||||
let second = vec![0xB2; 40];
|
||||
|
||||
pending.push_encrypted(&mut ctr, &first).unwrap();
|
||||
pending.advance(50);
|
||||
pending.push_encrypted(&mut ctr, &second).unwrap();
|
||||
|
||||
let mut baseline_ctr = test_ctr();
|
||||
let mut baseline_plain = Vec::with_capacity(first.len() + second.len());
|
||||
baseline_plain.extend_from_slice(&first);
|
||||
baseline_plain.extend_from_slice(&second);
|
||||
baseline_ctr.apply(&mut baseline_plain);
|
||||
|
||||
let expected = &baseline_plain[50..];
|
||||
assert_eq!(pending.pending_slice(), expected);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -3,6 +3,8 @@
|
||||
//! This module defines the common types and traits used by all
|
||||
//! frame encoding/decoding implementations.
|
||||
|
||||
#![allow(dead_code)]
|
||||
|
||||
use bytes::{Bytes, BytesMut};
|
||||
use std::io::Result;
|
||||
use std::sync::Arc;
|
||||
|
||||
@@ -3,12 +3,16 @@
|
||||
//! This module provides Encoder/Decoder implementations compatible
|
||||
//! with tokio-util's Framed wrapper for easy async frame I/O.
|
||||
|
||||
#![allow(dead_code)]
|
||||
|
||||
use bytes::{Bytes, BytesMut, BufMut};
|
||||
use std::io::{self, Error, ErrorKind};
|
||||
use std::sync::Arc;
|
||||
use tokio_util::codec::{Decoder, Encoder};
|
||||
|
||||
use crate::protocol::constants::ProtoTag;
|
||||
use crate::protocol::constants::{
|
||||
ProtoTag, is_valid_secure_payload_len, secure_padding_len, secure_payload_len_from_wire_len,
|
||||
};
|
||||
use crate::crypto::SecureRandom;
|
||||
use super::frame::{Frame, FrameMeta, FrameCodec as FrameCodecTrait};
|
||||
|
||||
@@ -135,7 +139,7 @@ fn encode_abridged(frame: &Frame, dst: &mut BytesMut) -> io::Result<()> {
|
||||
let data = &frame.data;
|
||||
|
||||
// Validate alignment
|
||||
if data.len() % 4 != 0 {
|
||||
if !data.len().is_multiple_of(4) {
|
||||
return Err(Error::new(
|
||||
ErrorKind::InvalidInput,
|
||||
format!("abridged frame must be 4-byte aligned, got {} bytes", data.len())
|
||||
@@ -274,13 +278,13 @@ fn decode_secure(src: &mut BytesMut, max_size: usize) -> io::Result<Option<Frame
|
||||
return Ok(None);
|
||||
}
|
||||
|
||||
// Calculate padding (indicated by length not divisible by 4)
|
||||
let padding_len = len % 4;
|
||||
let data_len = if padding_len != 0 {
|
||||
len - padding_len
|
||||
} else {
|
||||
len
|
||||
};
|
||||
let data_len = secure_payload_len_from_wire_len(len).ok_or_else(|| {
|
||||
Error::new(
|
||||
ErrorKind::InvalidData,
|
||||
format!("invalid secure frame length: {len}"),
|
||||
)
|
||||
})?;
|
||||
let padding_len = len - data_len;
|
||||
|
||||
meta.padding_len = padding_len as u8;
|
||||
|
||||
@@ -303,14 +307,15 @@ fn encode_secure(frame: &Frame, dst: &mut BytesMut, rng: &SecureRandom) -> io::R
|
||||
return Ok(());
|
||||
}
|
||||
|
||||
// Generate padding to make length not divisible by 4
|
||||
let padding_len = if data.len() % 4 == 0 {
|
||||
// Add 1-3 bytes to make it non-aligned
|
||||
(rng.range(3) + 1) as usize
|
||||
} else {
|
||||
// Already non-aligned, can add 0-3
|
||||
rng.range(4) as usize
|
||||
};
|
||||
if !is_valid_secure_payload_len(data.len()) {
|
||||
return Err(Error::new(
|
||||
ErrorKind::InvalidData,
|
||||
format!("secure payload must be 4-byte aligned, got {}", data.len()),
|
||||
));
|
||||
}
|
||||
|
||||
// Generate padding that keeps total length non-divisible by 4.
|
||||
let padding_len = secure_padding_len(data.len(), rng);
|
||||
|
||||
let total_len = data.len() + padding_len;
|
||||
dst.reserve(4 + total_len);
|
||||
@@ -625,4 +630,4 @@ mod tests {
|
||||
let result = codec.decode(&mut buf);
|
||||
assert!(result.is_err());
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,6 +1,8 @@
|
||||
//! MTProto frame stream wrappers
|
||||
|
||||
use bytes::{Bytes, BytesMut};
|
||||
#![allow(dead_code)]
|
||||
|
||||
use bytes::Bytes;
|
||||
use std::io::{Error, ErrorKind, Result};
|
||||
use tokio::io::{AsyncRead, AsyncWrite, AsyncReadExt, AsyncWriteExt};
|
||||
use crate::protocol::constants::*;
|
||||
@@ -76,7 +78,7 @@ impl<W> AbridgedFrameWriter<W> {
|
||||
impl<W: AsyncWrite + Unpin> AbridgedFrameWriter<W> {
|
||||
/// Write a frame
|
||||
pub async fn write_frame(&mut self, data: &[u8], meta: &FrameMeta) -> Result<()> {
|
||||
if data.len() % 4 != 0 {
|
||||
if !data.len().is_multiple_of(4) {
|
||||
return Err(Error::new(
|
||||
ErrorKind::InvalidInput,
|
||||
format!("Abridged frame must be aligned to 4 bytes, got {}", data.len()),
|
||||
@@ -232,11 +234,13 @@ impl<R: AsyncRead + Unpin> SecureIntermediateFrameReader<R> {
|
||||
let mut data = vec![0u8; len];
|
||||
self.upstream.read_exact(&mut data).await?;
|
||||
|
||||
// Strip padding (not aligned to 4)
|
||||
if len % 4 != 0 {
|
||||
let actual_len = len - (len % 4);
|
||||
data.truncate(actual_len);
|
||||
}
|
||||
let payload_len = secure_payload_len_from_wire_len(len).ok_or_else(|| {
|
||||
Error::new(
|
||||
ErrorKind::InvalidData,
|
||||
format!("Invalid secure frame length: {len}"),
|
||||
)
|
||||
})?;
|
||||
data.truncate(payload_len);
|
||||
|
||||
Ok((Bytes::from(data), meta))
|
||||
}
|
||||
@@ -267,8 +271,15 @@ impl<W: AsyncWrite + Unpin> SecureIntermediateFrameWriter<W> {
|
||||
return Ok(());
|
||||
}
|
||||
|
||||
// Add random padding (0-3 bytes)
|
||||
let padding_len = self.rng.range(4);
|
||||
if !is_valid_secure_payload_len(data.len()) {
|
||||
return Err(Error::new(
|
||||
ErrorKind::InvalidData,
|
||||
format!("Secure payload must be 4-byte aligned, got {}", data.len()),
|
||||
));
|
||||
}
|
||||
|
||||
// Add padding so total length is never divisible by 4 (MTProto Secure)
|
||||
let padding_len = secure_padding_len(data.len(), &self.rng);
|
||||
let padding = self.rng.bytes(padding_len);
|
||||
|
||||
let total_len = data.len() + padding_len;
|
||||
@@ -320,7 +331,7 @@ impl<R: AsyncRead + Unpin> MtprotoFrameReader<R> {
|
||||
}
|
||||
|
||||
// Validate length
|
||||
if len < MIN_MSG_LEN || len > MAX_MSG_LEN || len % PADDING_FILLER.len() != 0 {
|
||||
if !(MIN_MSG_LEN..=MAX_MSG_LEN).contains(&len) || !len.is_multiple_of(PADDING_FILLER.len()) {
|
||||
return Err(Error::new(
|
||||
ErrorKind::InvalidData,
|
||||
format!("Invalid message length: {}", len),
|
||||
@@ -550,9 +561,7 @@ mod tests {
|
||||
writer.flush().await.unwrap();
|
||||
|
||||
let (received, _meta) = reader.read_frame().await.unwrap();
|
||||
// Received should have padding stripped to align to 4
|
||||
let expected_len = (data.len() / 4) * 4;
|
||||
assert_eq!(received.len(), expected_len);
|
||||
assert_eq!(received.len(), data.len());
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
@@ -585,4 +594,4 @@ mod tests {
|
||||
let (received, _) = reader.read_frame().await.unwrap();
|
||||
assert_eq!(&received[..], &data[..]);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -12,32 +12,38 @@ pub mod frame_codec;
|
||||
pub mod frame_stream;
|
||||
|
||||
// Re-export state machine types
|
||||
#[allow(unused_imports)]
|
||||
pub use state::{
|
||||
StreamState, Transition, PollResult,
|
||||
ReadBuffer, WriteBuffer, HeaderBuffer, YieldBuffer,
|
||||
};
|
||||
|
||||
// Re-export buffer pool
|
||||
#[allow(unused_imports)]
|
||||
pub use buffer_pool::{BufferPool, PooledBuffer, PoolStats};
|
||||
|
||||
// Re-export stream implementations
|
||||
#[allow(unused_imports)]
|
||||
pub use crypto_stream::{CryptoReader, CryptoWriter, PassthroughStream};
|
||||
pub use tls_stream::{FakeTlsReader, FakeTlsWriter};
|
||||
|
||||
// Re-export frame types
|
||||
#[allow(unused_imports)]
|
||||
pub use frame::{Frame, FrameMeta, FrameCodec as FrameCodecTrait, create_codec};
|
||||
|
||||
// Re-export tokio-util compatible codecs
|
||||
// Re-export tokio-util compatible codecs
|
||||
#[allow(unused_imports)]
|
||||
pub use frame_codec::{
|
||||
FrameCodec,
|
||||
AbridgedCodec, IntermediateCodec, SecureCodec,
|
||||
};
|
||||
|
||||
// Legacy re-exports for compatibility
|
||||
#[allow(unused_imports)]
|
||||
pub use frame_stream::{
|
||||
AbridgedFrameReader, AbridgedFrameWriter,
|
||||
IntermediateFrameReader, IntermediateFrameWriter,
|
||||
SecureIntermediateFrameReader, SecureIntermediateFrameWriter,
|
||||
MtprotoFrameReader, MtprotoFrameWriter,
|
||||
FrameReaderKind, FrameWriterKind,
|
||||
};
|
||||
};
|
||||
|
||||
@@ -3,6 +3,8 @@
|
||||
//! This module provides core types and traits for implementing
|
||||
//! stateful async streams with proper partial read/write handling.
|
||||
|
||||
#![allow(dead_code)]
|
||||
|
||||
use bytes::{Bytes, BytesMut};
|
||||
use std::io;
|
||||
|
||||
|
||||
@@ -18,6 +18,8 @@
|
||||
//! - Explicit state machines for all async operations
|
||||
//! - Never lose data on partial reads
|
||||
//! - Atomic TLS record formation for writes
|
||||
|
||||
#![allow(dead_code)]
|
||||
//! - Proper handling of all TLS record types
|
||||
//!
|
||||
//! Important nuance (Telegram FakeTLS):
|
||||
@@ -25,14 +27,15 @@
|
||||
//! - However, the on-the-wire record length can exceed 16384 because TLS 1.3
|
||||
//! uses AEAD and can include tag/overhead/padding.
|
||||
//! - Telegram FakeTLS clients (notably iOS) may send Application Data records
|
||||
//! with length up to 16384 + 24 bytes. We accept that as MAX_TLS_CHUNK_SIZE.
|
||||
//! with length up to 16384 + 256 bytes (RFC 8446 §5.2). We accept that as
|
||||
//! MAX_TLS_CHUNK_SIZE.
|
||||
//!
|
||||
//! If you reject those (e.g. validate length <= 16384), you will see errors like:
|
||||
//! "TLS record too large: 16408 bytes"
|
||||
//! and uploads from iOS will break (media/file sending), while small traffic
|
||||
//! may still work.
|
||||
|
||||
use bytes::{Bytes, BytesMut, BufMut};
|
||||
use bytes::{Bytes, BytesMut};
|
||||
use std::io::{self, Error, ErrorKind, Result};
|
||||
use std::pin::Pin;
|
||||
use std::task::{Context, Poll};
|
||||
@@ -51,9 +54,9 @@ use super::state::{StreamState, HeaderBuffer, YieldBuffer, WriteBuffer};
|
||||
/// TLS record header size (type + version + length)
|
||||
const TLS_HEADER_SIZE: usize = 5;
|
||||
|
||||
/// Maximum TLS fragment size per spec (plaintext fragment).
|
||||
/// We use this for *outgoing* chunking, because we build plain ApplicationData records.
|
||||
const MAX_TLS_PAYLOAD: usize = 16384;
|
||||
/// Maximum TLS fragment size we emit for Application Data.
|
||||
/// Real TLS 1.3 allows up to 16384 + 256 bytes of ciphertext (incl. tag).
|
||||
const MAX_TLS_PAYLOAD: usize = 16384 + 256;
|
||||
|
||||
/// Maximum pending write buffer for one record remainder.
|
||||
/// Note: we never queue unlimited amount of data here; state holds at most one record.
|
||||
@@ -90,7 +93,7 @@ impl TlsRecordHeader {
|
||||
/// - We accept TLS 1.0 header version for ClientHello-like records (0x03 0x01),
|
||||
/// and TLS 1.2/1.3 style version bytes for the rest (we use TLS_VERSION = 0x03 0x03).
|
||||
/// - For Application Data, Telegram FakeTLS may send payload length up to
|
||||
/// MAX_TLS_CHUNK_SIZE (16384 + 24).
|
||||
/// MAX_TLS_CHUNK_SIZE (16384 + 256).
|
||||
/// - For other record types we keep stricter bounds to avoid memory abuse.
|
||||
fn validate(&self) -> Result<()> {
|
||||
// Version: accept TLS 1.0 header (ClientHello quirk) and TLS_VERSION (0x0303).
|
||||
@@ -104,7 +107,7 @@ impl TlsRecordHeader {
|
||||
let len = self.length as usize;
|
||||
|
||||
// Length checks depend on record type.
|
||||
// Telegram FakeTLS: ApplicationData length may be 16384 + 24.
|
||||
// Telegram FakeTLS: ApplicationData length may be 16384 + 256.
|
||||
match self.record_type {
|
||||
TLS_RECORD_APPLICATION => {
|
||||
if len > MAX_TLS_CHUNK_SIZE {
|
||||
@@ -132,7 +135,7 @@ impl TlsRecordHeader {
|
||||
}
|
||||
|
||||
/// Build header bytes
|
||||
fn to_bytes(&self) -> [u8; 5] {
|
||||
fn to_bytes(self) -> [u8; 5] {
|
||||
[
|
||||
self.record_type,
|
||||
self.version[0],
|
||||
@@ -257,9 +260,9 @@ impl<R> FakeTlsReader<R> {
|
||||
fn take_poison_error(&mut self) -> io::Error {
|
||||
match &mut self.state {
|
||||
TlsReaderState::Poisoned { error } => error.take().unwrap_or_else(|| {
|
||||
io::Error::new(ErrorKind::Other, "stream previously poisoned")
|
||||
io::Error::other("stream previously poisoned")
|
||||
}),
|
||||
_ => io::Error::new(ErrorKind::Other, "stream not poisoned"),
|
||||
_ => io::Error::other("stream not poisoned"),
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -294,7 +297,7 @@ impl<R: AsyncRead + Unpin> AsyncRead for FakeTlsReader<R> {
|
||||
TlsReaderState::Poisoned { error } => {
|
||||
this.state = TlsReaderState::Poisoned { error: None };
|
||||
let err = error.unwrap_or_else(|| {
|
||||
io::Error::new(ErrorKind::Other, "stream previously poisoned")
|
||||
io::Error::other("stream previously poisoned")
|
||||
});
|
||||
return Poll::Ready(Err(err));
|
||||
}
|
||||
@@ -613,9 +616,9 @@ impl<W> FakeTlsWriter<W> {
|
||||
fn take_poison_error(&mut self) -> io::Error {
|
||||
match &mut self.state {
|
||||
TlsWriterState::Poisoned { error } => error.take().unwrap_or_else(|| {
|
||||
io::Error::new(ErrorKind::Other, "stream previously poisoned")
|
||||
io::Error::other("stream previously poisoned")
|
||||
}),
|
||||
_ => io::Error::new(ErrorKind::Other, "stream not poisoned"),
|
||||
_ => io::Error::other("stream not poisoned"),
|
||||
}
|
||||
}
|
||||
|
||||
@@ -679,7 +682,7 @@ impl<W: AsyncWrite + Unpin> AsyncWrite for FakeTlsWriter<W> {
|
||||
TlsWriterState::Poisoned { error } => {
|
||||
this.state = TlsWriterState::Poisoned { error: None };
|
||||
let err = error.unwrap_or_else(|| {
|
||||
Error::new(ErrorKind::Other, "stream previously poisoned")
|
||||
Error::other("stream previously poisoned")
|
||||
});
|
||||
return Poll::Ready(Err(err));
|
||||
}
|
||||
@@ -754,9 +757,6 @@ impl<W: AsyncWrite + Unpin> AsyncWrite for FakeTlsWriter<W> {
|
||||
payload_size: chunk_size,
|
||||
};
|
||||
|
||||
// Wake to retry flushing soon.
|
||||
cx.waker().wake_by_ref();
|
||||
|
||||
Poll::Ready(Ok(chunk_size))
|
||||
}
|
||||
}
|
||||
@@ -771,7 +771,7 @@ impl<W: AsyncWrite + Unpin> AsyncWrite for FakeTlsWriter<W> {
|
||||
TlsWriterState::Poisoned { error } => {
|
||||
this.state = TlsWriterState::Poisoned { error: None };
|
||||
let err = error.unwrap_or_else(|| {
|
||||
Error::new(ErrorKind::Other, "stream previously poisoned")
|
||||
Error::other("stream previously poisoned")
|
||||
});
|
||||
return Poll::Ready(Err(err));
|
||||
}
|
||||
@@ -918,10 +918,8 @@ mod tests {
|
||||
let reader = ChunkedReader::new(&record, 100);
|
||||
let mut tls_reader = FakeTlsReader::new(reader);
|
||||
|
||||
let mut buf = vec![0u8; payload.len()];
|
||||
tls_reader.read_exact(&mut buf).await.unwrap();
|
||||
|
||||
assert_eq!(&buf, payload);
|
||||
let buf = tls_reader.read_exact(payload.len()).await.unwrap();
|
||||
assert_eq!(&buf[..], payload);
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
@@ -935,13 +933,11 @@ mod tests {
|
||||
let reader = ChunkedReader::new(&data, 100);
|
||||
let mut tls_reader = FakeTlsReader::new(reader);
|
||||
|
||||
let mut buf1 = vec![0u8; payload1.len()];
|
||||
tls_reader.read_exact(&mut buf1).await.unwrap();
|
||||
assert_eq!(&buf1, payload1);
|
||||
let buf1 = tls_reader.read_exact(payload1.len()).await.unwrap();
|
||||
assert_eq!(&buf1[..], payload1);
|
||||
|
||||
let mut buf2 = vec![0u8; payload2.len()];
|
||||
tls_reader.read_exact(&mut buf2).await.unwrap();
|
||||
assert_eq!(&buf2, payload2);
|
||||
let buf2 = tls_reader.read_exact(payload2.len()).await.unwrap();
|
||||
assert_eq!(&buf2[..], payload2);
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
@@ -953,10 +949,9 @@ mod tests {
|
||||
let reader = ChunkedReader::new(&record, 1); // 1 byte at a time!
|
||||
let mut tls_reader = FakeTlsReader::new(reader);
|
||||
|
||||
let mut buf = vec![0u8; payload.len()];
|
||||
tls_reader.read_exact(&mut buf).await.unwrap();
|
||||
let buf = tls_reader.read_exact(payload.len()).await.unwrap();
|
||||
|
||||
assert_eq!(&buf, payload);
|
||||
assert_eq!(&buf[..], payload);
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
@@ -967,10 +962,9 @@ mod tests {
|
||||
let reader = ChunkedReader::new(&record, 7); // Awkward chunk size
|
||||
let mut tls_reader = FakeTlsReader::new(reader);
|
||||
|
||||
let mut buf = vec![0u8; payload.len()];
|
||||
tls_reader.read_exact(&mut buf).await.unwrap();
|
||||
let buf = tls_reader.read_exact(payload.len()).await.unwrap();
|
||||
|
||||
assert_eq!(&buf, payload);
|
||||
assert_eq!(&buf[..], payload);
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
@@ -983,10 +977,9 @@ mod tests {
|
||||
let reader = ChunkedReader::new(&data, 100);
|
||||
let mut tls_reader = FakeTlsReader::new(reader);
|
||||
|
||||
let mut buf = vec![0u8; payload.len()];
|
||||
tls_reader.read_exact(&mut buf).await.unwrap();
|
||||
let buf = tls_reader.read_exact(payload.len()).await.unwrap();
|
||||
|
||||
assert_eq!(&buf, payload);
|
||||
assert_eq!(&buf[..], payload);
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
@@ -1000,10 +993,9 @@ mod tests {
|
||||
let reader = ChunkedReader::new(&data, 3); // Small chunks
|
||||
let mut tls_reader = FakeTlsReader::new(reader);
|
||||
|
||||
let mut buf = vec![0u8; payload.len()];
|
||||
tls_reader.read_exact(&mut buf).await.unwrap();
|
||||
let buf = tls_reader.read_exact(payload.len()).await.unwrap();
|
||||
|
||||
assert_eq!(&buf, payload);
|
||||
assert_eq!(&buf[..], payload);
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
@@ -1244,4 +1236,4 @@ mod tests {
|
||||
let bytes = header.to_bytes();
|
||||
assert_eq!(bytes, [0x17, 0x03, 0x03, 0x12, 0x34]);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,5 +1,7 @@
|
||||
//! Stream traits and common types
|
||||
|
||||
#![allow(dead_code)]
|
||||
|
||||
use bytes::Bytes;
|
||||
use std::io::Result;
|
||||
use std::pin::Pin;
|
||||
|
||||
255
src/tls_front/cache.rs
Normal file
255
src/tls_front/cache.rs
Normal file
@@ -0,0 +1,255 @@
|
||||
use std::collections::HashMap;
|
||||
use std::net::IpAddr;
|
||||
use std::path::{Path, PathBuf};
|
||||
use std::sync::Arc;
|
||||
use std::time::{Duration, Instant, SystemTime};
|
||||
|
||||
use tokio::sync::RwLock;
|
||||
use tokio::time::sleep;
|
||||
use tracing::{debug, warn, info};
|
||||
|
||||
use crate::tls_front::types::{CachedTlsData, ParsedServerHello, TlsFetchResult};
|
||||
|
||||
/// Lightweight in-memory + optional on-disk cache for TLS fronting data.
|
||||
#[derive(Debug)]
|
||||
pub struct TlsFrontCache {
|
||||
memory: RwLock<HashMap<String, Arc<CachedTlsData>>>,
|
||||
default: Arc<CachedTlsData>,
|
||||
full_cert_sent: RwLock<HashMap<IpAddr, Instant>>,
|
||||
disk_path: PathBuf,
|
||||
}
|
||||
|
||||
#[allow(dead_code)]
|
||||
impl TlsFrontCache {
|
||||
pub fn new(domains: &[String], default_len: usize, disk_path: impl AsRef<Path>) -> Self {
|
||||
let default_template = ParsedServerHello {
|
||||
version: [0x03, 0x03],
|
||||
random: [0u8; 32],
|
||||
session_id: Vec::new(),
|
||||
cipher_suite: [0x13, 0x01],
|
||||
compression: 0,
|
||||
extensions: Vec::new(),
|
||||
};
|
||||
|
||||
let default = Arc::new(CachedTlsData {
|
||||
server_hello_template: default_template,
|
||||
cert_info: None,
|
||||
cert_payload: None,
|
||||
app_data_records_sizes: vec![default_len],
|
||||
total_app_data_len: default_len,
|
||||
fetched_at: SystemTime::now(),
|
||||
domain: "default".to_string(),
|
||||
});
|
||||
|
||||
let mut map = HashMap::new();
|
||||
for d in domains {
|
||||
map.insert(d.clone(), default.clone());
|
||||
}
|
||||
|
||||
Self {
|
||||
memory: RwLock::new(map),
|
||||
default,
|
||||
full_cert_sent: RwLock::new(HashMap::new()),
|
||||
disk_path: disk_path.as_ref().to_path_buf(),
|
||||
}
|
||||
}
|
||||
|
||||
pub async fn get(&self, sni: &str) -> Arc<CachedTlsData> {
|
||||
let guard = self.memory.read().await;
|
||||
guard.get(sni).cloned().unwrap_or_else(|| self.default.clone())
|
||||
}
|
||||
|
||||
pub async fn contains_domain(&self, domain: &str) -> bool {
|
||||
self.memory.read().await.contains_key(domain)
|
||||
}
|
||||
|
||||
/// Returns true when full cert payload should be sent for client_ip
|
||||
/// according to TTL policy.
|
||||
pub async fn take_full_cert_budget_for_ip(
|
||||
&self,
|
||||
client_ip: IpAddr,
|
||||
ttl: Duration,
|
||||
) -> bool {
|
||||
if ttl.is_zero() {
|
||||
self.full_cert_sent
|
||||
.write()
|
||||
.await
|
||||
.insert(client_ip, Instant::now());
|
||||
return true;
|
||||
}
|
||||
|
||||
let now = Instant::now();
|
||||
let mut guard = self.full_cert_sent.write().await;
|
||||
guard.retain(|_, seen_at| now.duration_since(*seen_at) < ttl);
|
||||
|
||||
match guard.get_mut(&client_ip) {
|
||||
Some(seen_at) => {
|
||||
if now.duration_since(*seen_at) >= ttl {
|
||||
*seen_at = now;
|
||||
true
|
||||
} else {
|
||||
false
|
||||
}
|
||||
}
|
||||
None => {
|
||||
guard.insert(client_ip, now);
|
||||
true
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub async fn set(&self, domain: &str, data: CachedTlsData) {
|
||||
let mut guard = self.memory.write().await;
|
||||
guard.insert(domain.to_string(), Arc::new(data));
|
||||
}
|
||||
|
||||
/// Best-effort load of persisted cache entries from `disk_path`.
///
/// Scans the directory for `*.json` files, deserializes each into
/// `CachedTlsData`, validates the stored domain, refreshes `fetched_at`
/// from the file's mtime, discards entries older than 72h, and inserts the
/// survivors into the in-memory map. All I/O errors are silently ignored
/// (a missing or unreadable cache directory is not fatal).
pub async fn load_from_disk(&self) {
    let path = self.disk_path.clone();
    // Ensure the directory exists; bail quietly if it cannot be created.
    if tokio::fs::create_dir_all(&path).await.is_err() {
        return;
    }
    let mut loaded = 0usize;
    if let Ok(mut dir) = tokio::fs::read_dir(&path).await {
        while let Ok(Some(entry)) = dir.next_entry().await {
            if let Ok(name) = entry.file_name().into_string() {
                if !name.ends_with(".json") {
                    continue;
                }
                if let Ok(data) = tokio::fs::read(entry.path()).await
                    && let Ok(mut cached) = serde_json::from_slice::<CachedTlsData>(&data)
                {
                    // Reject hostile/garbage domains so a crafted cache file
                    // cannot inject arbitrary keys (only ASCII alnum, '.', '-').
                    if cached.domain.is_empty()
                        || cached.domain.len() > 255
                        || !cached.domain.chars().all(|c| c.is_ascii_alphanumeric() || c == '.' || c == '-')
                    {
                        warn!(file = %name, "Skipping TLS cache entry with invalid domain");
                        continue;
                    }
                    // fetched_at is skipped during deserialization; approximate with file mtime if available.
                    if let Ok(meta) = entry.metadata().await
                        && let Ok(modified) = meta.modified()
                    {
                        cached.fetched_at = modified;
                    }
                    // Drop entries older than 72h
                    if let Ok(age) = cached.fetched_at.elapsed()
                        && age > Duration::from_secs(72 * 3600)
                    {
                        warn!(domain = %cached.domain, "Skipping stale TLS cache entry (>72h)");
                        continue;
                    }
                    // Clone needed: `cached` is moved into `set` while the key
                    // borrows the domain string.
                    let domain = cached.domain.clone();
                    self.set(&domain, cached).await;
                    loaded += 1;
                }
            }
        }
    }
    if loaded > 0 {
        info!(count = loaded, "Loaded TLS cache entries from disk");
    }
}
|
||||
|
||||
async fn persist(&self, domain: &str, data: &CachedTlsData) {
|
||||
if tokio::fs::create_dir_all(&self.disk_path).await.is_err() {
|
||||
return;
|
||||
}
|
||||
let fname = format!("{}.json", domain.replace(['/', '\\'], "_"));
|
||||
let path = self.disk_path.join(fname);
|
||||
if let Ok(json) = serde_json::to_vec_pretty(data) {
|
||||
// best-effort write
|
||||
let _ = tokio::fs::write(path, json).await;
|
||||
}
|
||||
}
|
||||
|
||||
/// Spawn background updater that periodically refreshes cached domains using provided fetcher.
|
||||
pub fn spawn_updater<F>(
|
||||
self: Arc<Self>,
|
||||
domains: Vec<String>,
|
||||
interval: Duration,
|
||||
fetcher: F,
|
||||
) where
|
||||
F: Fn(String) -> tokio::task::JoinHandle<()> + Send + Sync + 'static,
|
||||
{
|
||||
tokio::spawn(async move {
|
||||
loop {
|
||||
for domain in &domains {
|
||||
let _ = fetcher(domain.clone()).await;
|
||||
}
|
||||
sleep(interval).await;
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
/// Replace cached entry from a fetch result.
|
||||
pub async fn update_from_fetch(&self, domain: &str, fetched: TlsFetchResult) {
|
||||
let data = CachedTlsData {
|
||||
server_hello_template: fetched.server_hello_parsed,
|
||||
cert_info: fetched.cert_info,
|
||||
cert_payload: fetched.cert_payload,
|
||||
app_data_records_sizes: fetched.app_data_records_sizes.clone(),
|
||||
total_app_data_len: fetched.total_app_data_len,
|
||||
fetched_at: SystemTime::now(),
|
||||
domain: domain.to_string(),
|
||||
};
|
||||
|
||||
self.set(domain, data.clone()).await;
|
||||
self.persist(domain, &data).await;
|
||||
debug!(domain = %domain, len = fetched.total_app_data_len, "TLS cache updated");
|
||||
}
|
||||
|
||||
/// Handle to the fallback entry used for unknown SNI values.
pub fn default_entry(&self) -> Arc<CachedTlsData> {
    Arc::clone(&self.default)
}
|
||||
|
||||
pub fn disk_path(&self) -> &Path {
|
||||
&self.disk_path
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
mod tests {
    use super::*;

    /// TTL-gated budget: first call allowed, repeat within TTL denied,
    /// allowed again once the TTL window has elapsed.
    #[tokio::test]
    async fn test_take_full_cert_budget_for_ip_uses_ttl() {
        let cache = TlsFrontCache::new(
            &["example.com".to_string()],
            1024,
            "tlsfront-test-cache",
        );
        let ip: IpAddr = "127.0.0.1".parse().expect("ip");
        let ttl = Duration::from_millis(80);

        assert!(cache
            .take_full_cert_budget_for_ip(ip, ttl)
            .await);
        assert!(!cache
            .take_full_cert_budget_for_ip(ip, ttl)
            .await);

        // Sleep past the 80ms TTL so the entry is evicted and the budget
        // refills. NOTE(review): real-clock sleep; could be flaky on very
        // loaded CI — consider tokio::time::pause if it ever flakes.
        tokio::time::sleep(Duration::from_millis(90)).await;

        assert!(cache
            .take_full_cert_budget_for_ip(ip, ttl)
            .await);
    }

    /// Zero TTL disables rate limiting: every call is allowed.
    #[tokio::test]
    async fn test_take_full_cert_budget_for_ip_zero_ttl_always_allows_full_payload() {
        let cache = TlsFrontCache::new(
            &["example.com".to_string()],
            1024,
            "tlsfront-test-cache",
        );
        let ip: IpAddr = "127.0.0.1".parse().expect("ip");
        let ttl = Duration::ZERO;

        assert!(cache
            .take_full_cert_budget_for_ip(ip, ttl)
            .await);
        assert!(cache
            .take_full_cert_budget_for_ip(ip, ttl)
            .await);
    }
}
|
||||
388
src/tls_front/emulator.rs
Normal file
388
src/tls_front/emulator.rs
Normal file
@@ -0,0 +1,388 @@
|
||||
use crate::crypto::{sha256_hmac, SecureRandom};
|
||||
use crate::protocol::constants::{
|
||||
TLS_RECORD_APPLICATION, TLS_RECORD_CHANGE_CIPHER, TLS_RECORD_HANDSHAKE, TLS_VERSION,
|
||||
};
|
||||
use crate::protocol::tls::{TLS_DIGEST_LEN, TLS_DIGEST_POS, gen_fake_x25519_key};
|
||||
use crate::tls_front::types::{CachedTlsData, ParsedCertificateInfo};
|
||||
|
||||
/// Lower bound, in bytes, for a single emulated ApplicationData record body.
const MIN_APP_DATA: usize = 64;
/// Upper bound per record.
const MAX_APP_DATA: usize = 16640; // RFC 8446 §5.2 allows up to 2^14 + 256
|
||||
|
||||
fn jitter_and_clamp_sizes(sizes: &[usize], rng: &SecureRandom) -> Vec<usize> {
|
||||
sizes
|
||||
.iter()
|
||||
.map(|&size| {
|
||||
let base = size.clamp(MIN_APP_DATA, MAX_APP_DATA);
|
||||
let jitter_range = ((base as f64) * 0.03).round() as i64;
|
||||
if jitter_range == 0 {
|
||||
return base;
|
||||
}
|
||||
let mut rand_bytes = [0u8; 2];
|
||||
rand_bytes.copy_from_slice(&rng.bytes(2));
|
||||
let span = 2 * jitter_range + 1;
|
||||
let delta = (u16::from_le_bytes(rand_bytes) as i64 % span) - jitter_range;
|
||||
let adjusted = (base as i64 + delta).clamp(MIN_APP_DATA as i64, MAX_APP_DATA as i64);
|
||||
adjusted as usize
|
||||
})
|
||||
.collect()
|
||||
}
|
||||
|
||||
/// Total plaintext bytes the given record sizes can carry once the 17-byte
/// per-record overhead (1 inner content-type byte + 16-byte AEAD-style tag)
/// is subtracted. Records of 17 bytes or fewer contribute nothing.
fn app_data_body_capacity(sizes: &[usize]) -> usize {
    let mut capacity = 0usize;
    for &record_size in sizes {
        capacity += record_size.saturating_sub(17);
    }
    capacity
}
|
||||
|
||||
fn ensure_payload_capacity(mut sizes: Vec<usize>, payload_len: usize) -> Vec<usize> {
|
||||
if payload_len == 0 {
|
||||
return sizes;
|
||||
}
|
||||
|
||||
let mut body_total = app_data_body_capacity(&sizes);
|
||||
if body_total >= payload_len {
|
||||
return sizes;
|
||||
}
|
||||
|
||||
if let Some(last) = sizes.last_mut() {
|
||||
let free = MAX_APP_DATA.saturating_sub(*last);
|
||||
let grow = free.min(payload_len - body_total);
|
||||
*last += grow;
|
||||
body_total += grow;
|
||||
}
|
||||
|
||||
while body_total < payload_len {
|
||||
let remaining = payload_len - body_total;
|
||||
let chunk = (remaining + 17).clamp(MIN_APP_DATA, MAX_APP_DATA);
|
||||
sizes.push(chunk);
|
||||
body_total += chunk.saturating_sub(17);
|
||||
}
|
||||
|
||||
sizes
|
||||
}
|
||||
|
||||
fn build_compact_cert_info_payload(cert_info: &ParsedCertificateInfo) -> Option<Vec<u8>> {
|
||||
let mut fields = Vec::new();
|
||||
|
||||
if let Some(subject) = cert_info.subject_cn.as_deref() {
|
||||
fields.push(format!("CN={subject}"));
|
||||
}
|
||||
if let Some(issuer) = cert_info.issuer_cn.as_deref() {
|
||||
fields.push(format!("ISSUER={issuer}"));
|
||||
}
|
||||
if let Some(not_before) = cert_info.not_before_unix {
|
||||
fields.push(format!("NB={not_before}"));
|
||||
}
|
||||
if let Some(not_after) = cert_info.not_after_unix {
|
||||
fields.push(format!("NA={not_after}"));
|
||||
}
|
||||
if !cert_info.san_names.is_empty() {
|
||||
let san = cert_info
|
||||
.san_names
|
||||
.iter()
|
||||
.take(8)
|
||||
.map(String::as_str)
|
||||
.collect::<Vec<_>>()
|
||||
.join(",");
|
||||
fields.push(format!("SAN={san}"));
|
||||
}
|
||||
|
||||
if fields.is_empty() {
|
||||
return None;
|
||||
}
|
||||
|
||||
let mut payload = fields.join(";").into_bytes();
|
||||
if payload.len() > 512 {
|
||||
payload.truncate(512);
|
||||
}
|
||||
Some(payload)
|
||||
}
|
||||
|
||||
/// Build a ServerHello + CCS + ApplicationData sequence using cached TLS metadata.
///
/// The output imitates a TLS 1.3 server flight: a plaintext ServerHello (with
/// key_share, supported_versions, and optional ALPN extensions), a
/// ChangeCipherSpec record, then ApplicationData records whose count/sizes
/// mirror the cached origin server (with jitter). The certificate payload —
/// full Certificate message or compact metadata string, per
/// `use_full_cert_payload` — is embedded in the record bodies; the remainder
/// is random filler shaped like AEAD ciphertext. Finally an HMAC over
/// `client_digest || response` is written into the ServerHello random field
/// at TLS_DIGEST_POS (overwriting the zero placeholder).
pub fn build_emulated_server_hello(
    secret: &[u8],
    client_digest: &[u8; TLS_DIGEST_LEN],
    session_id: &[u8],
    cached: &CachedTlsData,
    use_full_cert_payload: bool,
    rng: &SecureRandom,
    alpn: Option<Vec<u8>>,
    new_session_tickets: u8,
) -> Vec<u8> {
    // --- ServerHello ---
    let mut extensions = Vec::new();
    // KeyShare (x25519)
    let key = gen_fake_x25519_key(rng);
    extensions.extend_from_slice(&0x0033u16.to_be_bytes()); // key_share
    extensions.extend_from_slice(&(2 + 2 + 32u16).to_be_bytes()); // len: group(2) + key_len(2) + key(32)
    extensions.extend_from_slice(&0x001du16.to_be_bytes()); // X25519
    extensions.extend_from_slice(&(32u16).to_be_bytes());
    extensions.extend_from_slice(&key);
    // supported_versions (TLS1.3)
    extensions.extend_from_slice(&0x002bu16.to_be_bytes());
    extensions.extend_from_slice(&(2u16).to_be_bytes());
    extensions.extend_from_slice(&0x0304u16.to_be_bytes());
    // ALPN extension, only if a protocol was negotiated
    if let Some(alpn_proto) = &alpn {
        extensions.extend_from_slice(&0x0010u16.to_be_bytes());
        let list_len: u16 = 1 + alpn_proto.len() as u16;
        let ext_len: u16 = 2 + list_len;
        extensions.extend_from_slice(&ext_len.to_be_bytes());
        extensions.extend_from_slice(&list_len.to_be_bytes());
        extensions.push(alpn_proto.len() as u8);
        extensions.extend_from_slice(alpn_proto);
    }

    let extensions_len = extensions.len() as u16;

    let body_len = 2 + // version
        32 + // random
        1 + session_id.len() + // session id
        2 + // cipher
        1 + // compression
        2 + extensions.len(); // extensions

    // Handshake message: type(1) + u24 length + body
    let mut message = Vec::with_capacity(4 + body_len);
    message.push(0x02); // ServerHello
    let len_bytes = (body_len as u32).to_be_bytes();
    message.extend_from_slice(&len_bytes[1..4]); // u24: drop the high byte
    message.extend_from_slice(&cached.server_hello_template.version); // 0x0303
    message.extend_from_slice(&[0u8; 32]); // random placeholder — HMAC digest written here later
    message.push(session_id.len() as u8);
    message.extend_from_slice(session_id);
    // Fall back to TLS_AES_128_GCM_SHA256 when the cached template has no suite.
    let cipher = if cached.server_hello_template.cipher_suite == [0, 0] {
        [0x13, 0x01]
    } else {
        cached.server_hello_template.cipher_suite
    };
    message.extend_from_slice(&cipher);
    message.push(cached.server_hello_template.compression);
    message.extend_from_slice(&extensions_len.to_be_bytes());
    message.extend_from_slice(&extensions);

    // Wrap in a TLS record: type(1) + version(2) + length(2) + message
    let mut server_hello = Vec::with_capacity(5 + message.len());
    server_hello.push(TLS_RECORD_HANDSHAKE);
    server_hello.extend_from_slice(&TLS_VERSION);
    server_hello.extend_from_slice(&(message.len() as u16).to_be_bytes());
    server_hello.extend_from_slice(&message);

    // --- ChangeCipherSpec ---
    let change_cipher_spec = [
        TLS_RECORD_CHANGE_CIPHER,
        TLS_VERSION[0],
        TLS_VERSION[1],
        0x00,
        0x01,
        0x01,
    ];

    // --- ApplicationData (fake encrypted records) ---
    // Use the same number and sizes of ApplicationData records as the cached server.
    let mut sizes = cached.app_data_records_sizes.clone();
    if sizes.is_empty() {
        sizes.push(cached.total_app_data_len.max(1024));
    }
    let mut sizes = jitter_and_clamp_sizes(&sizes, rng);
    // Compact metadata fallback derived from parsed cert info (may be None).
    let compact_payload = cached
        .cert_info
        .as_ref()
        .and_then(build_compact_cert_info_payload);
    // Full Certificate message when requested and non-empty; otherwise compact.
    let selected_payload: Option<&[u8]> = if use_full_cert_payload {
        cached
            .cert_payload
            .as_ref()
            .map(|payload| payload.certificate_message.as_slice())
            .filter(|payload| !payload.is_empty())
            .or(compact_payload.as_deref())
    } else {
        compact_payload.as_deref()
    };

    if let Some(payload) = selected_payload {
        sizes = ensure_payload_capacity(sizes, payload.len());
    }

    let mut app_data = Vec::new();
    let mut payload_offset = 0usize;
    for size in sizes {
        let mut rec = Vec::with_capacity(5 + size);
        rec.push(TLS_RECORD_APPLICATION);
        rec.extend_from_slice(&TLS_VERSION);
        rec.extend_from_slice(&(size as u16).to_be_bytes());

        if let Some(payload) = selected_payload {
            if size > 17 {
                // body = payload slice (as much as remains) + random filler,
                // then a TLS 1.3 inner-type byte and a fake 16-byte AEAD tag.
                let body_len = size - 17;
                let remaining = payload.len().saturating_sub(payload_offset);
                let copy_len = remaining.min(body_len);
                if copy_len > 0 {
                    rec.extend_from_slice(&payload[payload_offset..payload_offset + copy_len]);
                    payload_offset += copy_len;
                }
                if body_len > copy_len {
                    rec.extend_from_slice(&rng.bytes(body_len - copy_len));
                }
                rec.push(0x16); // inner content type marker (handshake)
                rec.extend_from_slice(&rng.bytes(16)); // AEAD-like tag
            } else {
                // Record too small for the framing overhead: pure random body.
                rec.extend_from_slice(&rng.bytes(size));
            }
        } else if size > 17 {
            let body_len = size - 17;
            rec.extend_from_slice(&rng.bytes(body_len));
            rec.push(0x16); // inner content type marker (handshake)
            rec.extend_from_slice(&rng.bytes(16)); // AEAD-like tag
        } else {
            rec.extend_from_slice(&rng.bytes(size));
        }
        app_data.extend_from_slice(&rec);
    }

    // --- Combine ---
    // Optional NewSessionTicket mimic records (opaque ApplicationData for fingerprint).
    let mut tickets = Vec::new();
    if new_session_tickets > 0 {
        for _ in 0..new_session_tickets {
            // 48..=95 bytes per ticket record
            let ticket_len: usize = rng.range(48) + 48;
            let mut rec = Vec::with_capacity(5 + ticket_len);
            rec.push(TLS_RECORD_APPLICATION);
            rec.extend_from_slice(&TLS_VERSION);
            rec.extend_from_slice(&(ticket_len as u16).to_be_bytes());
            rec.extend_from_slice(&rng.bytes(ticket_len));
            tickets.extend_from_slice(&rec);
        }
    }

    let mut response = Vec::with_capacity(server_hello.len() + change_cipher_spec.len() + app_data.len() + tickets.len());
    response.extend_from_slice(&server_hello);
    response.extend_from_slice(&change_cipher_spec);
    response.extend_from_slice(&app_data);
    response.extend_from_slice(&tickets);

    // --- HMAC ---
    // Authenticate the whole flight against the client's digest, then stamp
    // the result into the ServerHello random field (zeroed placeholder above).
    let mut hmac_input = Vec::with_capacity(TLS_DIGEST_LEN + response.len());
    hmac_input.extend_from_slice(client_digest);
    hmac_input.extend_from_slice(&response);
    let digest = sha256_hmac(secret, &hmac_input);
    response[TLS_DIGEST_POS..TLS_DIGEST_POS + TLS_DIGEST_LEN].copy_from_slice(&digest);

    response
}
|
||||
|
||||
#[cfg(test)]
mod tests {
    use std::time::SystemTime;

    use crate::tls_front::types::{CachedTlsData, ParsedServerHello, TlsCertPayload};

    use super::build_emulated_server_hello;
    use crate::crypto::SecureRandom;
    use crate::protocol::constants::{
        TLS_RECORD_APPLICATION, TLS_RECORD_CHANGE_CIPHER, TLS_RECORD_HANDSHAKE,
    };

    /// Skip past the ServerHello and CCS records and return the body of the
    /// first ApplicationData record. Assumes a well-formed response (indexes
    /// directly, no bounds checks) — test-only helper.
    fn first_app_data_payload(response: &[u8]) -> &[u8] {
        let hello_len = u16::from_be_bytes([response[3], response[4]]) as usize;
        let ccs_start = 5 + hello_len;
        let ccs_len = u16::from_be_bytes([response[ccs_start + 3], response[ccs_start + 4]]) as usize;
        let app_start = ccs_start + 5 + ccs_len;
        let app_len = u16::from_be_bytes([response[app_start + 3], response[app_start + 4]]) as usize;
        &response[app_start + 5..app_start + 5 + app_len]
    }

    /// Minimal cached entry: TLS 1.2 template bytes, one 64-byte app-data
    /// record, optional certificate payload under test.
    fn make_cached(cert_payload: Option<TlsCertPayload>) -> CachedTlsData {
        CachedTlsData {
            server_hello_template: ParsedServerHello {
                version: [0x03, 0x03],
                random: [0u8; 32],
                session_id: Vec::new(),
                cipher_suite: [0x13, 0x01],
                compression: 0,
                extensions: Vec::new(),
            },
            cert_info: None,
            cert_payload,
            app_data_records_sizes: vec![64],
            total_app_data_len: 64,
            fetched_at: SystemTime::now(),
            domain: "example.com".to_string(),
        }
    }

    /// With `use_full_cert_payload = true` the response embeds the cached
    /// Certificate message at the start of the first app-data record.
    #[test]
    fn test_build_emulated_server_hello_uses_cached_cert_payload() {
        let cert_msg = vec![0x0b, 0x00, 0x00, 0x05, 0x00, 0xaa, 0xbb, 0xcc, 0xdd];
        let cached = make_cached(Some(TlsCertPayload {
            cert_chain_der: vec![vec![0x30, 0x01, 0x00]],
            certificate_message: cert_msg.clone(),
        }));
        let rng = SecureRandom::new();
        let response = build_emulated_server_hello(
            b"secret",
            &[0x11; 32],
            &[0x22; 16],
            &cached,
            true,
            &rng,
            None,
            0,
        );

        // Record sequence: ServerHello, ChangeCipherSpec, ApplicationData.
        assert_eq!(response[0], TLS_RECORD_HANDSHAKE);
        let hello_len = u16::from_be_bytes([response[3], response[4]]) as usize;
        let ccs_start = 5 + hello_len;
        assert_eq!(response[ccs_start], TLS_RECORD_CHANGE_CIPHER);
        let app_start = ccs_start + 6; // CCS record is always 6 bytes total
        assert_eq!(response[app_start], TLS_RECORD_APPLICATION);

        let payload = first_app_data_payload(&response);
        assert!(payload.starts_with(&cert_msg));
    }

    /// No certificate payload at all: the record body is random filler with
    /// the 0x16 inner-type marker 17 bytes from the end.
    #[test]
    fn test_build_emulated_server_hello_random_fallback_when_no_cert_payload() {
        let cached = make_cached(None);
        let rng = SecureRandom::new();
        let response = build_emulated_server_hello(
            b"secret",
            &[0x22; 32],
            &[0x33; 16],
            &cached,
            true,
            &rng,
            None,
            0,
        );

        let payload = first_app_data_payload(&response);
        assert!(payload.len() >= 64);
        assert_eq!(payload[payload.len() - 17], 0x16);
    }

    /// With `use_full_cert_payload = false` the compact "CN=...;..." metadata
    /// string is embedded instead of the full Certificate message.
    #[test]
    fn test_build_emulated_server_hello_uses_compact_payload_after_first() {
        let cert_msg = vec![0x0b, 0x00, 0x00, 0x05, 0x00, 0xaa, 0xbb, 0xcc, 0xdd];
        let mut cached = make_cached(Some(TlsCertPayload {
            cert_chain_der: vec![vec![0x30, 0x01, 0x00]],
            certificate_message: cert_msg,
        }));
        cached.cert_info = Some(crate::tls_front::types::ParsedCertificateInfo {
            not_after_unix: Some(1_900_000_000),
            not_before_unix: Some(1_700_000_000),
            issuer_cn: Some("Issuer".to_string()),
            subject_cn: Some("example.com".to_string()),
            san_names: vec!["example.com".to_string(), "www.example.com".to_string()],
        });

        let rng = SecureRandom::new();
        let response = build_emulated_server_hello(
            b"secret",
            &[0x44; 32],
            &[0x55; 16],
            &cached,
            false,
            &rng,
            None,
            0,
        );

        let payload = first_app_data_payload(&response);
        assert!(payload.starts_with(b"CN=example.com"));
    }
}
|
||||
756
src/tls_front/fetcher.rs
Normal file
756
src/tls_front/fetcher.rs
Normal file
@@ -0,0 +1,756 @@
|
||||
use std::sync::Arc;
|
||||
use std::time::Duration;
|
||||
|
||||
use anyhow::{Result, anyhow};
|
||||
use tokio::io::{AsyncRead, AsyncReadExt, AsyncWrite, AsyncWriteExt};
|
||||
use tokio::net::TcpStream;
|
||||
#[cfg(unix)]
|
||||
use tokio::net::UnixStream;
|
||||
use tokio::time::timeout;
|
||||
use tokio_rustls::client::TlsStream;
|
||||
use tokio_rustls::TlsConnector;
|
||||
use tracing::{debug, warn};
|
||||
|
||||
use rustls::client::danger::{HandshakeSignatureValid, ServerCertVerified, ServerCertVerifier};
|
||||
use rustls::client::ClientConfig;
|
||||
use rustls::pki_types::{CertificateDer, ServerName, UnixTime};
|
||||
use rustls::{DigitallySignedStruct, Error as RustlsError};
|
||||
|
||||
use x509_parser::prelude::FromDer;
|
||||
use x509_parser::certificate::X509Certificate;
|
||||
|
||||
use crate::crypto::SecureRandom;
|
||||
use crate::network::dns_overrides::resolve_socket_addr;
|
||||
use crate::protocol::constants::{TLS_RECORD_APPLICATION, TLS_RECORD_HANDSHAKE};
|
||||
use crate::transport::proxy_protocol::{ProxyProtocolV1Builder, ProxyProtocolV2Builder};
|
||||
use crate::tls_front::types::{
|
||||
ParsedCertificateInfo,
|
||||
ParsedServerHello,
|
||||
TlsCertPayload,
|
||||
TlsExtension,
|
||||
TlsFetchResult,
|
||||
};
|
||||
|
||||
/// No-op verifier: accept any certificate (we only need lengths and metadata).
///
/// SECURITY: this deliberately disables all certificate and signature
/// validation. It must only ever be installed in the fetcher's throwaway
/// client config (see `build_client_config`), never for real traffic.
#[derive(Debug)]
struct NoVerify;

impl ServerCertVerifier for NoVerify {
    /// Accept every server certificate without inspection.
    fn verify_server_cert(
        &self,
        _end_entity: &CertificateDer<'_>,
        _intermediates: &[CertificateDer<'_>],
        _server_name: &ServerName<'_>,
        _ocsp: &[u8],
        _now: UnixTime,
    ) -> Result<ServerCertVerified, RustlsError> {
        Ok(ServerCertVerified::assertion())
    }

    /// Accept any TLS 1.2 handshake signature without checking it.
    fn verify_tls12_signature(
        &self,
        _message: &[u8],
        _cert: &CertificateDer<'_>,
        _dss: &DigitallySignedStruct,
    ) -> Result<HandshakeSignatureValid, RustlsError> {
        Ok(HandshakeSignatureValid::assertion())
    }

    /// Accept any TLS 1.3 handshake signature without checking it.
    fn verify_tls13_signature(
        &self,
        _message: &[u8],
        _cert: &CertificateDer<'_>,
        _dss: &DigitallySignedStruct,
    ) -> Result<HandshakeSignatureValid, RustlsError> {
        Ok(HandshakeSignatureValid::assertion())
    }

    /// Advertise a common set of schemes so rustls proceeds with servers
    /// using any of these signature algorithms.
    fn supported_verify_schemes(&self) -> Vec<rustls::SignatureScheme> {
        use rustls::SignatureScheme::*;
        vec![
            RSA_PKCS1_SHA256,
            RSA_PSS_SHA256,
            ECDSA_NISTP256_SHA256,
            ECDSA_NISTP384_SHA384,
        ]
    }
}
|
||||
|
||||
fn build_client_config() -> Arc<ClientConfig> {
|
||||
let root = rustls::RootCertStore::empty();
|
||||
|
||||
let provider = rustls::crypto::ring::default_provider();
|
||||
let mut config = ClientConfig::builder_with_provider(Arc::new(provider))
|
||||
.with_protocol_versions(&[&rustls::version::TLS13, &rustls::version::TLS12])
|
||||
.expect("protocol versions")
|
||||
.with_root_certificates(root)
|
||||
.with_no_client_auth();
|
||||
|
||||
config
|
||||
.dangerous()
|
||||
.set_certificate_verifier(Arc::new(NoVerify));
|
||||
|
||||
Arc::new(config)
|
||||
}
|
||||
|
||||
/// Hand-roll a TLS ClientHello record for `sni`.
///
/// Produces a plausible TLS 1.3-capable hello: legacy_version 0x0303, random
/// from `rng`, empty session id, a small cipher list, and SNI /
/// supported_groups / signature_algorithms / supported_versions / key_share /
/// ALPN extensions, padded to a fixed extensions length. Used by the fetcher
/// to probe origin servers; the key share is random bytes, not a real key.
fn build_client_hello(sni: &str, rng: &SecureRandom) -> Vec<u8> {
    // === ClientHello body ===
    let mut body = Vec::new();

    // legacy_version = 0x0303 (TLS 1.2), as in real TLS 1.3 ClientHellos
    body.extend_from_slice(&[0x03, 0x03]);

    // Random
    body.extend_from_slice(&rng.bytes(32));

    // Session ID: empty
    body.push(0);

    // Cipher suites (common minimal set, TLS1.3 + a few 1.2 fallbacks)
    let cipher_suites: [u8; 10] = [
        0x13, 0x01, // TLS_AES_128_GCM_SHA256
        0x13, 0x02, // TLS_AES_256_GCM_SHA384
        0x13, 0x03, // TLS_CHACHA20_POLY1305_SHA256
        0x00, 0x2f, // TLS_RSA_WITH_AES_128_CBC_SHA (legacy)
        0x00, 0xff, // RENEGOTIATION_INFO_SCSV
    ];
    body.extend_from_slice(&(cipher_suites.len() as u16).to_be_bytes());
    body.extend_from_slice(&cipher_suites);

    // Compression methods: null only
    body.push(1);
    body.push(0);

    // === Extensions ===
    let mut exts = Vec::new();

    // server_name (SNI): server_name_list_len(2) + type(1) + name_len(2) + name
    let sni_bytes = sni.as_bytes();
    let mut sni_ext = Vec::with_capacity(5 + sni_bytes.len());
    sni_ext.extend_from_slice(&(sni_bytes.len() as u16 + 3).to_be_bytes());
    sni_ext.push(0); // host_name
    sni_ext.extend_from_slice(&(sni_bytes.len() as u16).to_be_bytes());
    sni_ext.extend_from_slice(sni_bytes);
    exts.extend_from_slice(&0x0000u16.to_be_bytes());
    exts.extend_from_slice(&(sni_ext.len() as u16).to_be_bytes());
    exts.extend_from_slice(&sni_ext);

    // supported_groups
    let groups: [u16; 2] = [0x001d, 0x0017]; // x25519, secp256r1
    exts.extend_from_slice(&0x000au16.to_be_bytes());
    exts.extend_from_slice(&((2 + groups.len() * 2) as u16).to_be_bytes());
    exts.extend_from_slice(&(groups.len() as u16 * 2).to_be_bytes());
    for g in groups { exts.extend_from_slice(&g.to_be_bytes()); }

    // signature_algorithms
    // rsa_pss_rsae_sha256, rsa_pss_rsae_sha384, ecdsa_secp256r1_sha256, ecdsa_secp384r1_sha384
    let sig_algs: [u16; 4] = [0x0804, 0x0805, 0x0403, 0x0503];
    exts.extend_from_slice(&0x000du16.to_be_bytes());
    exts.extend_from_slice(&((2 + sig_algs.len() * 2) as u16).to_be_bytes());
    exts.extend_from_slice(&(sig_algs.len() as u16 * 2).to_be_bytes());
    for a in sig_algs { exts.extend_from_slice(&a.to_be_bytes()); }

    // supported_versions (TLS1.3 + TLS1.2)
    let versions: [u16; 2] = [0x0304, 0x0303];
    exts.extend_from_slice(&0x002bu16.to_be_bytes());
    exts.extend_from_slice(&((1 + versions.len() * 2) as u16).to_be_bytes());
    exts.push((versions.len() * 2) as u8);
    for v in versions { exts.extend_from_slice(&v.to_be_bytes()); }

    // key_share (x25519) — random bytes posing as a public key
    let key = gen_key_share(rng);
    let mut keyshare = Vec::with_capacity(4 + key.len());
    keyshare.extend_from_slice(&0x001du16.to_be_bytes()); // group
    keyshare.extend_from_slice(&(key.len() as u16).to_be_bytes());
    keyshare.extend_from_slice(&key);
    exts.extend_from_slice(&0x0033u16.to_be_bytes());
    exts.extend_from_slice(&((2 + keyshare.len()) as u16).to_be_bytes());
    exts.extend_from_slice(&(keyshare.len() as u16).to_be_bytes());
    exts.extend_from_slice(&keyshare);

    // ALPN (http/1.1)
    let alpn_proto = b"http/1.1";
    exts.extend_from_slice(&0x0010u16.to_be_bytes());
    exts.extend_from_slice(&((2 + 1 + alpn_proto.len()) as u16).to_be_bytes());
    exts.extend_from_slice(&((1 + alpn_proto.len()) as u16).to_be_bytes());
    exts.push(alpn_proto.len() as u8);
    exts.extend_from_slice(alpn_proto);

    // padding extension to reduce recognizability and fix the extensions
    // block at TARGET_EXT_LEN bytes regardless of SNI length
    const TARGET_EXT_LEN: usize = 180;
    if exts.len() < TARGET_EXT_LEN {
        let remaining = TARGET_EXT_LEN - exts.len();
        if remaining > 4 {
            let pad_len = remaining - 4; // minus type+len
            exts.extend_from_slice(&0x0015u16.to_be_bytes()); // padding extension
            exts.extend_from_slice(&(pad_len as u16).to_be_bytes());
            exts.resize(exts.len() + pad_len, 0);
        }
    }

    // Extensions length prefix
    body.extend_from_slice(&(exts.len() as u16).to_be_bytes());
    body.extend_from_slice(&exts);

    // === Handshake wrapper ===
    let mut handshake = Vec::new();
    handshake.push(0x01); // ClientHello
    let len_bytes = (body.len() as u32).to_be_bytes();
    handshake.extend_from_slice(&len_bytes[1..4]); // u24 length
    handshake.extend_from_slice(&body);

    // === Record ===
    let mut record = Vec::new();
    record.push(TLS_RECORD_HANDSHAKE);
    record.extend_from_slice(&[0x03, 0x01]); // legacy record version (TLS 1.0)
    record.extend_from_slice(&(handshake.len() as u16).to_be_bytes());
    record.extend_from_slice(&handshake);

    record
}
|
||||
|
||||
fn gen_key_share(rng: &SecureRandom) -> [u8; 32] {
|
||||
let mut key = [0u8; 32];
|
||||
key.copy_from_slice(&rng.bytes(32));
|
||||
key
|
||||
}
|
||||
|
||||
async fn read_tls_record<S>(stream: &mut S) -> Result<(u8, Vec<u8>)>
|
||||
where
|
||||
S: AsyncRead + Unpin,
|
||||
{
|
||||
let mut header = [0u8; 5];
|
||||
stream.read_exact(&mut header).await?;
|
||||
let len = u16::from_be_bytes([header[3], header[4]]) as usize;
|
||||
let mut body = vec![0u8; len];
|
||||
stream.read_exact(&mut body).await?;
|
||||
Ok((header[0], body))
|
||||
}
|
||||
|
||||
fn parse_server_hello(body: &[u8]) -> Option<ParsedServerHello> {
|
||||
if body.len() < 4 || body[0] != 0x02 {
|
||||
return None;
|
||||
}
|
||||
|
||||
let msg_len = u32::from_be_bytes([0, body[1], body[2], body[3]]) as usize;
|
||||
if msg_len + 4 > body.len() {
|
||||
return None;
|
||||
}
|
||||
|
||||
let mut pos = 4;
|
||||
let version = [*body.get(pos)?, *body.get(pos + 1)?];
|
||||
pos += 2;
|
||||
|
||||
let mut random = [0u8; 32];
|
||||
random.copy_from_slice(body.get(pos..pos + 32)?);
|
||||
pos += 32;
|
||||
|
||||
let session_len = *body.get(pos)? as usize;
|
||||
pos += 1;
|
||||
let session_id = body.get(pos..pos + session_len)?.to_vec();
|
||||
pos += session_len;
|
||||
|
||||
let cipher_suite = [*body.get(pos)?, *body.get(pos + 1)?];
|
||||
pos += 2;
|
||||
|
||||
let compression = *body.get(pos)?;
|
||||
pos += 1;
|
||||
|
||||
let ext_len = u16::from_be_bytes([*body.get(pos)?, *body.get(pos + 1)?]) as usize;
|
||||
pos += 2;
|
||||
let ext_end = pos.checked_add(ext_len)?;
|
||||
if ext_end > body.len() {
|
||||
return None;
|
||||
}
|
||||
|
||||
let mut extensions = Vec::new();
|
||||
while pos + 4 <= ext_end {
|
||||
let etype = u16::from_be_bytes([body[pos], body[pos + 1]]);
|
||||
let elen = u16::from_be_bytes([body[pos + 2], body[pos + 3]]) as usize;
|
||||
pos += 4;
|
||||
let data = body.get(pos..pos + elen)?.to_vec();
|
||||
pos += elen;
|
||||
extensions.push(TlsExtension { ext_type: etype, data });
|
||||
}
|
||||
|
||||
Some(ParsedServerHello {
|
||||
version,
|
||||
random,
|
||||
session_id,
|
||||
cipher_suite,
|
||||
compression,
|
||||
extensions,
|
||||
})
|
||||
}
|
||||
|
||||
/// Extract display metadata (validity window, CNs, DNS SANs) from the first
/// (end-entity) certificate in `certs`.
///
/// Returns `None` when the chain is empty or the leaf fails DER parsing;
/// individual missing fields become `None`/empty instead of failing the call.
fn parse_cert_info(certs: &[CertificateDer<'static>]) -> Option<ParsedCertificateInfo> {
    let first = certs.first()?;
    let (_rem, cert) = X509Certificate::from_der(first.as_ref()).ok()?;

    // Validity bounds as unix timestamps.
    let not_before = Some(cert.validity().not_before.to_datetime().unix_timestamp());
    let not_after = Some(cert.validity().not_after.to_datetime().unix_timestamp());

    // First CN attribute of the issuer DN, if it decodes as a string.
    let issuer_cn = cert
        .issuer()
        .iter_common_name()
        .next()
        .and_then(|cn| cn.as_str().ok())
        .map(|s| s.to_string());

    // First CN attribute of the subject DN, if it decodes as a string.
    let subject_cn = cert
        .subject()
        .iter_common_name()
        .next()
        .and_then(|cn| cn.as_str().ok())
        .map(|s| s.to_string());

    // DNS entries from the SAN extension; other general-name kinds
    // (IP, email, URI, ...) are ignored.
    let san_names = cert
        .subject_alternative_name()
        .ok()
        .flatten()
        .map(|san| {
            san.value
                .general_names
                .iter()
                .filter_map(|gn| match gn {
                    x509_parser::extensions::GeneralName::DNSName(n) => Some(n.to_string()),
                    _ => None,
                })
                .collect::<Vec<_>>()
        })
        .unwrap_or_default();

    Some(ParsedCertificateInfo {
        not_after_unix: not_after,
        not_before_unix: not_before,
        issuer_cn,
        subject_cn,
        san_names,
    })
}
|
||||
|
||||
/// Encode `value` as a 3-byte big-endian integer (TLS u24 length field).
/// Returns `None` when the value does not fit in 24 bits.
fn u24_bytes(value: usize) -> Option<[u8; 3]> {
    if value > 0x00ff_ffff {
        return None;
    }
    let be = (value as u32).to_be_bytes();
    Some([be[1], be[2], be[3]])
}
|
||||
|
||||
async fn connect_with_dns_override(
|
||||
host: &str,
|
||||
port: u16,
|
||||
connect_timeout: Duration,
|
||||
) -> Result<TcpStream> {
|
||||
if let Some(addr) = resolve_socket_addr(host, port) {
|
||||
return Ok(timeout(connect_timeout, TcpStream::connect(addr)).await??);
|
||||
}
|
||||
Ok(timeout(connect_timeout, TcpStream::connect((host, port))).await??)
|
||||
}
|
||||
|
||||
async fn connect_tcp_with_upstream(
|
||||
host: &str,
|
||||
port: u16,
|
||||
connect_timeout: Duration,
|
||||
upstream: Option<std::sync::Arc<crate::transport::UpstreamManager>>,
|
||||
) -> Result<TcpStream> {
|
||||
if let Some(manager) = upstream {
|
||||
if let Some(addr) = resolve_socket_addr(host, port) {
|
||||
match manager.connect(addr, None, None).await {
|
||||
Ok(stream) => return Ok(stream),
|
||||
Err(e) => {
|
||||
warn!(
|
||||
host = %host,
|
||||
port = port,
|
||||
error = %e,
|
||||
"Upstream connect failed, using direct connect"
|
||||
);
|
||||
}
|
||||
}
|
||||
} else if let Ok(mut addrs) = tokio::net::lookup_host((host, port)).await {
|
||||
if let Some(addr) = addrs.find(|a| a.is_ipv4()) {
|
||||
match manager.connect(addr, None, None).await {
|
||||
Ok(stream) => return Ok(stream),
|
||||
Err(e) => {
|
||||
warn!(
|
||||
host = %host,
|
||||
port = port,
|
||||
error = %e,
|
||||
"Upstream connect failed, using direct connect"
|
||||
);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
connect_with_dns_override(host, port, connect_timeout).await
|
||||
}
|
||||
|
||||
fn encode_tls13_certificate_message(cert_chain_der: &[Vec<u8>]) -> Option<Vec<u8>> {
|
||||
if cert_chain_der.is_empty() {
|
||||
return None;
|
||||
}
|
||||
|
||||
let mut certificate_list = Vec::new();
|
||||
for cert in cert_chain_der {
|
||||
if cert.is_empty() {
|
||||
return None;
|
||||
}
|
||||
certificate_list.extend_from_slice(&u24_bytes(cert.len())?);
|
||||
certificate_list.extend_from_slice(cert);
|
||||
certificate_list.extend_from_slice(&0u16.to_be_bytes()); // cert_entry extensions
|
||||
}
|
||||
|
||||
// Certificate = context_len(1) + certificate_list_len(3) + entries
|
||||
let body_len = 1usize
|
||||
.checked_add(3)?
|
||||
.checked_add(certificate_list.len())?;
|
||||
|
||||
let mut message = Vec::with_capacity(4 + body_len);
|
||||
message.push(0x0b); // HandshakeType::certificate
|
||||
message.extend_from_slice(&u24_bytes(body_len)?);
|
||||
message.push(0x00); // certificate_request_context length
|
||||
message.extend_from_slice(&u24_bytes(certificate_list.len())?);
|
||||
message.extend_from_slice(&certificate_list);
|
||||
Some(message)
|
||||
}
|
||||
|
||||
/// Probe a live TLS server over an already-connected stream with a
/// hand-built ClientHello, without completing the handshake.
///
/// Captures the parsed ServerHello plus the sizes of any early
/// ApplicationData records so they can be mimicked later. Optionally
/// prefixes the stream with a PROXY protocol header (`proxy_protocol`:
/// 0 = none, 2 = v2, anything else non-zero = v1). `cert_info` and
/// `cert_payload` are always `None` on this path — certificates require
/// the rustls path.
async fn fetch_via_raw_tls_stream<S>(
    mut stream: S,
    sni: &str,
    connect_timeout: Duration,
    proxy_protocol: u8,
) -> Result<TlsFetchResult>
where
    S: AsyncRead + AsyncWrite + Unpin,
{
    let rng = SecureRandom::new();
    let client_hello = build_client_hello(sni, &rng);
    // Send (optional) PROXY header + ClientHello under a single timeout.
    timeout(connect_timeout, async {
        if proxy_protocol > 0 {
            let header = match proxy_protocol {
                2 => ProxyProtocolV2Builder::new().build(),
                _ => ProxyProtocolV1Builder::new().build(),
            };
            stream.write_all(&header).await?;
        }
        stream.write_all(&client_hello).await?;
        stream.flush().await?;
        Ok::<(), std::io::Error>(())
    })
    .await??;

    let mut records = Vec::new();
    // Read up to 4 records: ServerHello, CCS, and up to two ApplicationData.
    for _ in 0..4 {
        match timeout(connect_timeout, read_tls_record(&mut stream)).await {
            Ok(Ok(rec)) => records.push(rec),
            Ok(Err(e)) => return Err(e),
            // A read timeout merely ends collection; what we already have
            // may still be usable.
            Err(_) => break,
        }
        // Stop early once at least 3 records are in and one of them is
        // ApplicationData.
        if records.len() >= 3 && records.iter().any(|(t, _)| *t == TLS_RECORD_APPLICATION) {
            break;
        }
    }

    // Split the captured records into the first handshake record (parsed
    // as ServerHello) and the ApplicationData sizes.
    let mut app_sizes = Vec::new();
    let mut server_hello = None;
    for (t, body) in &records {
        if *t == TLS_RECORD_HANDSHAKE && server_hello.is_none() {
            server_hello = parse_server_hello(body);
        } else if *t == TLS_RECORD_APPLICATION {
            app_sizes.push(body.len());
        }
    }

    let parsed = server_hello.ok_or_else(|| anyhow!("ServerHello not received"))?;
    // Floor at 1024 so downstream emulation never works with a zero length.
    let total_app_data_len = app_sizes.iter().sum::<usize>().max(1024);

    Ok(TlsFetchResult {
        server_hello_parsed: parsed,
        // If no ApplicationData was seen, synthesize a single record of the
        // fallback size.
        app_data_records_sizes: if app_sizes.is_empty() {
            vec![total_app_data_len]
        } else {
            app_sizes
        },
        total_app_data_len,
        cert_info: None,
        cert_payload: None,
    })
}
|
||||
|
||||
/// Run the raw TLS probe against `host:port` (or a mask unix socket when
/// configured), sending the ClientHello for `sni`.
///
/// On unix builds a configured `unix_sock` path is tried first; connect
/// errors or timeouts are logged and fall back to TCP (direct or through
/// `upstream`).
async fn fetch_via_raw_tls(
    host: &str,
    port: u16,
    sni: &str,
    connect_timeout: Duration,
    upstream: Option<std::sync::Arc<crate::transport::UpstreamManager>>,
    proxy_protocol: u8,
    unix_sock: Option<&str>,
) -> Result<TlsFetchResult> {
    #[cfg(unix)]
    if let Some(sock_path) = unix_sock {
        match timeout(connect_timeout, UnixStream::connect(sock_path)).await {
            Ok(Ok(stream)) => {
                debug!(
                    sni = %sni,
                    sock = %sock_path,
                    "Raw TLS fetch using mask unix socket"
                );
                return fetch_via_raw_tls_stream(stream, sni, connect_timeout, proxy_protocol).await;
            }
            Ok(Err(e)) => {
                warn!(
                    sni = %sni,
                    sock = %sock_path,
                    error = %e,
                    "Raw TLS unix socket connect failed, falling back to TCP"
                );
            }
            Err(_) => {
                warn!(
                    sni = %sni,
                    sock = %sock_path,
                    "Raw TLS unix socket connect timed out, falling back to TCP"
                );
            }
        }
    }

    // Non-unix targets never use the socket path; silence the unused binding.
    #[cfg(not(unix))]
    let _ = unix_sock;

    let stream = connect_tcp_with_upstream(host, port, connect_timeout, upstream).await?;
    fetch_via_raw_tls_stream(stream, sni, connect_timeout, proxy_protocol).await
}
|
||||
|
||||
/// Perform a real rustls handshake over `stream` to capture the upstream
/// certificate chain and negotiated cipher suite.
///
/// Unlike the raw probe, record sizes here are synthesized, not observed:
/// the encoded certificate-message length drives one or two fake
/// ApplicationData record sizes. The returned ServerHello is a template
/// (zeroed random, empty session id / extensions) carrying only the
/// version bytes and the negotiated cipher.
async fn fetch_via_rustls_stream<S>(
    mut stream: S,
    host: &str,
    sni: &str,
    proxy_protocol: u8,
) -> Result<TlsFetchResult>
where
    S: AsyncRead + AsyncWrite + Unpin,
{
    // rustls handshake path for certificate and basic negotiated metadata.
    if proxy_protocol > 0 {
        let header = match proxy_protocol {
            2 => ProxyProtocolV2Builder::new().build(),
            _ => ProxyProtocolV1Builder::new().build(),
        };
        stream.write_all(&header).await?;
        stream.flush().await?;
    }

    let config = build_client_config();
    let connector = TlsConnector::from(config);

    // Prefer the SNI as the server name; fall back to the host string.
    let server_name = ServerName::try_from(sni.to_owned())
        .or_else(|_| ServerName::try_from(host.to_owned()))
        .map_err(|_| RustlsError::General("invalid SNI".into()))?;

    let tls_stream: TlsStream<S> = connector.connect(server_name, stream).await?;

    // Extract negotiated parameters and certificates
    let (_io, session) = tls_stream.get_ref();
    // Default to 0x1301 (TLS_AES_128_GCM_SHA256) if nothing was negotiated.
    let cipher_suite = session
        .negotiated_cipher_suite()
        .map(|s| u16::from(s.suite()).to_be_bytes())
        .unwrap_or([0x13, 0x01]);

    let certs: Vec<CertificateDer<'static>> = session
        .peer_certificates()
        .map(|slice| slice.to_vec())
        .unwrap_or_default();
    let cert_chain_der: Vec<Vec<u8>> = certs.iter().map(|c| c.as_ref().to_vec()).collect();
    // Wrap the chain in a replayable TLS 1.3 Certificate message when possible.
    let cert_payload = encode_tls13_certificate_message(&cert_chain_der).map(|certificate_message| {
        TlsCertPayload {
            cert_chain_der: cert_chain_der.clone(),
            certificate_message,
        }
    });

    // Prefer the encoded message length; fall back to the raw chain size.
    // Floor at 1024 so emulation always has something to send.
    let total_cert_len = cert_payload
        .as_ref()
        .map(|payload| payload.certificate_message.len())
        .unwrap_or_else(|| cert_chain_der.iter().map(Vec::len).sum::<usize>())
        .max(1024);
    let cert_info = parse_cert_info(&certs);

    // Heuristic: split across two records if large to mimic real servers a bit.
    let app_data_records_sizes = if total_cert_len > 3000 {
        vec![total_cert_len / 2, total_cert_len - total_cert_len / 2]
    } else {
        vec![total_cert_len]
    };

    // Template ServerHello: only version and cipher are meaningful here.
    let parsed = ParsedServerHello {
        version: [0x03, 0x03],
        random: [0u8; 32],
        session_id: Vec::new(),
        cipher_suite,
        compression: 0,
        extensions: Vec::new(),
    };

    debug!(
        sni = %sni,
        len = total_cert_len,
        cipher = format!("0x{:04x}", u16::from_be_bytes(cipher_suite)),
        has_cert_payload = cert_payload.is_some(),
        "Fetched TLS metadata via rustls"
    );

    Ok(TlsFetchResult {
        server_hello_parsed: parsed,
        app_data_records_sizes: app_data_records_sizes.clone(),
        total_app_data_len: app_data_records_sizes.iter().sum(),
        cert_info,
        cert_payload,
    })
}
|
||||
|
||||
/// Run the rustls certificate fetch against `host:port` (or a mask unix
/// socket when configured).
///
/// On unix builds a configured `unix_sock` path is tried first; connect
/// errors or timeouts are logged and fall back to TCP (direct or through
/// `upstream`). Mirrors the fallback structure of `fetch_via_raw_tls`.
async fn fetch_via_rustls(
    host: &str,
    port: u16,
    sni: &str,
    connect_timeout: Duration,
    upstream: Option<std::sync::Arc<crate::transport::UpstreamManager>>,
    proxy_protocol: u8,
    unix_sock: Option<&str>,
) -> Result<TlsFetchResult> {
    #[cfg(unix)]
    if let Some(sock_path) = unix_sock {
        match timeout(connect_timeout, UnixStream::connect(sock_path)).await {
            Ok(Ok(stream)) => {
                debug!(
                    sni = %sni,
                    sock = %sock_path,
                    "Rustls fetch using mask unix socket"
                );
                return fetch_via_rustls_stream(stream, host, sni, proxy_protocol).await;
            }
            Ok(Err(e)) => {
                warn!(
                    sni = %sni,
                    sock = %sock_path,
                    error = %e,
                    "Rustls unix socket connect failed, falling back to TCP"
                );
            }
            Err(_) => {
                warn!(
                    sni = %sni,
                    sock = %sock_path,
                    "Rustls unix socket connect timed out, falling back to TCP"
                );
            }
        }
    }

    // Non-unix targets never use the socket path; silence the unused binding.
    #[cfg(not(unix))]
    let _ = unix_sock;

    let stream = connect_tcp_with_upstream(host, port, connect_timeout, upstream).await?;
    fetch_via_rustls_stream(stream, host, sni, proxy_protocol).await
}
|
||||
|
||||
/// Fetch real TLS metadata for the given SNI.
|
||||
///
|
||||
/// Strategy:
|
||||
/// 1) Probe raw TLS for realistic ServerHello and ApplicationData record sizes.
|
||||
/// 2) Fetch certificate chain via rustls to build cert payload.
|
||||
/// 3) Merge both when possible; otherwise auto-fallback to whichever succeeded.
|
||||
pub async fn fetch_real_tls(
|
||||
host: &str,
|
||||
port: u16,
|
||||
sni: &str,
|
||||
connect_timeout: Duration,
|
||||
upstream: Option<std::sync::Arc<crate::transport::UpstreamManager>>,
|
||||
proxy_protocol: u8,
|
||||
unix_sock: Option<&str>,
|
||||
) -> Result<TlsFetchResult> {
|
||||
let raw_result = match fetch_via_raw_tls(
|
||||
host,
|
||||
port,
|
||||
sni,
|
||||
connect_timeout,
|
||||
upstream.clone(),
|
||||
proxy_protocol,
|
||||
unix_sock,
|
||||
)
|
||||
.await
|
||||
{
|
||||
Ok(res) => Some(res),
|
||||
Err(e) => {
|
||||
warn!(sni = %sni, error = %e, "Raw TLS fetch failed");
|
||||
None
|
||||
}
|
||||
};
|
||||
|
||||
match fetch_via_rustls(
|
||||
host,
|
||||
port,
|
||||
sni,
|
||||
connect_timeout,
|
||||
upstream,
|
||||
proxy_protocol,
|
||||
unix_sock,
|
||||
)
|
||||
.await
|
||||
{
|
||||
Ok(rustls_result) => {
|
||||
if let Some(mut raw) = raw_result {
|
||||
raw.cert_info = rustls_result.cert_info;
|
||||
raw.cert_payload = rustls_result.cert_payload;
|
||||
debug!(sni = %sni, "Fetched TLS metadata via raw probe + rustls cert chain");
|
||||
Ok(raw)
|
||||
} else {
|
||||
Ok(rustls_result)
|
||||
}
|
||||
}
|
||||
Err(e) => {
|
||||
if let Some(raw) = raw_result {
|
||||
warn!(sni = %sni, error = %e, "Rustls cert fetch failed, using raw TLS metadata only");
|
||||
Ok(raw)
|
||||
} else {
|
||||
Err(e)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
mod tests {
    use super::encode_tls13_certificate_message;

    /// Decode a 3-byte big-endian integer from the front of `bytes`.
    fn read_u24(bytes: &[u8]) -> usize {
        bytes[..3]
            .iter()
            .fold(0usize, |acc, &b| (acc << 8) | b as usize)
    }

    #[test]
    fn test_encode_tls13_certificate_message_single_cert() {
        let cert = vec![0x30, 0x03, 0x02, 0x01, 0x01];
        let message = encode_tls13_certificate_message(&[cert.clone()]).expect("message");

        // HandshakeType::certificate with a correct u24 body length.
        assert_eq!(message[0], 0x0b);
        assert_eq!(read_u24(&message[1..4]), message.len() - 4);
        // Empty certificate_request_context.
        assert_eq!(message[4], 0x00);

        // certificate_list holds one entry: u24 len + DER + u16 extensions.
        let cert_list_len = read_u24(&message[5..8]);
        assert_eq!(cert_list_len, cert.len() + 5);

        let cert_len = read_u24(&message[8..11]);
        assert_eq!(cert_len, cert.len());
        assert_eq!(&message[11..11 + cert.len()], cert.as_slice());
        assert_eq!(&message[11 + cert.len()..13 + cert.len()], &[0x00, 0x00]);
    }

    #[test]
    fn test_encode_tls13_certificate_message_empty_chain() {
        assert!(encode_tls13_certificate_message(&[]).is_none());
    }
}
|
||||
8
src/tls_front/mod.rs
Normal file
8
src/tls_front/mod.rs
Normal file
@@ -0,0 +1,8 @@
|
||||
pub mod types;
|
||||
pub mod cache;
|
||||
pub mod fetcher;
|
||||
pub mod emulator;
|
||||
|
||||
pub use cache::TlsFrontCache;
|
||||
#[allow(unused_imports)]
|
||||
pub use types::{CachedTlsData, TlsFetchResult};
|
||||
68
src/tls_front/types.rs
Normal file
68
src/tls_front/types.rs
Normal file
@@ -0,0 +1,68 @@
|
||||
use std::time::SystemTime;
|
||||
use serde::{Serialize, Deserialize};
|
||||
|
||||
/// Parsed representation of an unencrypted TLS ServerHello.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ParsedServerHello {
    /// legacy_version bytes as seen on the wire (e.g. 0x03 0x03).
    pub version: [u8; 2],
    /// The 32-byte server random.
    pub random: [u8; 32],
    /// Echoed legacy session id (may be empty).
    pub session_id: Vec<u8>,
    /// Selected cipher suite as a big-endian byte pair.
    pub cipher_suite: [u8; 2],
    /// legacy compression method byte (0 = none).
    pub compression: u8,
    /// Raw extensions attached to the hello.
    pub extensions: Vec<TlsExtension>,
}
|
||||
|
||||
/// Generic TLS extension container.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct TlsExtension {
    /// Extension type code (IANA TLS ExtensionType value).
    pub ext_type: u16,
    /// Raw extension payload bytes.
    pub data: Vec<u8>,
}
|
||||
|
||||
/// Basic certificate metadata (optional, informative).
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ParsedCertificateInfo {
    /// notAfter as Unix seconds, when parseable.
    pub not_after_unix: Option<i64>,
    /// notBefore as Unix seconds, when parseable.
    pub not_before_unix: Option<i64>,
    /// Issuer common name, when present.
    pub issuer_cn: Option<String>,
    /// Subject common name, when present.
    pub subject_cn: Option<String>,
    /// Subject alternative names.
    pub san_names: Vec<String>,
}
|
||||
|
||||
/// TLS certificate payload captured from profiled upstream.
///
/// `certificate_message` stores an encoded TLS 1.3 Certificate handshake
/// message body that can be replayed as opaque ApplicationData bytes in FakeTLS.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct TlsCertPayload {
    /// Raw DER certificate chain as received from the peer.
    pub cert_chain_der: Vec<Vec<u8>>,
    /// Encoded TLS 1.3 Certificate handshake message built from the chain.
    pub certificate_message: Vec<u8>,
}
|
||||
|
||||
/// Cached data per SNI used by the emulator.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct CachedTlsData {
    /// ServerHello to use as a replay template.
    pub server_hello_template: ParsedServerHello,
    /// Optional certificate metadata.
    pub cert_info: Option<ParsedCertificateInfo>,
    /// Optional captured certificate payload; `serde(default)` keeps
    /// caches written before this field existed loadable.
    #[serde(default)]
    pub cert_payload: Option<TlsCertPayload>,
    /// Sizes of the ApplicationData records observed upstream.
    pub app_data_records_sizes: Vec<usize>,
    /// Total ApplicationData byte count as captured.
    pub total_app_data_len: usize,
    /// In-memory fetch timestamp; never (de)serialized — reset to "now"
    /// on load via `now_system_time`.
    #[serde(default = "now_system_time", skip_serializing, skip_deserializing)]
    pub fetched_at: SystemTime,
    /// The SNI/domain this entry was captured for.
    pub domain: String,
}
|
||||
|
||||
/// serde default for `CachedTlsData::fetched_at`.
fn now_system_time() -> SystemTime {
    SystemTime::now()
}
|
||||
|
||||
/// Result of attempting to fetch real TLS artifacts.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct TlsFetchResult {
    /// ServerHello captured from (or synthesized for) the upstream.
    pub server_hello_parsed: ParsedServerHello,
    /// ApplicationData record sizes to emulate.
    pub app_data_records_sizes: Vec<usize>,
    /// Total ApplicationData byte count.
    pub total_app_data_len: usize,
    /// Parsed certificate metadata (populated by the rustls path only).
    pub cert_info: Option<ParsedCertificateInfo>,
    /// Replayable certificate payload (populated by the rustls path only).
    pub cert_payload: Option<TlsCertPayload>,
}
|
||||
@@ -1,925 +0,0 @@
|
||||
//! Middle Proxy RPC Transport
|
||||
//!
|
||||
//! Implements Telegram Middle-End RPC protocol for routing to ALL DCs (including CDN).
|
||||
//!
|
||||
//! ## Phase 3 fixes:
|
||||
//! - ROOT CAUSE: Use Telegram proxy-secret (binary file) not user secret
|
||||
//! - Streaming handshake response (no fixed-size read deadlock)
|
||||
//! - Health monitoring + reconnection
|
||||
//! - Hex diagnostics for debugging
|
||||
|
||||
use std::collections::HashMap;
|
||||
use std::net::{IpAddr, Ipv4Addr, SocketAddr};
|
||||
use std::sync::Arc;
|
||||
use std::sync::atomic::{AtomicU64, Ordering};
|
||||
use std::time::Duration;
|
||||
use bytes::{Bytes, BytesMut};
|
||||
use tokio::io::{AsyncReadExt, AsyncWriteExt};
|
||||
use tokio::net::TcpStream;
|
||||
use tokio::sync::{mpsc, Mutex, RwLock};
|
||||
use tokio::time::{timeout, Instant};
|
||||
use tracing::{debug, info, trace, warn, error};
|
||||
|
||||
use crate::crypto::{crc32, derive_middleproxy_keys, AesCbc, SecureRandom};
|
||||
use crate::error::{ProxyError, Result};
|
||||
use crate::protocol::constants::*;
|
||||
|
||||
// ========== Proxy Secret Fetching ==========
|
||||
|
||||
/// Fetch the Telegram proxy-secret binary file.
///
/// This is NOT the user secret (-S flag, 16 bytes hex for clients).
/// This is the infrastructure secret (--aes-pwd in C MTProxy),
/// a binary file of 32-512 bytes used for ME RPC key derivation.
///
/// Strategy: try local cache, then download from Telegram.
pub async fn fetch_proxy_secret(cache_path: Option<&str>) -> Result<Vec<u8>> {
    let cache = cache_path.unwrap_or("proxy-secret");

    // 1. Try local cache (< 24h old)
    if let Ok(metadata) = tokio::fs::metadata(cache).await {
        if let Ok(modified) = metadata.modified() {
            // Unreadable mtimes are treated as "infinitely old" so the
            // stale-cache branch is skipped and we re-download.
            let age = std::time::SystemTime::now()
                .duration_since(modified)
                .unwrap_or(Duration::from_secs(u64::MAX));
            if age < Duration::from_secs(86400) {
                if let Ok(data) = tokio::fs::read(cache).await {
                    // 32 bytes is the minimum valid secret size.
                    if data.len() >= 32 {
                        info!(
                            path = cache,
                            len = data.len(),
                            age_hours = age.as_secs() / 3600,
                            "Loaded proxy-secret from cache"
                        );
                        return Ok(data);
                    }
                    warn!(path = cache, len = data.len(), "Cached proxy-secret too short");
                }
            }
        }
    }

    // 2. Download from Telegram
    info!("Downloading proxy-secret from core.telegram.org...");
    let data = download_proxy_secret().await?;

    // 3. Cache locally (best-effort)
    if let Err(e) = tokio::fs::write(cache, &data).await {
        warn!(error = %e, "Failed to cache proxy-secret (non-fatal)");
    } else {
        debug!(path = cache, len = data.len(), "Cached proxy-secret");
    }

    Ok(data)
}
|
||||
|
||||
/// Download the proxy-secret binary from Telegram over HTTPS.
///
/// Validates the HTTP status and enforces a minimum secret length of
/// 32 bytes; all failures are wrapped in `ProxyError::Proxy`.
async fn download_proxy_secret() -> Result<Vec<u8>> {
    let url = "https://core.telegram.org/getProxySecret";
    let resp = reqwest::get(url)
        .await
        .map_err(|e| ProxyError::Proxy(format!("Failed to download proxy-secret: {}", e)))?;

    if !resp.status().is_success() {
        return Err(ProxyError::Proxy(format!(
            "proxy-secret download HTTP {}", resp.status()
        )));
    }

    let data = resp.bytes().await
        .map_err(|e| ProxyError::Proxy(format!("Read proxy-secret body: {}", e)))?
        .to_vec();

    // Same minimum-length rule as the cache path.
    if data.len() < 32 {
        return Err(ProxyError::Proxy(format!(
            "proxy-secret too short: {} bytes (need >= 32)", data.len()
        )));
    }

    info!(len = data.len(), "Downloaded proxy-secret OK");
    Ok(data)
}
|
||||
|
||||
// ========== RPC Frame helpers ==========
|
||||
|
||||
/// Build an RPC frame: [len(4) | seq_no(4) | payload | crc32(4)]
|
||||
fn build_rpc_frame(seq_no: i32, payload: &[u8]) -> Vec<u8> {
|
||||
let total_len = (4 + 4 + payload.len() + 4) as u32;
|
||||
let mut f = Vec::with_capacity(total_len as usize);
|
||||
f.extend_from_slice(&total_len.to_le_bytes());
|
||||
f.extend_from_slice(&seq_no.to_le_bytes());
|
||||
f.extend_from_slice(payload);
|
||||
let c = crc32(&f);
|
||||
f.extend_from_slice(&c.to_le_bytes());
|
||||
f
|
||||
}
|
||||
|
||||
/// Read one plaintext RPC frame. Returns (seq_no, payload).
///
/// Frame layout: len(4 LE) | seq_no(4 LE) | payload | crc32(4 LE), where
/// `len` counts the whole frame. Rejects lengths below 12 bytes or above
/// 16 MiB, and any CRC mismatch (the CRC covers everything before itself,
/// including the length word).
async fn read_rpc_frame_plaintext(
    rd: &mut (impl AsyncReadExt + Unpin),
) -> Result<(i32, Vec<u8>)> {
    let mut len_buf = [0u8; 4];
    rd.read_exact(&mut len_buf).await.map_err(ProxyError::Io)?;
    let total_len = u32::from_le_bytes(len_buf) as usize;

    // Sanity-check before allocating: 12 = len + seq + crc with no payload.
    if total_len < 12 || total_len > (1 << 24) {
        return Err(ProxyError::InvalidHandshake(
            format!("Bad RPC frame length: {}", total_len),
        ));
    }

    // Read the remainder (seq_no + payload + crc).
    let mut rest = vec![0u8; total_len - 4];
    rd.read_exact(&mut rest).await.map_err(ProxyError::Io)?;

    // Reassemble the full frame since the CRC also covers the length word.
    let mut full = Vec::with_capacity(total_len);
    full.extend_from_slice(&len_buf);
    full.extend_from_slice(&rest);

    let crc_offset = total_len - 4;
    let expected_crc = u32::from_le_bytes([
        full[crc_offset], full[crc_offset + 1],
        full[crc_offset + 2], full[crc_offset + 3],
    ]);
    let actual_crc = crc32(&full[..crc_offset]);
    if expected_crc != actual_crc {
        return Err(ProxyError::InvalidHandshake(
            format!("CRC mismatch: 0x{:08x} vs 0x{:08x}", expected_crc, actual_crc),
        ));
    }

    let seq_no = i32::from_le_bytes([full[4], full[5], full[6], full[7]]);
    let payload = full[8..crc_offset].to_vec();
    Ok((seq_no, payload))
}
|
||||
|
||||
// ========== RPC Nonce (32 bytes payload) ==========
|
||||
|
||||
/// Build the 32-byte RPC_NONCE payload.
///
/// Layout (all LE): magic(4) | key_selector(4) | crypto schema = AES(4) |
/// crypto_ts(4) | client nonce(16).
fn build_nonce_payload(key_selector: u32, crypto_ts: u32, nonce: &[u8; 16]) -> [u8; 32] {
    let mut p = [0u8; 32];
    p[0..4].copy_from_slice(&RPC_NONCE_U32.to_le_bytes());
    p[4..8].copy_from_slice(&key_selector.to_le_bytes());
    p[8..12].copy_from_slice(&RPC_CRYPTO_AES_U32.to_le_bytes());
    p[12..16].copy_from_slice(&crypto_ts.to_le_bytes());
    p[16..32].copy_from_slice(nonce);
    p
}
|
||||
|
||||
/// Parse a server RPC_NONCE payload into (crypto schema, timestamp, nonce).
///
/// Validates only the leading magic; bytes 4..8 (the echoed key selector)
/// are not checked here — the caller validates the schema value instead.
fn parse_nonce_payload(d: &[u8]) -> Result<(u32, u32, [u8; 16])> {
    if d.len() < 32 {
        return Err(ProxyError::InvalidHandshake(
            format!("Nonce payload too short: {} bytes", d.len()),
        ));
    }
    let t = u32::from_le_bytes([d[0], d[1], d[2], d[3]]);
    if t != RPC_NONCE_U32 {
        return Err(ProxyError::InvalidHandshake(
            format!("Expected RPC_NONCE 0x{:08x}, got 0x{:08x}", RPC_NONCE_U32, t),
        ));
    }
    // Offsets mirror build_nonce_payload: schema at 8, timestamp at 12,
    // 16-byte nonce at 16.
    let schema = u32::from_le_bytes([d[8], d[9], d[10], d[11]]);
    let ts = u32::from_le_bytes([d[12], d[13], d[14], d[15]]);
    let mut nonce = [0u8; 16];
    nonce.copy_from_slice(&d[16..32]);
    Ok((schema, ts, nonce))
}
|
||||
|
||||
// ========== RPC Handshake (32 bytes payload) ==========
|
||||
|
||||
/// Build the 32-byte RPC_HANDSHAKE payload.
///
/// Layout: magic(4) | flags(4, zero) | sender_pid {ip, port, pid, utime} |
/// peer_pid {ip, port, pid, utime}. The peer's pid and utime fields are
/// left zero (only its ip and port are written).
fn build_handshake_payload(our_ip: u32, our_port: u16, peer_ip: u32, peer_port: u16) -> [u8; 32] {
    let mut p = [0u8; 32];
    p[0..4].copy_from_slice(&RPC_HANDSHAKE_U32.to_le_bytes());
    // flags = 0 at offset 4..8

    // sender_pid: {ip(4), port(2), pid(2), utime(4)} at offset 8..20
    p[8..12].copy_from_slice(&our_ip.to_le_bytes());
    p[12..14].copy_from_slice(&our_port.to_le_bytes());
    // OS pid truncated to 16 bits to fit the wire field.
    let pid = (std::process::id() & 0xFFFF) as u16;
    p[14..16].copy_from_slice(&pid.to_le_bytes());
    let utime = std::time::SystemTime::now()
        .duration_since(std::time::UNIX_EPOCH)
        .unwrap_or_default()
        .as_secs() as u32;
    p[16..20].copy_from_slice(&utime.to_le_bytes());

    // peer_pid: {ip(4), port(2), pid(2), utime(4)} at offset 20..32
    p[20..24].copy_from_slice(&peer_ip.to_le_bytes());
    p[24..26].copy_from_slice(&peer_port.to_le_bytes());
    p
}
|
||||
|
||||
// ========== CBC helpers ==========
|
||||
|
||||
/// Pad `plaintext` to a 16-byte boundary and AES-256-CBC encrypt it.
///
/// Returns (ciphertext, next IV). The next IV is the final ciphertext
/// block, so successive frames chain as one continuous CBC stream.
fn cbc_encrypt_padded(key: &[u8; 32], iv: &[u8; 16], plaintext: &[u8]) -> Result<(Vec<u8>, [u8; 16])> {
    let pad = (16 - (plaintext.len() % 16)) % 16;
    let mut buf = plaintext.to_vec();
    // Filler repeats the 4-byte pattern 04 00 00 00.
    let pad_pattern: [u8; 4] = [0x04, 0x00, 0x00, 0x00];
    for i in 0..pad {
        buf.push(pad_pattern[i % 4]);
    }
    let cipher = AesCbc::new(*key, *iv);
    cipher.encrypt_in_place(&mut buf)
        .map_err(|e| ProxyError::Crypto(format!("CBC encrypt: {}", e)))?;
    // Capture the last ciphertext block as the chaining IV.
    // (buf can only be shorter than 16 for empty input with no padding.)
    let mut new_iv = [0u8; 16];
    if buf.len() >= 16 {
        new_iv.copy_from_slice(&buf[buf.len() - 16..]);
    }
    Ok((buf, new_iv))
}
|
||||
|
||||
/// AES-256-CBC decrypt `data` in place.
///
/// Returns the next chaining IV, taken from the LAST ciphertext block —
/// note it is copied BEFORE decryption overwrites the buffer.
fn cbc_decrypt_inplace(key: &[u8; 32], iv: &[u8; 16], data: &mut [u8]) -> Result<[u8; 16]> {
    let mut new_iv = [0u8; 16];
    if data.len() >= 16 {
        new_iv.copy_from_slice(&data[data.len() - 16..]);
    }
    AesCbc::new(*key, *iv)
        .decrypt_in_place(data)
        .map_err(|e| ProxyError::Crypto(format!("CBC decrypt: {}", e)))?;
    Ok(new_iv)
}
|
||||
|
||||
// ========== IPv4 helpers ==========
|
||||
|
||||
/// Convert an IPv4 address to its 16-byte IPv4-mapped IPv6 form
/// (`::ffff:a.b.c.d`).
fn ipv4_to_mapped_v6(ip: Ipv4Addr) -> [u8; 16] {
    // std provides the canonical mapping; no need to hand-roll the layout.
    ip.to_ipv6_mapped().octets()
}
|
||||
|
||||
/// Extract the IPv4 address of `addr` as a big-endian u32.
///
/// IPv4-mapped IPv6 addresses are unwrapped; any other IPv6 address
/// yields 0.
fn addr_to_ip_u32(addr: &SocketAddr) -> u32 {
    let v4 = match addr.ip() {
        IpAddr::V4(v4) => Some(v4),
        IpAddr::V6(v6) => v6.to_ipv4_mapped(),
    };
    v4.map(|v4| u32::from_be_bytes(v4.octets())).unwrap_or(0)
}
|
||||
|
||||
// ========== ME Response ==========
|
||||
|
||||
/// Response routed from a middle-proxy connection to a registered client task.
#[derive(Debug)]
pub enum MeResponse {
    /// Payload bytes destined for the client.
    Data(Bytes),
    /// Acknowledgement carrying an opaque u32 — exact semantics are defined
    /// by the ME reader loop (not shown here); confirm there.
    Ack(u32),
    /// Instructs the client task to close the connection.
    Close,
}
|
||||
|
||||
// ========== Connection Registry ==========
|
||||
|
||||
/// Registry mapping connection ids to the response channel of the owning
/// client task.
pub struct ConnRegistry {
    // conn_id -> sender used to route MeResponses to the client task.
    map: RwLock<HashMap<u64, mpsc::Sender<MeResponse>>>,
    // Monotonic id source; starts at 1.
    next_id: AtomicU64,
}
|
||||
|
||||
impl ConnRegistry {
    /// Create an empty registry; ids start at 1.
    pub fn new() -> Self {
        Self {
            map: RwLock::new(HashMap::new()),
            next_id: AtomicU64::new(1),
        }
    }
    /// Allocate a fresh connection id and its response channel
    /// (bounded, capacity 256).
    pub async fn register(&self) -> (u64, mpsc::Receiver<MeResponse>) {
        let id = self.next_id.fetch_add(1, Ordering::Relaxed);
        let (tx, rx) = mpsc::channel(256);
        self.map.write().await.insert(id, tx);
        (id, rx)
    }
    /// Remove a connection id; later routes to it return false.
    pub async fn unregister(&self, id: u64) {
        self.map.write().await.remove(&id);
    }
    /// Deliver `resp` to connection `id`. Returns false when the id is
    /// unknown or the receiving task has dropped its channel.
    pub async fn route(&self, id: u64, resp: MeResponse) -> bool {
        let m = self.map.read().await;
        if let Some(tx) = m.get(&id) {
            tx.send(resp).await.is_ok()
        } else { false }
    }
}
|
||||
|
||||
// ========== RPC Writer (streaming CBC) ==========
|
||||
|
||||
/// Write half of an ME connection plus its streaming AES-CBC state.
struct RpcWriter {
    writer: tokio::io::WriteHalf<TcpStream>,
    // AES-256 key for the outbound direction.
    key: [u8; 32],
    // Rolling CBC IV; advanced to the last ciphertext block on each send.
    iv: [u8; 16],
    // Sequence number for the next RPC frame.
    seq_no: i32,
}
|
||||
|
||||
impl RpcWriter {
    /// Frame, pad, CBC-encrypt and write one RPC payload.
    ///
    /// The frame is padded to a 16-byte boundary with the repeating
    /// pattern 04 00 00 00, encrypted with the rolling IV, and written;
    /// the IV is then advanced to the final ciphertext block so the next
    /// send chains correctly.
    async fn send(&mut self, payload: &[u8]) -> Result<()> {
        let frame = build_rpc_frame(self.seq_no, payload);
        self.seq_no += 1;

        let pad = (16 - (frame.len() % 16)) % 16;
        let mut buf = frame;
        let pad_pattern: [u8; 4] = [0x04, 0x00, 0x00, 0x00];
        for i in 0..pad {
            buf.push(pad_pattern[i % 4]);
        }

        let cipher = AesCbc::new(self.key, self.iv);
        cipher.encrypt_in_place(&mut buf)
            .map_err(|e| ProxyError::Crypto(format!("{}", e)))?;

        // Advance the chaining IV to the last ciphertext block.
        if buf.len() >= 16 {
            self.iv.copy_from_slice(&buf[buf.len() - 16..]);
        }
        self.writer.write_all(&buf).await.map_err(ProxyError::Io)
    }
}
|
||||
|
||||
// ========== RPC_PROXY_REQ ==========
|
||||
|
||||
|
||||
/// Build an RPC_PROXY_REQ payload wrapping client `data`.
///
/// Layout: magic(4) | flags(4) | conn_id(8) | client ip(16)+port(4) |
/// our ip(16)+port(4) | optional extra section (TL-encoded ad tag) |
/// data. IPv4 addresses are written as IPv4-mapped IPv6.
fn build_proxy_req_payload(
    conn_id: u64,
    client_addr: SocketAddr,
    our_addr: SocketAddr,
    data: &[u8],
    proxy_tag: Option<&[u8]>,
    proto_flags: u32,
) -> Vec<u8> {
    // flags are pre-calculated by proto_flags_for_tag
    // We just need to ensure FLAG_HAS_AD_TAG is set if we have a tag (it is set by default in our new function, but let's be safe)
    let mut flags = proto_flags;

    // The C code logic:
    // flags = (transport_flags) | 0x1000 | 0x20000 | 0x8 (if tag)
    // Our proto_flags_for_tag returns: 0x8 | 0x1000 | 0x20000 | transport_flags
    // So we are good.

    // 128 bytes comfortably covers the fixed header + extra section.
    let b_cap = 128 + data.len();
    let mut b = Vec::with_capacity(b_cap);

    b.extend_from_slice(&RPC_PROXY_REQ_U32.to_le_bytes());
    b.extend_from_slice(&flags.to_le_bytes());
    b.extend_from_slice(&conn_id.to_le_bytes());

    // Client IP (16 bytes IPv4-mapped-v6) + port (4 bytes)
    match client_addr.ip() {
        IpAddr::V4(v4) => b.extend_from_slice(&ipv4_to_mapped_v6(v4)),
        IpAddr::V6(v6) => b.extend_from_slice(&v6.octets()),
    }
    b.extend_from_slice(&(client_addr.port() as u32).to_le_bytes());

    // Our IP (16 bytes) + port (4 bytes)
    match our_addr.ip() {
        IpAddr::V4(v4) => b.extend_from_slice(&ipv4_to_mapped_v6(v4)),
        IpAddr::V6(v6) => b.extend_from_slice(&v6.octets()),
    }
    b.extend_from_slice(&(our_addr.port() as u32).to_le_bytes());

    // Extra section (proxy_tag)
    // 12 == 0x4 | 0x8: either of those flag bits triggers the section.
    if flags & 12 != 0 {
        let extra_start = b.len();
        b.extend_from_slice(&0u32.to_le_bytes()); // placeholder

        if let Some(tag) = proxy_tag {
            b.extend_from_slice(&TL_PROXY_TAG_U32.to_le_bytes());
            // TL string encoding
            if tag.len() < 254 {
                // Short form: 1-byte length prefix, then zero-padding so
                // prefix+tag lands on a 4-byte boundary.
                b.push(tag.len() as u8);
                b.extend_from_slice(tag);
                let pad = (4 - ((1 + tag.len()) % 4)) % 4;
                b.extend(std::iter::repeat(0u8).take(pad));
            } else {
                // Long form: 0xfe marker + 3-byte LE length, padded to a
                // 4-byte boundary.
                b.push(0xfe);
                let len_bytes = (tag.len() as u32).to_le_bytes();
                b.extend_from_slice(&len_bytes[..3]);
                b.extend_from_slice(tag);
                let pad = (4 - (tag.len() % 4)) % 4;
                b.extend(std::iter::repeat(0u8).take(pad));
            }
        }

        // Backfill the extra-section byte count into the placeholder.
        let extra_bytes = (b.len() - extra_start - 4) as u32;
        let eb = extra_bytes.to_le_bytes();
        b[extra_start..extra_start + 4].copy_from_slice(&eb);
    }

    b.extend_from_slice(data);
    b
}
|
||||
|
||||
// ========== ME Pool ==========
|
||||
|
||||
/// Pool of encrypted RPC connections to Telegram middle-proxy servers.
pub struct MePool {
    // Routes responses back to per-client tasks.
    registry: Arc<ConnRegistry>,
    // Active connection writers.
    writers: Arc<RwLock<Vec<Arc<Mutex<RpcWriter>>>>>,
    // Counter — presumably a round-robin cursor over `writers`; the
    // selection code is elsewhere, confirm there.
    rr: AtomicU64,
    // Optional advertising tag forwarded in RPC_PROXY_REQ.
    proxy_tag: Option<Vec<u8>>,
    /// Telegram proxy-secret (binary, 32-512 bytes)
    proxy_secret: Vec<u8>,
    // Default pool size recorded at construction (see `new`).
    pool_size: usize,
}
|
||||
|
||||
impl MePool {
|
||||
/// Create a pool shell with no live connections; call `init` to connect.
///
/// `pool_size` is initialized to a default of 2; `init` takes its own
/// count parameter.
pub fn new(proxy_tag: Option<Vec<u8>>, proxy_secret: Vec<u8>) -> Arc<Self> {
    Arc::new(Self {
        registry: Arc::new(ConnRegistry::new()),
        writers: Arc::new(RwLock::new(Vec::new())),
        rr: AtomicU64::new(0),
        proxy_tag,
        proxy_secret,
        pool_size: 2,
    })
}
|
||||
|
||||
/// Shared handle to the response-routing registry.
pub fn registry(&self) -> &Arc<ConnRegistry> {
    &self.registry
}
|
||||
|
||||
/// Clone the shared handle to the writer list.
fn writers_arc(&self) -> Arc<RwLock<Vec<Arc<Mutex<RpcWriter>>>>> {
    self.writers.clone()
}
|
||||
|
||||
/// key_selector = first 4 bytes of proxy-secret as LE u32
/// C: main_secret.key_signature via union { char secret[]; int key_signature; }
///
/// Returns 0 when the secret is shorter than 4 bytes.
fn key_selector(&self) -> u32 {
    if self.proxy_secret.len() >= 4 {
        u32::from_le_bytes([
            self.proxy_secret[0], self.proxy_secret[1],
            self.proxy_secret[2], self.proxy_secret[3],
        ])
    } else { 0 }
}
|
||||
|
||||
/// Connect the pool to Telegram middle-proxy servers.
///
/// Walks the flat v4 ME address list, attempting `pool_size` connections
/// per address, and stops visiting further addresses once `pool_size`
/// writers are live. Individual connect failures are logged and skipped;
/// errs only if no connection at all was established.
pub async fn init(
    self: &Arc<Self>,
    pool_size: usize,
    rng: &SecureRandom,
) -> Result<()> {
    let addrs = &*TG_MIDDLE_PROXIES_FLAT_V4;
    let ks = self.key_selector();
    info!(
        me_servers = addrs.len(),
        pool_size,
        key_selector = format_args!("0x{:08x}", ks),
        secret_len = self.proxy_secret.len(),
        "Initializing ME pool"
    );

    for &(ip, port) in addrs.iter() {
        for i in 0..pool_size {
            let addr = SocketAddr::new(ip, port);
            match self.connect_one(addr, rng).await {
                Ok(()) => info!(%addr, idx = i, "ME connected"),
                Err(e) => warn!(%addr, idx = i, error = %e, "ME connect failed"),
            }
        }
        // Enough live writers — no need to try more ME servers.
        if self.writers.read().await.len() >= pool_size {
            break;
        }
    }

    if self.writers.read().await.is_empty() {
        return Err(ProxyError::Proxy("No ME connections".into()));
    }
    Ok(())
}
|
||||
|
||||
/// Open, authenticate, and register one middle-proxy connection.
///
/// Performs the full C-compatible RPC handshake: plaintext nonce
/// exchange (seq=-2), AES-256-CBC key derivation from both nonces plus
/// the proxy secret and endpoint addresses, an encrypted handshake
/// frame (seq=-1), and a streamed read of the encrypted handshake
/// response. On success the write half becomes a pooled `RpcWriter`
/// and a `reader_loop` task is spawned for the read half.
async fn connect_one(
    self: &Arc<Self>,
    addr: SocketAddr,
    rng: &SecureRandom,
) -> Result<()> {
    let secret = &self.proxy_secret;
    // Key derivation consumes the binary proxy-secret; refuse short ones.
    if secret.len() < 32 {
        return Err(ProxyError::Proxy("proxy-secret too short for ME auth".into()));
    }

    // ===== TCP connect =====
    let stream = timeout(
        Duration::from_secs(ME_CONNECT_TIMEOUT_SECS),
        TcpStream::connect(addr),
    )
    .await
    .map_err(|_| ProxyError::ConnectionTimeout { addr: addr.to_string() })?
    .map_err(ProxyError::Io)?;
    stream.set_nodelay(true).ok();

    // Local/peer addresses feed the key-derivation buffer below.
    let local_addr = stream.local_addr().map_err(ProxyError::Io)?;
    let peer_addr = stream.peer_addr().map_err(ProxyError::Io)?;
    let (mut rd, mut wr) = tokio::io::split(stream);

    // ===== 1. Send RPC nonce (plaintext, seq=-2) =====
    let my_nonce: [u8; 16] = rng.bytes(16).try_into().unwrap();
    let crypto_ts = std::time::SystemTime::now()
        .duration_since(std::time::UNIX_EPOCH)
        .unwrap_or_default()
        .as_secs() as u32;
    let ks = self.key_selector();

    let nonce_payload = build_nonce_payload(ks, crypto_ts, &my_nonce);
    let nonce_frame = build_rpc_frame(-2, &nonce_payload);

    debug!(
        %addr,
        frame_len = nonce_frame.len(),
        key_sel = format_args!("0x{:08x}", ks),
        crypto_ts,
        "Sending nonce"
    );

    wr.write_all(&nonce_frame).await.map_err(ProxyError::Io)?;
    wr.flush().await.map_err(ProxyError::Io)?;

    // ===== 2. Read server nonce (plaintext, seq=-2) =====
    let (srv_seq, srv_nonce_payload) = timeout(
        Duration::from_secs(ME_HANDSHAKE_TIMEOUT_SECS),
        read_rpc_frame_plaintext(&mut rd),
    )
    .await
    .map_err(|_| ProxyError::TgHandshakeTimeout)??;

    if srv_seq != -2 {
        return Err(ProxyError::InvalidHandshake(
            format!("Expected seq=-2, got {}", srv_seq),
        ));
    }

    let (schema, _srv_ts, srv_nonce) = parse_nonce_payload(&srv_nonce_payload)?;
    // Only the AES crypto schema is supported.
    if schema != RPC_CRYPTO_AES_U32 {
        return Err(ProxyError::InvalidHandshake(
            format!("Unsupported crypto schema: 0x{:x}", schema),
        ));
    }

    debug!(%addr, "Nonce exchange OK, deriving keys");

    // ===== 3. Derive AES-256-CBC keys =====
    // C buffer layout:
    // [0..16]   nonce_server (srv_nonce)
    // [16..32]  nonce_client (my_nonce)
    // [32..36]  client_timestamp
    // [36..40]  server_ip
    // [40..42]  client_port
    // [42..48]  "CLIENT" or "SERVER"
    // [48..52]  client_ip
    // [52..54]  server_port
    // [54..54+N] secret (proxy-secret binary)
    // [54+N..70+N] nonce_server
    // nonce_client(16)

    let ts_bytes = crypto_ts.to_le_bytes();
    let server_ip = addr_to_ip_u32(&peer_addr);
    let client_ip = addr_to_ip_u32(&local_addr);
    let server_ip_bytes = server_ip.to_le_bytes();
    let client_ip_bytes = client_ip.to_le_bytes();
    let server_port_bytes = peer_addr.port().to_le_bytes();
    let client_port_bytes = local_addr.port().to_le_bytes();

    // Direction tag selects which key: b"CLIENT" -> our write key/IV,
    // b"SERVER" -> our read key/IV. All other inputs are identical.
    let (wk, wi) = derive_middleproxy_keys(
        &srv_nonce, &my_nonce, &ts_bytes,
        Some(&server_ip_bytes), &client_port_bytes,
        b"CLIENT",
        Some(&client_ip_bytes), &server_port_bytes,
        secret, None, None,
    );
    let (rk, ri) = derive_middleproxy_keys(
        &srv_nonce, &my_nonce, &ts_bytes,
        Some(&server_ip_bytes), &client_port_bytes,
        b"SERVER",
        Some(&client_ip_bytes), &server_port_bytes,
        secret, None, None,
    );

    debug!(
        %addr,
        write_key = %hex::encode(&wk[..8]),
        read_key = %hex::encode(&rk[..8]),
        "Keys derived"
    );

    // ===== 4. Send encrypted handshake (seq=-1) =====
    let hs_payload = build_handshake_payload(
        client_ip, local_addr.port(),
        server_ip, peer_addr.port(),
    );
    let hs_frame = build_rpc_frame(-1, &hs_payload);
    // write_iv is the CBC state after this frame; the pooled writer
    // continues the cipher stream from it.
    let (encrypted_hs, write_iv) = cbc_encrypt_padded(&wk, &wi, &hs_frame)?;
    wr.write_all(&encrypted_hs).await.map_err(ProxyError::Io)?;
    wr.flush().await.map_err(ProxyError::Io)?;

    debug!(%addr, enc_len = encrypted_hs.len(), "Sent encrypted handshake");

    // ===== 5. Read encrypted handshake response (STREAMING) =====
    // Server sends encrypted handshake. C crypto layer may send partial
    // blocks (only complete 16-byte blocks get encrypted at a time).
    // We read incrementally and decrypt block-by-block.
    let deadline = Instant::now() + Duration::from_secs(ME_HANDSHAKE_TIMEOUT_SECS);
    let mut enc_buf = BytesMut::with_capacity(256);
    let mut dec_buf = BytesMut::with_capacity(256);
    let mut read_iv = ri;
    let mut handshake_ok = false;

    while Instant::now() < deadline && !handshake_ok {
        let remaining = deadline - Instant::now();
        let mut tmp = [0u8; 256];
        let n = match timeout(remaining, rd.read(&mut tmp)).await {
            Ok(Ok(0)) => return Err(ProxyError::Io(std::io::Error::new(
                std::io::ErrorKind::UnexpectedEof, "ME closed during handshake",
            ))),
            Ok(Ok(n)) => n,
            Ok(Err(e)) => return Err(ProxyError::Io(e)),
            Err(_) => return Err(ProxyError::TgHandshakeTimeout),
        };
        enc_buf.extend_from_slice(&tmp[..n]);

        // Decrypt complete 16-byte blocks
        let blocks = enc_buf.len() / 16 * 16;
        if blocks > 0 {
            let mut chunk = vec![0u8; blocks];
            chunk.copy_from_slice(&enc_buf[..blocks]);
            let new_iv = cbc_decrypt_inplace(&rk, &read_iv, &mut chunk)?;
            read_iv = new_iv;
            dec_buf.extend_from_slice(&chunk);
            let _ = enc_buf.split_to(blocks);
        }

        // Try to parse RPC frame from decrypted data
        while dec_buf.len() >= 4 {
            let fl = u32::from_le_bytes([
                dec_buf[0], dec_buf[1], dec_buf[2], dec_buf[3],
            ]) as usize;

            // Skip noop padding
            if fl == 4 {
                let _ = dec_buf.split_to(4);
                continue;
            }
            if fl < 12 || fl > (1 << 24) {
                return Err(ProxyError::InvalidHandshake(
                    format!("Bad HS response frame len: {}", fl),
                ));
            }
            if dec_buf.len() < fl {
                break; // need more data
            }

            let frame = dec_buf.split_to(fl);

            // CRC32 check
            let pe = fl - 4;
            let ec = u32::from_le_bytes([
                frame[pe], frame[pe + 1], frame[pe + 2], frame[pe + 3],
            ]);
            let ac = crc32(&frame[..pe]);
            if ec != ac {
                return Err(ProxyError::InvalidHandshake(
                    format!("HS CRC mismatch: 0x{:08x} vs 0x{:08x}", ec, ac),
                ));
            }

            // Check type
            let hs_type = u32::from_le_bytes([
                frame[8], frame[9], frame[10], frame[11],
            ]);
            if hs_type == RPC_HANDSHAKE_ERROR_U32 {
                let err_code = if frame.len() >= 16 {
                    i32::from_le_bytes([frame[12], frame[13], frame[14], frame[15]])
                } else { -1 };
                return Err(ProxyError::InvalidHandshake(
                    format!("ME rejected handshake (error={})", err_code),
                ));
            }
            if hs_type != RPC_HANDSHAKE_U32 {
                return Err(ProxyError::InvalidHandshake(
                    format!("Expected HANDSHAKE 0x{:08x}, got 0x{:08x}", RPC_HANDSHAKE_U32, hs_type),
                ));
            }

            handshake_ok = true;
            break;
        }
    }

    if !handshake_ok {
        return Err(ProxyError::TgHandshakeTimeout);
    }

    info!(%addr, "RPC handshake OK");

    // ===== 6. Setup writer + reader =====
    // Data frames restart the sequence counter at 0.
    let rpc_w = Arc::new(Mutex::new(RpcWriter {
        writer: wr,
        key: wk,
        iv: write_iv,
        seq_no: 0,
    }));
    self.writers.write().await.push(rpc_w.clone());

    let reg = self.registry.clone();
    let w_pong = rpc_w.clone();
    let w_pool = self.writers_arc();
    tokio::spawn(async move {
        // enc_buf/dec_buf may already hold bytes received past the
        // handshake; hand them to the reader so the CBC stream stays aligned.
        if let Err(e) = reader_loop(rd, rk, read_iv, reg, enc_buf, dec_buf, w_pong.clone()).await {
            warn!(error = %e, "ME reader ended");
        }
        // Remove dead writer from pool
        let mut ws = w_pool.write().await;
        ws.retain(|w| !Arc::ptr_eq(w, &w_pong));
        info!(remaining = ws.len(), "Dead ME writer removed from pool");
    });

    Ok(())
}
|
||||
|
||||
pub async fn send_proxy_req(
|
||||
&self,
|
||||
conn_id: u64,
|
||||
client_addr: SocketAddr,
|
||||
our_addr: SocketAddr,
|
||||
data: &[u8],
|
||||
proto_flags: u32,
|
||||
) -> Result<()> {
|
||||
let payload = build_proxy_req_payload(
|
||||
conn_id, client_addr, our_addr, data,
|
||||
self.proxy_tag.as_deref(), proto_flags,
|
||||
);
|
||||
loop {
|
||||
let ws = self.writers.read().await;
|
||||
if ws.is_empty() {
|
||||
return Err(ProxyError::Proxy("All ME connections dead".into()));
|
||||
}
|
||||
let idx = self.rr.fetch_add(1, Ordering::Relaxed) as usize % ws.len();
|
||||
let w = ws[idx].clone();
|
||||
drop(ws);
|
||||
match w.lock().await.send(&payload).await {
|
||||
Ok(()) => return Ok(()),
|
||||
Err(e) => {
|
||||
warn!(error = %e, "ME write failed, removing dead conn");
|
||||
let mut ws = self.writers.write().await;
|
||||
ws.retain(|o| !Arc::ptr_eq(o, &w));
|
||||
if ws.is_empty() {
|
||||
return Err(ProxyError::Proxy("All ME connections dead".into()));
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub async fn send_close(&self, conn_id: u64) -> Result<()> {
|
||||
let ws = self.writers.read().await;
|
||||
if !ws.is_empty() {
|
||||
let w = ws[0].clone();
|
||||
drop(ws);
|
||||
let mut p = Vec::with_capacity(12);
|
||||
p.extend_from_slice(&RPC_CLOSE_EXT_U32.to_le_bytes());
|
||||
p.extend_from_slice(&conn_id.to_le_bytes());
|
||||
if let Err(e) = w.lock().await.send(&p).await {
|
||||
debug!(error = %e, "ME close write failed");
|
||||
let mut ws = self.writers.write().await;
|
||||
ws.retain(|o| !Arc::ptr_eq(o, &w));
|
||||
}
|
||||
}
|
||||
self.registry.unregister(conn_id).await;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub fn connection_count(&self) -> usize {
|
||||
self.writers.try_read().map(|w| w.len()).unwrap_or(0)
|
||||
}
|
||||
}
|
||||
|
||||
// ========== Reader Loop ==========
|
||||
|
||||
/// Per-connection reader task for one middle-proxy link.
///
/// Decrypts the incoming AES-CBC stream block-by-block (carrying the IV
/// forward as the last ciphertext block), reassembles RPC frames, and
/// dispatches them to the connection registry. `enc_leftover` / `dec`
/// carry bytes already buffered during the handshake so nothing is lost.
/// Returns Ok(()) on clean EOF, Err on read or crypto failure.
async fn reader_loop(
    mut rd: tokio::io::ReadHalf<TcpStream>,
    dk: [u8; 32],
    mut div: [u8; 16],
    reg: Arc<ConnRegistry>,
    mut enc_leftover: BytesMut,
    mut dec: BytesMut,
    writer: Arc<Mutex<RpcWriter>>,
) -> Result<()> {
    let mut raw = enc_leftover;
    loop {
        let mut tmp = [0u8; 16384];
        let n = rd.read(&mut tmp).await.map_err(ProxyError::Io)?;
        // n == 0 means peer closed the stream cleanly.
        if n == 0 { return Ok(()); }
        raw.extend_from_slice(&tmp[..n]);

        // Decrypt complete 16-byte blocks
        let blocks = raw.len() / 16 * 16;
        if blocks > 0 {
            // CBC chaining: next IV is the last ciphertext block,
            // captured before decrypting in place.
            let mut new_iv = [0u8; 16];
            new_iv.copy_from_slice(&raw[blocks - 16..blocks]);
            let mut chunk = vec![0u8; blocks];
            chunk.copy_from_slice(&raw[..blocks]);
            AesCbc::new(dk, div)
                .decrypt_in_place(&mut chunk)
                .map_err(|e| ProxyError::Crypto(format!("{}", e)))?;
            div = new_iv;
            dec.extend_from_slice(&chunk);
            let _ = raw.split_to(blocks);
        }

        // Parse RPC frames
        while dec.len() >= 12 {
            let fl = u32::from_le_bytes([dec[0], dec[1], dec[2], dec[3]]) as usize;
            // len==4 is noop padding; skip it.
            if fl == 4 { let _ = dec.split_to(4); continue; }
            if fl < 12 || fl > (1 << 24) {
                // Framing is lost; drop buffered plaintext and resync on
                // the next read.
                warn!(frame_len = fl, "Invalid RPC frame len");
                dec.clear();
                break;
            }
            if dec.len() < fl { break; } // incomplete frame, need more bytes

            let frame = dec.split_to(fl);
            // Trailing 4 bytes are the CRC over everything before them.
            let pe = fl - 4;
            let ec = u32::from_le_bytes([frame[pe], frame[pe+1], frame[pe+2], frame[pe+3]]);
            if crc32(&frame[..pe]) != ec {
                warn!("CRC mismatch in data frame");
                continue;
            }

            // Payload sits between the len(4)+seq(4) header and the CRC.
            let payload = &frame[8..pe];
            if payload.len() < 4 { continue; }
            let pt = u32::from_le_bytes([payload[0], payload[1], payload[2], payload[3]]);
            let body = &payload[4..];

            if pt == RPC_PROXY_ANS_U32 && body.len() >= 12 {
                // Downstream data: flags(4) + conn_id(8) + raw bytes.
                let flags = u32::from_le_bytes(body[0..4].try_into().unwrap());
                let cid = u64::from_le_bytes(body[4..12].try_into().unwrap());
                let data = Bytes::copy_from_slice(&body[12..]);
                trace!(cid, len = data.len(), flags, "ANS");
                reg.route(cid, MeResponse::Data(data)).await;
            } else if pt == RPC_SIMPLE_ACK_U32 && body.len() >= 12 {
                // Delivery ack: conn_id(8) + confirm(4).
                let cid = u64::from_le_bytes(body[0..8].try_into().unwrap());
                let cfm = u32::from_le_bytes(body[8..12].try_into().unwrap());
                trace!(cid, cfm, "ACK");
                reg.route(cid, MeResponse::Ack(cfm)).await;
            } else if pt == RPC_CLOSE_EXT_U32 && body.len() >= 8 {
                let cid = u64::from_le_bytes(body[0..8].try_into().unwrap());
                debug!(cid, "CLOSE_EXT from ME");
                reg.route(cid, MeResponse::Close).await;
                reg.unregister(cid).await;
            } else if pt == RPC_CLOSE_CONN_U32 && body.len() >= 8 {
                let cid = u64::from_le_bytes(body[0..8].try_into().unwrap());
                debug!(cid, "CLOSE_CONN from ME");
                reg.route(cid, MeResponse::Close).await;
                reg.unregister(cid).await;
            } else if pt == RPC_PING_U32 && body.len() >= 8 {
                // Keepalive: echo the ping id back as a PONG through the
                // shared writer for this connection.
                let ping_id = i64::from_le_bytes(body[0..8].try_into().unwrap());
                trace!(ping_id, "RPC_PING -> PONG");
                let mut pong = Vec::with_capacity(12);
                pong.extend_from_slice(&RPC_PONG_U32.to_le_bytes());
                pong.extend_from_slice(&ping_id.to_le_bytes());
                if let Err(e) = writer.lock().await.send(&pong).await {
                    warn!(error = %e, "PONG send failed");
                    // Leaves the frame loop only; the outer read loop continues.
                    break;
                }
            } else {
                debug!(rpc_type = format_args!("0x{:08x}", pt), len = body.len(), "Unknown RPC");
            }
        }
    }
}
|
||||
|
||||
// ========== Proto flags ==========
|
||||
|
||||
/// Map ProtoTag to C-compatible RPC_PROXY_REQ transport flags.
|
||||
/// C: RPC_F_COMPACT(0x40000000)=abridged, RPC_F_MEDIUM(0x20000000)=intermediate/secure
|
||||
/// The 0x1000(magic) and 0x8(proxy_tag) are added inside build_proxy_req_payload.
|
||||
|
||||
pub fn proto_flags_for_tag(tag: crate::protocol::constants::ProtoTag) -> u32 {
|
||||
use crate::protocol::constants::*;
|
||||
let mut flags = RPC_FLAG_HAS_AD_TAG | RPC_FLAG_MAGIC | RPC_FLAG_EXTMODE2;
|
||||
match tag {
|
||||
ProtoTag::Abridged => flags | RPC_FLAG_ABRIDGED,
|
||||
ProtoTag::Intermediate => flags | RPC_FLAG_INTERMEDIATE,
|
||||
ProtoTag::Secure => flags | RPC_FLAG_PAD | RPC_FLAG_INTERMEDIATE,
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
// ========== Health Monitor (Phase 4) ==========
|
||||
|
||||
/// Background task: every 30 seconds, top the ME pool back up to
/// `min_connections` by re-dialing endpoints from the static v4 list.
/// Runs forever; individual reconnect failures are only logged.
pub async fn me_health_monitor(
    pool: Arc<MePool>,
    rng: Arc<SecureRandom>,
    min_connections: usize,
) {
    loop {
        tokio::time::sleep(Duration::from_secs(30)).await;
        let current = pool.writers.read().await.len();
        if current < min_connections {
            warn!(current, min = min_connections, "ME pool below minimum, reconnecting...");
            let addrs = TG_MIDDLE_PROXIES_FLAT_V4.clone();
            for &(ip, port) in addrs.iter() {
                // Recompute the deficit per endpoint — successes against
                // earlier endpoints shrink it.
                let needed = min_connections.saturating_sub(pool.writers.read().await.len());
                if needed == 0 { break; }
                for _ in 0..needed {
                    let addr = SocketAddr::new(ip, port);
                    match pool.connect_one(addr, &rng).await {
                        Ok(()) => info!(%addr, "ME reconnected"),
                        Err(e) => debug!(%addr, error = %e, "ME reconnect failed"),
                    }
                }
            }
        }
    }
}
|
||||
@@ -1,16 +1,53 @@
|
||||
use tokio::io::{AsyncReadExt, AsyncWriteExt};
|
||||
|
||||
use crate::crypto::{AesCbc, crc32};
|
||||
use crate::crypto::{AesCbc, crc32, crc32c};
|
||||
use crate::error::{ProxyError, Result};
|
||||
use crate::protocol::constants::*;
|
||||
|
||||
pub(crate) fn build_rpc_frame(seq_no: i32, payload: &[u8]) -> Vec<u8> {
|
||||
/// Commands sent to dedicated writer tasks to avoid mutex contention on TCP writes.
pub(crate) enum WriterCommand {
    // Frame bytes to write. NOTE(review): flush policy for plain Data is
    // decided by the consuming writer task — confirm there.
    Data(Vec<u8>),
    // Frame bytes to write, followed by an immediate flush (per the name;
    // confirm against the writer task implementation).
    DataAndFlush(Vec<u8>),
    // Ask the writer task to shut down its connection.
    Close,
}
|
||||
|
||||
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
|
||||
pub(crate) enum RpcChecksumMode {
|
||||
Crc32,
|
||||
Crc32c,
|
||||
}
|
||||
|
||||
impl RpcChecksumMode {
|
||||
pub(crate) fn from_handshake_flags(flags: u32) -> Self {
|
||||
if (flags & rpc_crypto_flags::USE_CRC32C) != 0 {
|
||||
Self::Crc32c
|
||||
} else {
|
||||
Self::Crc32
|
||||
}
|
||||
}
|
||||
|
||||
pub(crate) fn advertised_flags(self) -> u32 {
|
||||
match self {
|
||||
Self::Crc32 => 0,
|
||||
Self::Crc32c => rpc_crypto_flags::USE_CRC32C,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub(crate) fn rpc_crc(mode: RpcChecksumMode, data: &[u8]) -> u32 {
|
||||
match mode {
|
||||
RpcChecksumMode::Crc32 => crc32(data),
|
||||
RpcChecksumMode::Crc32c => crc32c(data),
|
||||
}
|
||||
}
|
||||
|
||||
pub(crate) fn build_rpc_frame(seq_no: i32, payload: &[u8], crc_mode: RpcChecksumMode) -> Vec<u8> {
|
||||
let total_len = (4 + 4 + payload.len() + 4) as u32;
|
||||
let mut frame = Vec::with_capacity(total_len as usize);
|
||||
frame.extend_from_slice(&total_len.to_le_bytes());
|
||||
frame.extend_from_slice(&seq_no.to_le_bytes());
|
||||
frame.extend_from_slice(payload);
|
||||
let c = crc32(&frame);
|
||||
let c = rpc_crc(crc_mode, &frame);
|
||||
frame.extend_from_slice(&c.to_le_bytes());
|
||||
frame
|
||||
}
|
||||
@@ -37,7 +74,7 @@ pub(crate) async fn read_rpc_frame_plaintext(
|
||||
|
||||
let crc_offset = total_len - 4;
|
||||
let expected_crc = u32::from_le_bytes(full[crc_offset..crc_offset + 4].try_into().unwrap());
|
||||
let actual_crc = crc32(&full[..crc_offset]);
|
||||
let actual_crc = rpc_crc(RpcChecksumMode::Crc32, &full[..crc_offset]);
|
||||
if expected_crc != actual_crc {
|
||||
return Err(ProxyError::InvalidHandshake(format!(
|
||||
"CRC mismatch: 0x{expected_crc:08x} vs 0x{actual_crc:08x}"
|
||||
@@ -87,26 +124,53 @@ pub(crate) fn build_handshake_payload(
|
||||
our_port: u16,
|
||||
peer_ip: [u8; 4],
|
||||
peer_port: u16,
|
||||
flags: u32,
|
||||
) -> [u8; 32] {
|
||||
let mut p = [0u8; 32];
|
||||
p[0..4].copy_from_slice(&RPC_HANDSHAKE_U32.to_le_bytes());
|
||||
p[4..8].copy_from_slice(&flags.to_le_bytes());
|
||||
|
||||
// Keep C memory layout compatibility for PID IPv4 bytes.
|
||||
// process_id sender_pid
|
||||
p[8..12].copy_from_slice(&our_ip);
|
||||
p[12..14].copy_from_slice(&our_port.to_le_bytes());
|
||||
let pid = (std::process::id() & 0xffff) as u16;
|
||||
p[14..16].copy_from_slice(&pid.to_le_bytes());
|
||||
let utime = std::time::SystemTime::now()
|
||||
.duration_since(std::time::UNIX_EPOCH)
|
||||
.unwrap_or_default()
|
||||
.as_secs() as u32;
|
||||
p[16..20].copy_from_slice(&utime.to_le_bytes());
|
||||
p[14..16].copy_from_slice(&process_pid16().to_le_bytes());
|
||||
p[16..20].copy_from_slice(&process_utime().to_le_bytes());
|
||||
|
||||
// process_id peer_pid
|
||||
p[20..24].copy_from_slice(&peer_ip);
|
||||
p[24..26].copy_from_slice(&peer_port.to_le_bytes());
|
||||
p[26..28].copy_from_slice(&0u16.to_le_bytes());
|
||||
p[28..32].copy_from_slice(&0u32.to_le_bytes());
|
||||
p
|
||||
}
|
||||
|
||||
pub(crate) fn parse_handshake_flags(payload: &[u8]) -> Result<u32> {
|
||||
if payload.len() != 32 {
|
||||
return Err(ProxyError::InvalidHandshake(format!(
|
||||
"Bad handshake payload len: {}",
|
||||
payload.len()
|
||||
)));
|
||||
}
|
||||
let hs_type = u32::from_le_bytes(payload[0..4].try_into().unwrap());
|
||||
if hs_type != RPC_HANDSHAKE_U32 {
|
||||
return Err(ProxyError::InvalidHandshake(format!(
|
||||
"Expected HANDSHAKE 0x{RPC_HANDSHAKE_U32:08x}, got 0x{hs_type:08x}"
|
||||
)));
|
||||
}
|
||||
Ok(u32::from_le_bytes(payload[4..8].try_into().unwrap()))
|
||||
}
|
||||
|
||||
/// Low 16 bits of the current process id, as placed in the handshake
/// PID field for C-layout compatibility.
fn process_pid16() -> u16 {
    // Truncating cast keeps exactly the low 16 bits (same as `& 0xffff`).
    std::process::id() as u16
}
|
||||
|
||||
/// Current Unix time in whole seconds, truncated to u32.
/// Yields 0 if the system clock reads before the epoch.
fn process_utime() -> u32 {
    match std::time::SystemTime::now().duration_since(std::time::UNIX_EPOCH) {
        Ok(elapsed) => elapsed.as_secs() as u32,
        Err(_) => 0,
    }
}
|
||||
|
||||
pub(crate) fn cbc_encrypt_padded(
|
||||
key: &[u8; 32],
|
||||
iv: &[u8; 16],
|
||||
@@ -152,12 +216,13 @@ pub(crate) struct RpcWriter {
|
||||
pub(crate) key: [u8; 32],
|
||||
pub(crate) iv: [u8; 16],
|
||||
pub(crate) seq_no: i32,
|
||||
pub(crate) crc_mode: RpcChecksumMode,
|
||||
}
|
||||
|
||||
impl RpcWriter {
|
||||
pub(crate) async fn send(&mut self, payload: &[u8]) -> Result<()> {
|
||||
let frame = build_rpc_frame(self.seq_no, payload);
|
||||
self.seq_no += 1;
|
||||
let frame = build_rpc_frame(self.seq_no, payload, self.crc_mode);
|
||||
self.seq_no = self.seq_no.wrapping_add(1);
|
||||
|
||||
let pad = (16 - (frame.len() % 16)) % 16;
|
||||
let mut buf = frame;
|
||||
@@ -176,4 +241,9 @@ impl RpcWriter {
|
||||
}
|
||||
self.writer.write_all(&buf).await.map_err(ProxyError::Io)
|
||||
}
|
||||
|
||||
pub(crate) async fn send_and_flush(&mut self, payload: &[u8]) -> Result<()> {
|
||||
self.send(payload).await?;
|
||||
self.writer.flush().await.map_err(ProxyError::Io)
|
||||
}
|
||||
}
|
||||
|
||||
592
src/transport/middle_proxy/config_updater.rs
Normal file
592
src/transport/middle_proxy/config_updater.rs
Normal file
@@ -0,0 +1,592 @@
|
||||
use std::collections::HashMap;
|
||||
use std::hash::{DefaultHasher, Hash, Hasher};
|
||||
use std::net::IpAddr;
|
||||
use std::path::Path;
|
||||
use std::sync::Arc;
|
||||
use std::time::Duration;
|
||||
|
||||
use httpdate;
|
||||
use tokio::sync::{mpsc, watch};
|
||||
use tracing::{debug, info, warn};
|
||||
|
||||
use crate::config::ProxyConfig;
|
||||
use crate::error::Result;
|
||||
|
||||
use super::MePool;
|
||||
use super::rotation::{MeReinitTrigger, enqueue_reinit_trigger};
|
||||
use super::secret::download_proxy_secret_with_max_len;
|
||||
use std::time::SystemTime;
|
||||
|
||||
async fn retry_fetch(url: &str) -> Option<ProxyConfigData> {
|
||||
let delays = [1u64, 5, 15];
|
||||
for (i, d) in delays.iter().enumerate() {
|
||||
match fetch_proxy_config(url).await {
|
||||
Ok(cfg) => return Some(cfg),
|
||||
Err(e) => {
|
||||
if i == delays.len() - 1 {
|
||||
warn!(error = %e, url, "fetch_proxy_config failed");
|
||||
} else {
|
||||
debug!(error = %e, url, "fetch_proxy_config retrying");
|
||||
tokio::time::sleep(Duration::from_secs(*d)).await;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
None
|
||||
}
|
||||
|
||||
/// Parsed snapshot of a Telegram `getProxyConfig` document.
#[derive(Debug, Clone, Default)]
pub struct ProxyConfigData {
    // DC id -> middle-proxy endpoints parsed from `proxy_for` lines.
    pub map: HashMap<i32, Vec<(IpAddr, u16)>>,
    // Value of the `default <dc>;` line, when one parses.
    pub default_dc: Option<i32>,
    // HTTP status of the fetch that produced this snapshot
    // (cache loads record 200).
    pub http_status: u16,
    // Count of successfully parsed `proxy_for` lines; consumed by the
    // snapshot acceptance guards.
    pub proxy_for_lines: u32,
}
|
||||
|
||||
pub fn parse_proxy_config_text(text: &str, http_status: u16) -> ProxyConfigData {
|
||||
let mut map: HashMap<i32, Vec<(IpAddr, u16)>> = HashMap::new();
|
||||
let mut proxy_for_lines: u32 = 0;
|
||||
for line in text.lines() {
|
||||
if let Some((dc, ip, port)) = parse_proxy_line(line) {
|
||||
map.entry(dc).or_default().push((ip, port));
|
||||
proxy_for_lines = proxy_for_lines.saturating_add(1);
|
||||
}
|
||||
}
|
||||
|
||||
let default_dc = text.lines().find_map(|l| {
|
||||
let t = l.trim();
|
||||
if let Some(rest) = t.strip_prefix("default") {
|
||||
return rest.trim().trim_end_matches(';').parse::<i32>().ok();
|
||||
}
|
||||
None
|
||||
});
|
||||
|
||||
ProxyConfigData {
|
||||
map,
|
||||
default_dc,
|
||||
http_status,
|
||||
proxy_for_lines,
|
||||
}
|
||||
}
|
||||
|
||||
pub async fn load_proxy_config_cache(path: &str) -> Result<ProxyConfigData> {
|
||||
let text = tokio::fs::read_to_string(path).await.map_err(|e| {
|
||||
crate::error::ProxyError::Proxy(format!("read proxy-config cache '{path}' failed: {e}"))
|
||||
})?;
|
||||
Ok(parse_proxy_config_text(&text, 200))
|
||||
}
|
||||
|
||||
pub async fn save_proxy_config_cache(path: &str, raw_text: &str) -> Result<()> {
|
||||
if let Some(parent) = Path::new(path).parent()
|
||||
&& !parent.as_os_str().is_empty()
|
||||
{
|
||||
tokio::fs::create_dir_all(parent).await.map_err(|e| {
|
||||
crate::error::ProxyError::Proxy(format!(
|
||||
"create proxy-config cache dir '{}' failed: {e}",
|
||||
parent.display()
|
||||
))
|
||||
})?;
|
||||
}
|
||||
|
||||
tokio::fs::write(path, raw_text).await.map_err(|e| {
|
||||
crate::error::ProxyError::Proxy(format!("write proxy-config cache '{path}' failed: {e}"))
|
||||
})?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// GET the proxy config from `url`, returning both the parsed snapshot
/// and the raw body text (kept for on-disk caching). Also compares the
/// HTTP `Date` header against local time and warns on clock skew.
pub async fn fetch_proxy_config_with_raw(url: &str) -> Result<(ProxyConfigData, String)> {
    let resp = reqwest::get(url)
        .await
        .map_err(|e| crate::error::ProxyError::Proxy(format!("fetch_proxy_config GET failed: {e}")))?;
    let http_status = resp.status().as_u16();

    // Skew is measured as an absolute difference: try now - server_time,
    // and fall back to server_time - now when the server clock is ahead.
    if let Some(date) = resp.headers().get(reqwest::header::DATE)
        && let Ok(date_str) = date.to_str()
        && let Ok(server_time) = httpdate::parse_http_date(date_str)
        && let Ok(skew) = SystemTime::now().duration_since(server_time).or_else(|e| {
            server_time.duration_since(SystemTime::now()).map_err(|_| e)
        })
    {
        let skew_secs = skew.as_secs();
        if skew_secs > 60 {
            warn!(skew_secs, "Time skew >60s detected from fetch_proxy_config Date header");
        } else if skew_secs > 30 {
            warn!(skew_secs, "Time skew >30s detected from fetch_proxy_config Date header");
        }
    }

    let text = resp
        .text()
        .await
        .map_err(|e| crate::error::ProxyError::Proxy(format!("fetch_proxy_config read failed: {e}")))?;
    let parsed = parse_proxy_config_text(&text, http_status);
    Ok((parsed, text))
}
|
||||
|
||||
/// Debounce helper for config/secret snapshots: a hash must be observed
/// on consecutive cycles before callers apply it, and an applied hash is
/// remembered so it is not re-applied.
#[derive(Debug, Default)]
struct StableSnapshot {
    candidate_hash: Option<u64>,
    candidate_hits: u8,
    applied_hash: Option<u64>,
}

impl StableSnapshot {
    /// Record one observation of `hash` and return how many consecutive
    /// cycles this exact hash has now been seen (saturating at u8::MAX).
    /// A different hash resets the streak to 1.
    fn observe(&mut self, hash: u64) -> u8 {
        if self.candidate_hash != Some(hash) {
            self.candidate_hash = Some(hash);
            self.candidate_hits = 0;
        }
        self.candidate_hits = self.candidate_hits.saturating_add(1);
        self.candidate_hits
    }

    /// Whether `hash` is the snapshot most recently marked applied.
    fn is_applied(&self, hash: u64) -> bool {
        self.applied_hash == Some(hash)
    }

    /// Remember `hash` as the applied snapshot.
    fn mark_applied(&mut self, hash: u64) {
        self.applied_hash = Some(hash);
    }
}
|
||||
|
||||
/// Mutable state the config-updater task carries across update cycles.
#[derive(Debug, Default)]
struct UpdaterState {
    // Debounce state for the IPv4 getProxyConfig snapshot.
    config_v4: StableSnapshot,
    // Debounce state for the IPv6 getProxyConfigV6 snapshot.
    config_v6: StableSnapshot,
    // Debounce state for the downloaded proxy secret.
    secret: StableSnapshot,
    // When a proxy map snapshot was last applied; drives the apply cooldown.
    last_map_apply_at: Option<tokio::time::Instant>,
}
|
||||
|
||||
fn hash_proxy_config(cfg: &ProxyConfigData) -> u64 {
|
||||
let mut hasher = DefaultHasher::new();
|
||||
cfg.default_dc.hash(&mut hasher);
|
||||
|
||||
let mut by_dc: Vec<(i32, Vec<(IpAddr, u16)>)> =
|
||||
cfg.map.iter().map(|(dc, addrs)| (*dc, addrs.clone())).collect();
|
||||
by_dc.sort_by_key(|(dc, _)| *dc);
|
||||
for (dc, mut addrs) in by_dc {
|
||||
dc.hash(&mut hasher);
|
||||
addrs.sort_unstable();
|
||||
for (ip, port) in addrs {
|
||||
ip.hash(&mut hasher);
|
||||
port.hash(&mut hasher);
|
||||
}
|
||||
}
|
||||
|
||||
hasher.finish()
|
||||
}
|
||||
|
||||
/// 64-bit fingerprint of the raw proxy-secret bytes, used by the
/// snapshot debounce logic.
fn hash_secret(secret: &[u8]) -> u64 {
    let mut state = DefaultHasher::new();
    secret.hash(&mut state);
    state.finish()
}
|
||||
|
||||
fn map_apply_cooldown_ready(
|
||||
last_applied: Option<tokio::time::Instant>,
|
||||
cooldown: Duration,
|
||||
) -> bool {
|
||||
if cooldown.is_zero() {
|
||||
return true;
|
||||
}
|
||||
match last_applied {
|
||||
Some(ts) => ts.elapsed() >= cooldown,
|
||||
None => true,
|
||||
}
|
||||
}
|
||||
|
||||
fn map_apply_cooldown_remaining_secs(
|
||||
last_applied: tokio::time::Instant,
|
||||
cooldown: Duration,
|
||||
) -> u64 {
|
||||
if cooldown.is_zero() {
|
||||
return 0;
|
||||
}
|
||||
cooldown
|
||||
.checked_sub(last_applied.elapsed())
|
||||
.map(|d| d.as_secs())
|
||||
.unwrap_or(0)
|
||||
}
|
||||
|
||||
/// Split a `host:port` string into an (IpAddr, port) pair.
///
/// Accepts bracketed IPv6 (`[::1]:80`), plain IPv4 (`1.2.3.4:80`), and
/// unbracketed IPv6 where everything after the *last* colon is taken as
/// the port (`2001:db8::1:80`).
fn parse_host_port(s: &str) -> Option<(IpAddr, u16)> {
    // Bracketed form: "[host]:port".
    if s.starts_with('[') {
        if let Some(close) = s.rfind(']') {
            if s[close + 1..].starts_with(':') {
                let ip = s[1..close].parse::<IpAddr>().ok()?;
                let port = s[close + 2..].parse::<u16>().ok()?;
                return Some((ip, port));
            }
        }
    }

    // Unbracketed: the last ':' separates host from port.
    let sep = s.rfind(':')?;
    let ip = s[..sep].parse::<IpAddr>().ok()?;
    let port = s[sep + 1..].parse::<u16>().ok()?;
    Some((ip, port))
}

/// Parse one `proxy_for <dc> <host:port>;` line.
///
/// Returns None for every other line shape, e.g.:
///   proxy_for 4 91.108.4.195:8888;
///   proxy_for 2 [2001:67c:04e8:f002::d]:80;
///   proxy_for 2 2001:67c:04e8:f002::d:80;
fn parse_proxy_line(line: &str) -> Option<(i32, IpAddr, u16)> {
    let trimmed = line.trim();
    if !trimmed.starts_with("proxy_for") {
        return None;
    }
    let fields_str = trimmed.trim_start_matches("proxy_for").trim();
    let mut fields = fields_str.split_whitespace();
    let dc_token = fields.next()?;
    let endpoint = fields.next()?.trim_end_matches(';');
    let dc = dc_token.parse::<i32>().ok()?;
    let (ip, port) = parse_host_port(endpoint)?;
    Some((dc, ip, port))
}
|
||||
|
||||
/// Fetch and parse the proxy config, discarding the raw body text.
/// Thin wrapper over [`fetch_proxy_config_with_raw`] for callers that
/// do not need to cache the document.
pub async fn fetch_proxy_config(url: &str) -> Result<ProxyConfigData> {
    fetch_proxy_config_with_raw(url)
        .await
        .map(|(parsed, _raw)| parsed)
}
|
||||
|
||||
/// Apply the configured acceptance guards to a freshly fetched config
/// snapshot. Returns false (with a warning) when the snapshot must be
/// rejected; true when it is eligible for the debounce/apply pipeline.
fn snapshot_passes_guards(
    cfg: &ProxyConfig,
    snapshot: &ProxyConfigData,
    snapshot_name: &'static str,
) -> bool {
    // Guard 1 (optional): the HTTP fetch must have returned a 2xx status.
    if cfg.general.me_snapshot_require_http_2xx
        && !(200..=299).contains(&snapshot.http_status)
    {
        warn!(
            snapshot = snapshot_name,
            http_status = snapshot.http_status,
            "ME snapshot rejected by non-2xx HTTP status"
        );
        return false;
    }

    // Guard 2: a configurable floor on parsed `proxy_for` lines.
    let min_proxy_for = cfg.general.me_snapshot_min_proxy_for_lines;
    if snapshot.proxy_for_lines < min_proxy_for {
        warn!(
            snapshot = snapshot_name,
            parsed_proxy_for_lines = snapshot.proxy_for_lines,
            min_proxy_for_lines = min_proxy_for,
            "ME snapshot rejected by proxy_for line floor"
        );
        return false;
    }

    true
}
|
||||
|
||||
/// Run one ME updater pass: refresh the runtime reinit policy from `cfg`,
/// fetch the v4/v6 proxy-config snapshots, apply them once they have been
/// observed stably enough times (and the apply cooldown has elapsed), and
/// optionally rotate the proxy secret through the same stable-gate.
///
/// Side effects: mutates `state` (stable-gate trackers, last-apply time),
/// reconfigures `pool`, and enqueues a `MapChanged` reinit trigger when the
/// applied snapshot actually changed the endpoint maps.
async fn run_update_cycle(
    pool: &Arc<MePool>,
    cfg: &ProxyConfig,
    state: &mut UpdaterState,
    reinit_tx: &mpsc::Sender<MeReinitTrigger>,
) {
    // Positional call — argument order must stay in sync with
    // `update_runtime_reinit_policy`'s signature.
    pool.update_runtime_reinit_policy(
        cfg.general.hardswap,
        cfg.general.me_pool_drain_ttl_secs,
        cfg.general.effective_me_pool_force_close_secs(),
        cfg.general.me_pool_min_fresh_ratio,
        cfg.general.me_hardswap_warmup_delay_min_ms,
        cfg.general.me_hardswap_warmup_delay_max_ms,
        cfg.general.me_hardswap_warmup_extra_passes,
        cfg.general.me_hardswap_warmup_pass_backoff_base_ms,
        cfg.general.me_bind_stale_mode,
        cfg.general.me_bind_stale_ttl_secs,
        cfg.general.me_secret_atomic_snapshot,
        cfg.general.me_deterministic_writer_sort,
        cfg.general.me_single_endpoint_shadow_writers,
        cfg.general.me_single_endpoint_outage_mode_enabled,
        cfg.general.me_single_endpoint_outage_disable_quarantine,
        cfg.general.me_single_endpoint_outage_backoff_min_ms,
        cfg.general.me_single_endpoint_outage_backoff_max_ms,
        cfg.general.me_single_endpoint_shadow_rotate_every_secs,
        cfg.general.me_floor_mode,
        cfg.general.me_adaptive_floor_idle_secs,
        cfg.general.me_adaptive_floor_min_writers_single_endpoint,
        cfg.general.me_adaptive_floor_recover_grace_secs,
    );

    // A snapshot must be seen this many consecutive times before it is applied
    // (minimum 1 so a config value of 0 cannot wedge updates forever).
    let required_cfg_snapshots = cfg.general.me_config_stable_snapshots.max(1);
    let required_secret_snapshots = cfg.general.proxy_secret_stable_snapshots.max(1);
    let apply_cooldown = Duration::from_secs(cfg.general.me_config_apply_cooldown_secs);
    let mut maps_changed = false;

    // --- IPv4 endpoint map: fetch, guard, and stable-gate the candidate. ---
    let mut ready_v4: Option<(ProxyConfigData, u64)> = None;
    let cfg_v4 = retry_fetch("https://core.telegram.org/getProxyConfig").await;
    if let Some(cfg_v4) = cfg_v4 {
        if snapshot_passes_guards(cfg, &cfg_v4, "getProxyConfig") {
            let cfg_v4_hash = hash_proxy_config(&cfg_v4);
            let stable_hits = state.config_v4.observe(cfg_v4_hash);
            if stable_hits < required_cfg_snapshots {
                // Not stable yet — only record the observation.
                debug!(
                    stable_hits,
                    required_cfg_snapshots,
                    snapshot = format_args!("0x{cfg_v4_hash:016x}"),
                    "ME config v4 candidate observed"
                );
            } else if state.config_v4.is_applied(cfg_v4_hash) {
                // Stable but identical to what is already live — nothing to do.
                debug!(
                    snapshot = format_args!("0x{cfg_v4_hash:016x}"),
                    "ME config v4 stable snapshot already applied"
                );
            } else {
                ready_v4 = Some((cfg_v4, cfg_v4_hash));
            }
        }
    }

    // --- IPv6 endpoint map: same stable-gate flow as v4. ---
    let mut ready_v6: Option<(ProxyConfigData, u64)> = None;
    let cfg_v6 = retry_fetch("https://core.telegram.org/getProxyConfigV6").await;
    if let Some(cfg_v6) = cfg_v6 {
        if snapshot_passes_guards(cfg, &cfg_v6, "getProxyConfigV6") {
            let cfg_v6_hash = hash_proxy_config(&cfg_v6);
            let stable_hits = state.config_v6.observe(cfg_v6_hash);
            if stable_hits < required_cfg_snapshots {
                debug!(
                    stable_hits,
                    required_cfg_snapshots,
                    snapshot = format_args!("0x{cfg_v6_hash:016x}"),
                    "ME config v6 candidate observed"
                );
            } else if state.config_v6.is_applied(cfg_v6_hash) {
                debug!(
                    snapshot = format_args!("0x{cfg_v6_hash:016x}"),
                    "ME config v6 stable snapshot already applied"
                );
            } else {
                ready_v6 = Some((cfg_v6, cfg_v6_hash));
            }
        }
    }

    // --- Apply whichever families passed the gate, respecting the cooldown. ---
    if ready_v4.is_some() || ready_v6.is_some() {
        if map_apply_cooldown_ready(state.last_map_apply_at, apply_cooldown) {
            let update_v4 = ready_v4
                .as_ref()
                .map(|(snapshot, _)| snapshot.map.clone())
                .unwrap_or_default();
            let update_v6 = ready_v6
                .as_ref()
                .map(|(snapshot, _)| snapshot.map.clone());
            let update_is_empty =
                update_v4.is_empty() && update_v6.as_ref().is_none_or(|v| v.is_empty());
            // An all-empty update is treated as a no-delta apply unless the
            // config explicitly asks to reject empty maps at the pool level.
            let apply_outcome = if update_is_empty && !cfg.general.me_snapshot_reject_empty_map {
                super::pool_config::SnapshotApplyOutcome::AppliedNoDelta
            } else {
                pool.update_proxy_maps(update_v4, update_v6).await
            };

            if matches!(
                apply_outcome,
                super::pool_config::SnapshotApplyOutcome::RejectedEmpty
            ) {
                // Rejected snapshots are NOT marked applied, so they will be
                // retried on the next cycle.
                warn!("ME config stable snapshot rejected (empty endpoint map)");
            } else {
                if let Some((snapshot, hash)) = ready_v4 {
                    // Only the v4 snapshot carries the default DC hint.
                    if let Some(dc) = snapshot.default_dc {
                        pool.default_dc
                            .store(dc, std::sync::atomic::Ordering::Relaxed);
                    }
                    state.config_v4.mark_applied(hash);
                }

                if let Some((_snapshot, hash)) = ready_v6 {
                    state.config_v6.mark_applied(hash);
                }

                state.last_map_apply_at = Some(tokio::time::Instant::now());

                if apply_outcome.changed() {
                    maps_changed = true;
                    info!("ME config update applied after stable-gate");
                } else {
                    debug!("ME config stable-gate applied with no map delta");
                }
            }
        } else if let Some(last) = state.last_map_apply_at {
            // Cooldown still running — defer; the stable-gate state is kept,
            // so the snapshot can apply on a later cycle.
            let wait_secs = map_apply_cooldown_remaining_secs(last, apply_cooldown);
            debug!(
                wait_secs,
                "ME config stable snapshot deferred by cooldown"
            );
        }
    }

    if maps_changed {
        enqueue_reinit_trigger(reinit_tx, MeReinitTrigger::MapChanged);
    }

    // Reset cached STUN/NAT probing state once per update cycle.
    pool.reset_stun_state();

    // --- proxy-secret rotation, also behind a stable-gate. ---
    if cfg.general.proxy_secret_rotate_runtime {
        match download_proxy_secret_with_max_len(cfg.general.proxy_secret_len_max).await {
            Ok(secret) => {
                let secret_hash = hash_secret(&secret);
                let stable_hits = state.secret.observe(secret_hash);
                if stable_hits < required_secret_snapshots {
                    debug!(
                        stable_hits,
                        required_secret_snapshots,
                        snapshot = format_args!("0x{secret_hash:016x}"),
                        "proxy-secret candidate observed"
                    );
                } else if state.secret.is_applied(secret_hash) {
                    debug!(
                        snapshot = format_args!("0x{secret_hash:016x}"),
                        "proxy-secret stable snapshot already applied"
                    );
                } else {
                    let rotated = pool.update_secret(secret).await;
                    state.secret.mark_applied(secret_hash);
                    if rotated {
                        info!("proxy-secret rotated after stable-gate");
                    } else {
                        debug!("proxy-secret stable snapshot confirmed as unchanged");
                    }
                }
            }
            Err(e) => warn!(error = %e, "proxy-secret update failed"),
        }
    } else {
        debug!("proxy-secret runtime rotation disabled by config");
    }
}
|
||||
|
||||
/// Long-lived task driving periodic ME config/secret updates.
///
/// Runs `run_update_cycle` on a fixed tick whose interval is re-read from the
/// live config after every cycle, and reacts immediately to config changes on
/// `config_rx`: the reinit policy is pushed to the pool right away, and an
/// interval *decrease* triggers an immediate refresh cycle (an increase only
/// re-arms the timer). The task exits when the config channel is closed.
pub async fn me_config_updater(
    pool: Arc<MePool>,
    mut config_rx: watch::Receiver<Arc<ProxyConfig>>,
    reinit_tx: mpsc::Sender<MeReinitTrigger>,
) {
    let mut state = UpdaterState::default();
    // Clamp to >= 1s so a zero config value cannot produce a busy loop.
    let mut update_every_secs = config_rx
        .borrow()
        .general
        .effective_update_every_secs()
        .max(1);
    let mut update_every = Duration::from_secs(update_every_secs);
    let mut next_tick = tokio::time::Instant::now() + update_every;
    info!(update_every_secs, "ME config updater started");

    loop {
        // Recreate the sleep each iteration so `next_tick` adjustments made in
        // either select arm take effect.
        let sleep = tokio::time::sleep_until(next_tick);
        tokio::pin!(sleep);

        tokio::select! {
            _ = &mut sleep => {
                // Regular tick: run one update cycle against the current config.
                let cfg = config_rx.borrow().clone();
                run_update_cycle(&pool, cfg.as_ref(), &mut state, &reinit_tx).await;
                // The interval may have changed while the cycle ran.
                let refreshed_secs = cfg.general.effective_update_every_secs().max(1);
                if refreshed_secs != update_every_secs {
                    info!(
                        old_update_every_secs = update_every_secs,
                        new_update_every_secs = refreshed_secs,
                        "ME config updater interval changed"
                    );
                    update_every_secs = refreshed_secs;
                    update_every = Duration::from_secs(update_every_secs);
                }
                next_tick = tokio::time::Instant::now() + update_every;
            }
            changed = config_rx.changed() => {
                if changed.is_err() {
                    // Sender dropped — no more config updates will ever arrive.
                    warn!("ME config updater stopped: config channel closed");
                    break;
                }
                let cfg = config_rx.borrow().clone();
                // Push the new reinit policy immediately, without waiting for
                // the next tick. Positional call — argument order must stay in
                // sync with `update_runtime_reinit_policy`'s signature.
                pool.update_runtime_reinit_policy(
                    cfg.general.hardswap,
                    cfg.general.me_pool_drain_ttl_secs,
                    cfg.general.effective_me_pool_force_close_secs(),
                    cfg.general.me_pool_min_fresh_ratio,
                    cfg.general.me_hardswap_warmup_delay_min_ms,
                    cfg.general.me_hardswap_warmup_delay_max_ms,
                    cfg.general.me_hardswap_warmup_extra_passes,
                    cfg.general.me_hardswap_warmup_pass_backoff_base_ms,
                    cfg.general.me_bind_stale_mode,
                    cfg.general.me_bind_stale_ttl_secs,
                    cfg.general.me_secret_atomic_snapshot,
                    cfg.general.me_deterministic_writer_sort,
                    cfg.general.me_single_endpoint_shadow_writers,
                    cfg.general.me_single_endpoint_outage_mode_enabled,
                    cfg.general.me_single_endpoint_outage_disable_quarantine,
                    cfg.general.me_single_endpoint_outage_backoff_min_ms,
                    cfg.general.me_single_endpoint_outage_backoff_max_ms,
                    cfg.general.me_single_endpoint_shadow_rotate_every_secs,
                    cfg.general.me_floor_mode,
                    cfg.general.me_adaptive_floor_idle_secs,
                    cfg.general.me_adaptive_floor_min_writers_single_endpoint,
                    cfg.general.me_adaptive_floor_recover_grace_secs,
                );
                let new_secs = cfg.general.effective_update_every_secs().max(1);
                if new_secs == update_every_secs {
                    continue;
                }

                if new_secs < update_every_secs {
                    // Shorter interval: refresh right now so the tighter
                    // cadence takes effect without waiting out the old timer.
                    info!(
                        old_update_every_secs = update_every_secs,
                        new_update_every_secs = new_secs,
                        "ME config updater interval decreased, running immediate refresh"
                    );
                    update_every_secs = new_secs;
                    update_every = Duration::from_secs(update_every_secs);
                    run_update_cycle(&pool, cfg.as_ref(), &mut state, &reinit_tx).await;
                    next_tick = tokio::time::Instant::now() + update_every;
                } else {
                    // Longer interval: just re-arm the timer.
                    info!(
                        old_update_every_secs = update_every_secs,
                        new_update_every_secs = new_secs,
                        "ME config updater interval increased"
                    );
                    update_every_secs = new_secs;
                    update_every = Duration::from_secs(update_every_secs);
                    next_tick = tokio::time::Instant::now() + update_every;
                }
            }
        }
    }
}
|
||||
|
||||
#[cfg(test)]
mod tests {
    use super::*;

    // Each case feeds one literal `proxy_for` line to `parse_proxy_line` and
    // checks the (dc, ip, port) triple it yields.

    #[test]
    fn parse_ipv6_bracketed() {
        let parsed = parse_proxy_line("proxy_for 2 [2001:67c:04e8:f002::d]:80;").unwrap();
        assert_eq!(parsed.0, 2);
        assert_eq!(
            parsed.1,
            "2001:67c:04e8:f002::d".parse::<IpAddr>().unwrap()
        );
        assert_eq!(parsed.2, 80);
    }

    #[test]
    fn parse_ipv6_plain() {
        // Unbracketed IPv6 — the last colon-separated token is the port.
        let parsed = parse_proxy_line("proxy_for 2 2001:67c:04e8:f002::d:80;").unwrap();
        assert_eq!(parsed.0, 2);
        assert_eq!(
            parsed.1,
            "2001:67c:04e8:f002::d".parse::<IpAddr>().unwrap()
        );
        assert_eq!(parsed.2, 80);
    }

    #[test]
    fn parse_ipv4() {
        let parsed = parse_proxy_line("proxy_for 4 91.108.4.195:8888;").unwrap();
        assert_eq!(parsed.0, 4);
        assert_eq!(parsed.1, "91.108.4.195".parse::<IpAddr>().unwrap());
        assert_eq!(parsed.2, 8888);
    }
}
|
||||
653
src/transport/middle_proxy/handshake.rs
Normal file
653
src/transport/middle_proxy/handshake.rs
Normal file
@@ -0,0 +1,653 @@
|
||||
use std::net::{IpAddr, SocketAddr};
|
||||
use std::sync::atomic::Ordering;
|
||||
use std::time::{Duration, Instant};
|
||||
use std::collections::hash_map::DefaultHasher;
|
||||
use std::hash::{Hash, Hasher};
|
||||
use socket2::{SockRef, TcpKeepalive};
|
||||
#[cfg(target_os = "linux")]
|
||||
use libc;
|
||||
#[cfg(target_os = "linux")]
|
||||
use std::os::fd::{AsRawFd, RawFd};
|
||||
#[cfg(target_os = "linux")]
|
||||
use std::os::raw::c_int;
|
||||
|
||||
use bytes::BytesMut;
|
||||
use tokio::io::{AsyncReadExt, AsyncWriteExt, ReadHalf, WriteHalf};
|
||||
use tokio::net::{TcpStream, TcpSocket};
|
||||
use tokio::time::timeout;
|
||||
use tracing::{debug, info, warn};
|
||||
|
||||
use crate::config::MeSocksKdfPolicy;
|
||||
use crate::crypto::{SecureRandom, build_middleproxy_prekey, derive_middleproxy_keys, sha256};
|
||||
use crate::error::{ProxyError, Result};
|
||||
use crate::network::IpFamily;
|
||||
use crate::network::probe::is_bogon;
|
||||
use crate::protocol::constants::{
|
||||
ME_CONNECT_TIMEOUT_SECS, ME_HANDSHAKE_TIMEOUT_SECS, RPC_CRYPTO_AES_U32,
|
||||
RPC_HANDSHAKE_ERROR_U32, rpc_crypto_flags,
|
||||
};
|
||||
use crate::transport::{UpstreamEgressInfo, UpstreamRouteKind};
|
||||
|
||||
use super::codec::{
|
||||
RpcChecksumMode, build_handshake_payload, build_nonce_payload, build_rpc_frame,
|
||||
cbc_decrypt_inplace, cbc_encrypt_padded, parse_handshake_flags, parse_nonce_payload,
|
||||
read_rpc_frame_plaintext, rpc_crc,
|
||||
};
|
||||
use super::wire::{extract_ip_material, IpMaterial};
|
||||
use super::MePool;
|
||||
|
||||
/// When true, a detected drift in the ME KDF addressing material aborts the
/// handshake; when false (the shipped default) drift is only counted in stats
/// and logged (see `handshake_only`).
const ME_KDF_DRIFT_STRICT: bool = false;
|
||||
|
||||
/// Origin of the client-side port that is fed into the ME key derivation.
/// Hashed into the KDF-material fingerprint, so a change of source between
/// handshakes shows up as drift.
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)]
enum KdfClientPortSource {
    // Port of the local TCP socket (no usable SOCKS-bound port available).
    LocalSocket = 0,
    // Port from the SOCKS BND.ADDR reported by the upstream proxy.
    SocksBound = 1,
}
|
||||
|
||||
impl KdfClientPortSource {
|
||||
fn from_socks_bound_port(socks_bound_port: Option<u16>) -> Self {
|
||||
if socks_bound_port.is_some() {
|
||||
Self::SocksBound
|
||||
} else {
|
||||
Self::LocalSocket
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Result of a successful ME handshake with timings.
pub(crate) struct HandshakeOutput {
    // Read half of the handshaked TCP stream.
    pub rd: ReadHalf<TcpStream>,
    // Write half of the handshaked TCP stream.
    pub wr: WriteHalf<TcpStream>,
    // AES-CBC key/IV for decrypting inbound frames; the IV already reflects
    // any blocks consumed while reading the handshake reply.
    pub read_key: [u8; 32],
    pub read_iv: [u8; 16],
    // AES-CBC key/IV for encrypting outbound frames; the IV is the chained
    // state left after encrypting the handshake frame.
    pub write_key: [u8; 32],
    pub write_iv: [u8; 16],
    // Checksum mode negotiated via the handshake flags.
    pub crc_mode: RpcChecksumMode,
    // Wall-clock duration of the whole handshake, in milliseconds.
    pub handshake_ms: f64,
}
|
||||
|
||||
impl MePool {
|
||||
fn kdf_material_fingerprint(
|
||||
local_ip_nat: IpAddr,
|
||||
peer_addr_nat: SocketAddr,
|
||||
reflected_ip: Option<IpAddr>,
|
||||
socks_bound_ip: Option<IpAddr>,
|
||||
client_port_source: KdfClientPortSource,
|
||||
) -> u64 {
|
||||
let mut hasher = DefaultHasher::new();
|
||||
local_ip_nat.hash(&mut hasher);
|
||||
peer_addr_nat.hash(&mut hasher);
|
||||
reflected_ip.hash(&mut hasher);
|
||||
socks_bound_ip.hash(&mut hasher);
|
||||
client_port_source.hash(&mut hasher);
|
||||
hasher.finish()
|
||||
}
|
||||
|
||||
    /// Resolve the DC number for `addr` and narrow it to `i16`, returning
    /// `None` when the resolved value does not fit.
    async fn resolve_dc_idx_for_endpoint(&self, addr: SocketAddr) -> Option<i16> {
        i16::try_from(self.resolve_dc_for_endpoint(addr).await).ok()
    }
|
||||
|
||||
fn direct_bind_ip_for_stun(
|
||||
family: IpFamily,
|
||||
upstream_egress: Option<UpstreamEgressInfo>,
|
||||
) -> Option<IpAddr> {
|
||||
let info = upstream_egress?;
|
||||
if info.route_kind != UpstreamRouteKind::Direct {
|
||||
return None;
|
||||
}
|
||||
match (family, info.direct_bind_ip) {
|
||||
(IpFamily::V4, Some(IpAddr::V4(ip))) => Some(IpAddr::V4(ip)),
|
||||
(IpFamily::V6, Some(IpAddr::V6(ip))) => Some(IpAddr::V6(ip)),
|
||||
_ => None,
|
||||
}
|
||||
}
|
||||
|
||||
fn select_socks_bound_addr(
|
||||
family: IpFamily,
|
||||
upstream_egress: Option<UpstreamEgressInfo>,
|
||||
) -> Option<SocketAddr> {
|
||||
let info = upstream_egress?;
|
||||
if !matches!(
|
||||
info.route_kind,
|
||||
UpstreamRouteKind::Socks4 | UpstreamRouteKind::Socks5
|
||||
) {
|
||||
return None;
|
||||
}
|
||||
let bound = info.socks_bound_addr?;
|
||||
let family_matches = matches!(
|
||||
(family, bound.ip()),
|
||||
(IpFamily::V4, IpAddr::V4(_)) | (IpFamily::V6, IpAddr::V6(_))
|
||||
);
|
||||
if !family_matches || is_bogon(bound.ip()) || bound.ip().is_unspecified() {
|
||||
return None;
|
||||
}
|
||||
Some(bound)
|
||||
}
|
||||
|
||||
fn is_socks_route(upstream_egress: Option<UpstreamEgressInfo>) -> bool {
|
||||
matches!(
|
||||
upstream_egress.map(|info| info.route_kind),
|
||||
Some(UpstreamRouteKind::Socks4 | UpstreamRouteKind::Socks5)
|
||||
)
|
||||
}
|
||||
|
||||
    /// TCP connect with timeout + return RTT in milliseconds.
    ///
    /// With an upstream proxy configured the connect is delegated to it (no
    /// extra timeout is layered here). Otherwise a direct connect is made: for
    /// IPv6 targets a bind to the detected public IPv6 is attempted first,
    /// with every failure falling back to an ordinary `TcpStream::connect`.
    /// On success the socket gets TCP_NODELAY, keepalive, and (Linux only)
    /// TCP_USER_TIMEOUT; keepalive/user-timeout failures are logged, not fatal.
    pub(crate) async fn connect_tcp(
        &self,
        addr: SocketAddr,
    ) -> Result<(TcpStream, f64, Option<UpstreamEgressInfo>)> {
        let start = Instant::now();
        let (stream, upstream_egress) = if let Some(upstream) = &self.upstream {
            let dc_idx = self.resolve_dc_idx_for_endpoint(addr).await;
            let (stream, egress) = upstream.connect_with_details(addr, dc_idx, None).await?;
            (stream, Some(egress))
        } else {
            let connect_fut = async {
                // Best-effort IPv6 source binding; any error along the way
                // falls through to the plain connect below.
                if addr.is_ipv6()
                    && let Some(v6) = self.detected_ipv6
                {
                    match TcpSocket::new_v6() {
                        Ok(sock) => {
                            if let Err(e) = sock.bind(SocketAddr::new(IpAddr::V6(v6), 0)) {
                                debug!(error = %e, bind_ip = %v6, "ME IPv6 bind failed, falling back to default bind");
                            } else {
                                match sock.connect(addr).await {
                                    Ok(stream) => return Ok(stream),
                                    Err(e) => debug!(error = %e, target = %addr, "ME IPv6 bound connect failed, retrying default connect"),
                                }
                            }
                        }
                        Err(e) => debug!(error = %e, "ME IPv6 socket creation failed, falling back to default connect"),
                    }
                }
                TcpStream::connect(addr).await
            };

            // The timeout spans the whole fallback chain, not each attempt.
            let stream = timeout(Duration::from_secs(ME_CONNECT_TIMEOUT_SECS), connect_fut)
                .await
                .map_err(|_| ProxyError::ConnectionTimeout {
                    addr: addr.to_string(),
                })??;
            (stream, None)
        };

        let connect_ms = start.elapsed().as_secs_f64() * 1000.0;
        stream.set_nodelay(true).ok();
        if let Err(e) = Self::configure_keepalive(&stream) {
            warn!(error = %e, "ME keepalive setup failed");
        }
        #[cfg(target_os = "linux")]
        if let Err(e) = Self::configure_user_timeout(stream.as_raw_fd()) {
            warn!(error = %e, "ME TCP_USER_TIMEOUT setup failed");
        }
        Ok((stream, connect_ms, upstream_egress))
    }
|
||||
|
||||
    /// Enable TCP keepalive on an ME connection: first probe after 30s of
    /// idleness, then every 10s, dropping the connection after 3 unanswered
    /// probes.
    fn configure_keepalive(stream: &TcpStream) -> std::io::Result<()> {
        let sock = SockRef::from(stream);
        let ka = TcpKeepalive::new()
            .with_time(Duration::from_secs(30))
            .with_interval(Duration::from_secs(10))
            .with_retries(3);
        sock.set_tcp_keepalive(&ka)?;
        sock.set_keepalive(true)?;
        Ok(())
    }
|
||||
|
||||
    /// Set `TCP_USER_TIMEOUT` to 30s on the raw fd (Linux only): transmitted
    /// data left unacknowledged for longer than this aborts the connection
    /// instead of stalling on retransmits.
    #[cfg(target_os = "linux")]
    fn configure_user_timeout(fd: RawFd) -> std::io::Result<()> {
        let timeout_ms: c_int = 30_000;
        // SAFETY: `fd` is a valid, open socket descriptor owned by the caller's
        // live TcpStream, and the option value pointer/length describe a local
        // c_int that outlives the call.
        let rc = unsafe {
            libc::setsockopt(
                fd,
                libc::IPPROTO_TCP,
                libc::TCP_USER_TIMEOUT,
                &timeout_ms as *const _ as *const libc::c_void,
                std::mem::size_of_val(&timeout_ms) as libc::socklen_t,
            )
        };
        if rc != 0 {
            return Err(std::io::Error::last_os_error());
        }
        Ok(())
    }
|
||||
|
||||
    /// Perform full ME RPC handshake on an established TCP stream.
    /// Returns cipher keys/ivs and split halves; does not register writer.
    ///
    /// Sequence:
    /// 1. send a plaintext nonce frame (seq -2) carrying our key selector,
    /// 2. read the server nonce and validate schema / key selector / clock skew,
    /// 3. derive AES-CBC key material from both nonces plus NAT-translated
    ///    addressing (tracking fingerprint drift between handshakes),
    /// 4. send the AES-CBC-encrypted handshake frame (seq -1),
    /// 5. read the encrypted reply and negotiate the RPC checksum mode.
    pub(crate) async fn handshake_only(
        &self,
        stream: TcpStream,
        addr: SocketAddr,
        upstream_egress: Option<UpstreamEgressInfo>,
        rng: &SecureRandom,
    ) -> Result<HandshakeOutput> {
        let hs_start = Instant::now();

        let local_addr = stream.local_addr().map_err(ProxyError::Io)?;
        let transport_peer_addr = stream.peer_addr().map_err(ProxyError::Io)?;
        // `addr` is the logical ME endpoint; with an upstream proxy the
        // transport-level peer can differ.
        let peer_addr = addr;

        let _ = self.maybe_detect_nat_ip(local_addr.ip()).await;
        let family = if local_addr.ip().is_ipv4() {
            IpFamily::V4
        } else {
            IpFamily::V6
        };
        // Decide which externally visible address feeds the KDF: a SOCKS
        // BND.ADDR when available; otherwise (per policy) a STUN-reflected
        // address or nothing.
        let is_socks_route = Self::is_socks_route(upstream_egress);
        let socks_bound_addr = Self::select_socks_bound_addr(family, upstream_egress);
        let reflected = if let Some(bound) = socks_bound_addr {
            Some(bound)
        } else if is_socks_route {
            match self.socks_kdf_policy() {
                MeSocksKdfPolicy::Strict => {
                    // Strict policy refuses to guess the egress address.
                    self.stats.increment_me_socks_kdf_strict_reject();
                    return Err(ProxyError::InvalidHandshake(
                        "SOCKS route returned no valid BND.ADDR for ME KDF (strict policy)"
                            .to_string(),
                    ));
                }
                MeSocksKdfPolicy::Compat => {
                    // Compat policy falls back to STUN reflection if enabled.
                    self.stats.increment_me_socks_kdf_compat_fallback();
                    if self.nat_probe {
                        let bind_ip = Self::direct_bind_ip_for_stun(family, upstream_egress);
                        self.maybe_reflect_public_addr(family, bind_ip).await
                    } else {
                        None
                    }
                }
            }
        } else if self.nat_probe {
            let bind_ip = Self::direct_bind_ip_for_stun(family, upstream_egress);
            self.maybe_reflect_public_addr(family, bind_ip).await
        } else {
            None
        };

        // NAT-translated views of both endpoints — this is what the KDF sees.
        let local_addr_nat = self.translate_our_addr_with_reflection(local_addr, reflected);
        let peer_addr_nat = SocketAddr::new(self.translate_ip_for_nat(peer_addr.ip()), peer_addr.port());
        let (mut rd, mut wr) = tokio::io::split(stream);

        let my_nonce: [u8; 16] = rng.bytes(16).try_into().unwrap();
        let crypto_ts = std::time::SystemTime::now()
            .duration_since(std::time::UNIX_EPOCH)
            .unwrap_or_default()
            .as_secs() as u32;

        let secret_atomic_snapshot = self.secret_atomic_snapshot.load(Ordering::Relaxed);
        let (ks, secret) = if secret_atomic_snapshot {
            // Atomic mode: key selector and secret are taken from one snapshot.
            let snapshot = self.secret_snapshot().await;
            (snapshot.key_selector, snapshot.secret)
        } else {
            // Backward-compatible mode: key selector and secret may come from different updates.
            let key_selector = self.key_selector().await;
            let secret = self.secret_snapshot().await.secret;
            (key_selector, secret)
        };
        // Step 1: plaintext nonce frame, seq -2, always CRC32.
        let nonce_payload = build_nonce_payload(ks, crypto_ts, &my_nonce);
        let nonce_frame = build_rpc_frame(-2, &nonce_payload, RpcChecksumMode::Crc32);
        let dump = hex_dump(&nonce_frame[..nonce_frame.len().min(44)]);
        debug!(
            key_selector = format_args!("0x{ks:08x}"),
            crypto_ts,
            frame_len = nonce_frame.len(),
            nonce_frame_hex = %dump,
            "Sending ME nonce frame"
        );
        wr.write_all(&nonce_frame).await.map_err(ProxyError::Io)?;
        wr.flush().await.map_err(ProxyError::Io)?;

        // Step 2: server nonce (still plaintext), bounded by the handshake timeout.
        let (srv_seq, srv_nonce_payload) = timeout(
            Duration::from_secs(ME_HANDSHAKE_TIMEOUT_SECS),
            read_rpc_frame_plaintext(&mut rd),
        )
        .await
        .map_err(|_| ProxyError::TgHandshakeTimeout)??;

        if srv_seq != -2 {
            return Err(ProxyError::InvalidHandshake(format!("Expected seq=-2, got {srv_seq}")));
        }

        let (srv_key_select, schema, srv_ts, srv_nonce) = parse_nonce_payload(&srv_nonce_payload)?;
        if schema != RPC_CRYPTO_AES_U32 {
            warn!(schema = format_args!("0x{schema:08x}"), "Unsupported ME crypto schema");
            return Err(ProxyError::InvalidHandshake(format!(
                "Unsupported crypto schema: 0x{schema:x}"
            )));
        }

        if srv_key_select != ks {
            return Err(ProxyError::InvalidHandshake(format!(
                "Server key_select 0x{srv_key_select:08x} != client 0x{ks:08x}"
            )));
        }

        // Reject clocks more than 30s apart — both sides bind the timestamp
        // into key derivation.
        let skew = crypto_ts.abs_diff(srv_ts);
        if skew > 30 {
            return Err(ProxyError::InvalidHandshake(format!(
                "nonce crypto_ts skew too large: client={crypto_ts}, server={srv_ts}, skew={skew}s"
            )));
        }

        info!(
            %local_addr,
            %local_addr_nat,
            reflected_ip = reflected.map(|r| r.ip()).as_ref().map(ToString::to_string),
            %peer_addr,
            %transport_peer_addr,
            %peer_addr_nat,
            socks_bound_addr = socks_bound_addr.map(|v| v.to_string()),
            key_selector = format_args!("0x{ks:08x}"),
            crypto_schema = format_args!("0x{schema:08x}"),
            skew_secs = skew,
            "ME key derivation parameters"
        );

        // Step 3: assemble KDF inputs. Ports/timestamps are little-endian.
        let ts_bytes = crypto_ts.to_le_bytes();
        let server_port_bytes = peer_addr_nat.port().to_le_bytes();
        // Port 0 from SOCKS is treated as "not reported".
        let socks_bound_port = socks_bound_addr
            .map(|bound| bound.port())
            .filter(|port| *port != 0);
        let client_port_for_kdf = socks_bound_port.unwrap_or(local_addr_nat.port());
        let client_port_source = KdfClientPortSource::from_socks_bound_port(socks_bound_port);
        // Drift diagnostics: compare the addressing material used now with the
        // material recorded for this endpoint on previous handshakes.
        let kdf_fingerprint = Self::kdf_material_fingerprint(
            local_addr_nat.ip(),
            peer_addr_nat,
            reflected.map(|value| value.ip()),
            socks_bound_addr.map(|value| value.ip()),
            client_port_source,
        );
        let previous_kdf_fingerprint = {
            let kdf_fingerprint_guard = self.kdf_material_fingerprint.read().await;
            kdf_fingerprint_guard.get(&peer_addr_nat).copied()
        };
        if let Some((prev_fingerprint, prev_client_port)) = previous_kdf_fingerprint
        {
            if prev_fingerprint != kdf_fingerprint {
                self.stats.increment_me_kdf_drift_total();
                warn!(
                    %peer_addr_nat,
                    %local_addr_nat,
                    client_port_for_kdf,
                    client_port_source = ?client_port_source,
                    "ME KDF material drift detected for endpoint"
                );
                if ME_KDF_DRIFT_STRICT {
                    return Err(ProxyError::InvalidHandshake(
                        "ME KDF material drift detected (strict mode)".to_string(),
                    ));
                }
            } else if prev_client_port != client_port_for_kdf {
                // Same material, different port — expected with ephemeral
                // local ports; tracked separately at debug level.
                self.stats.increment_me_kdf_port_only_drift_total();
                debug!(
                    %peer_addr_nat,
                    previous_client_port_for_kdf = prev_client_port,
                    client_port_for_kdf,
                    client_port_source = ?client_port_source,
                    "ME KDF client port changed with stable material"
                );
            }
        }
        // Keep fingerprint updates eventually consistent for diagnostics while avoiding
        // serializing all concurrent handshakes on a single async mutex.
        let mut kdf_fingerprint_guard = self.kdf_material_fingerprint.write().await;
        kdf_fingerprint_guard.insert(peer_addr_nat, (kdf_fingerprint, client_port_for_kdf));
        drop(kdf_fingerprint_guard);

        let client_port_bytes = client_port_for_kdf.to_le_bytes();

        let server_ip = extract_ip_material(peer_addr_nat);
        let client_ip = extract_ip_material(local_addr_nat);

        // IPv4 material is byte-reversed before key derivation; IPv6 uses
        // separate slots and zeroed v4 fields in the handshake payload.
        // Mixed families cannot be keyed and are rejected.
        let (srv_ip_opt, clt_ip_opt, clt_v6_opt, srv_v6_opt, hs_our_ip, hs_peer_ip) = match (server_ip, client_ip) {
            (IpMaterial::V4(mut srv), IpMaterial::V4(mut clt)) => {
                srv.reverse();
                clt.reverse();
                (Some(srv), Some(clt), None, None, clt, srv)
            }
            (IpMaterial::V6(srv), IpMaterial::V6(clt)) => {
                let zero = [0u8; 4];
                (None, None, Some(clt), Some(srv), zero, zero)
            }
            _ => {
                return Err(ProxyError::InvalidHandshake(
                    "mixed IPv4/IPv6 endpoints are not supported for ME key derivation".to_string(),
                ));
            }
        };

        // ME_DIAG env var (0/1/2) gates the key-material diagnostics below.
        let diag_level: u8 = std::env::var("ME_DIAG").ok().and_then(|v| v.parse().ok()).unwrap_or(0);

        // Prekeys are built only for diagnostics hashing; the actual keys come
        // from derive_middleproxy_keys with the same inputs. The b"CLIENT" /
        // b"SERVER" tags select the write vs read direction.
        let prekey_client = build_middleproxy_prekey(
            &srv_nonce,
            &my_nonce,
            &ts_bytes,
            srv_ip_opt.as_ref().map(|x| &x[..]),
            &client_port_bytes,
            b"CLIENT",
            clt_ip_opt.as_ref().map(|x| &x[..]),
            &server_port_bytes,
            &secret,
            clt_v6_opt.as_ref(),
            srv_v6_opt.as_ref(),
        );
        let prekey_server = build_middleproxy_prekey(
            &srv_nonce,
            &my_nonce,
            &ts_bytes,
            srv_ip_opt.as_ref().map(|x| &x[..]),
            &client_port_bytes,
            b"SERVER",
            clt_ip_opt.as_ref().map(|x| &x[..]),
            &server_port_bytes,
            &secret,
            clt_v6_opt.as_ref(),
            srv_v6_opt.as_ref(),
        );

        let (wk, wi) = derive_middleproxy_keys(
            &srv_nonce,
            &my_nonce,
            &ts_bytes,
            srv_ip_opt.as_ref().map(|x| &x[..]),
            &client_port_bytes,
            b"CLIENT",
            clt_ip_opt.as_ref().map(|x| &x[..]),
            &server_port_bytes,
            &secret,
            clt_v6_opt.as_ref(),
            srv_v6_opt.as_ref(),
        );
        let (rk, ri) = derive_middleproxy_keys(
            &srv_nonce,
            &my_nonce,
            &ts_bytes,
            srv_ip_opt.as_ref().map(|x| &x[..]),
            &client_port_bytes,
            b"SERVER",
            clt_ip_opt.as_ref().map(|x| &x[..]),
            &server_port_bytes,
            &secret,
            clt_v6_opt.as_ref(),
            srv_v6_opt.as_ref(),
        );

        // Step 4: encrypted handshake frame, seq -1. We advertise CRC32C but
        // the frame itself is checksummed with plain CRC32.
        let requested_crc_mode = RpcChecksumMode::Crc32c;
        let hs_payload = build_handshake_payload(
            hs_our_ip,
            local_addr.port(),
            hs_peer_ip,
            peer_addr.port(),
            requested_crc_mode.advertised_flags(),
        );
        let hs_frame = build_rpc_frame(-1, &hs_payload, RpcChecksumMode::Crc32);
        if diag_level >= 1 {
            info!(
                write_key = %hex_dump(&wk),
                write_iv = %hex_dump(&wi),
                read_key = %hex_dump(&rk),
                read_iv = %hex_dump(&ri),
                srv_ip = %srv_ip_opt.map(|ip| hex_dump(&ip)).unwrap_or_default(),
                clt_ip = %clt_ip_opt.map(|ip| hex_dump(&ip)).unwrap_or_default(),
                srv_port = %hex_dump(&server_port_bytes),
                clt_port = %hex_dump(&client_port_bytes),
                crypto_ts = %hex_dump(&ts_bytes),
                nonce_srv = %hex_dump(&srv_nonce),
                nonce_clt = %hex_dump(&my_nonce),
                prekey_sha256_client = %hex_dump(&sha256(&prekey_client)),
                prekey_sha256_server = %hex_dump(&sha256(&prekey_server)),
                hs_plain = %hex_dump(&hs_frame),
                proxy_secret_sha256 = %hex_dump(&sha256(&secret)),
                "ME diag: derived keys and handshake plaintext"
            );
        }
        if diag_level >= 2 {
            info!(
                prekey_client = %hex_dump(&prekey_client),
                prekey_server = %hex_dump(&prekey_server),
                "ME diag: full prekey buffers"
            );
        }

        // `write_iv` is the chained CBC state after this frame — it becomes
        // the starting IV for all subsequent outbound traffic.
        let (encrypted_hs, write_iv) = cbc_encrypt_padded(&wk, &wi, &hs_frame)?;
        if diag_level >= 1 {
            info!(
                hs_cipher = %hex_dump(&encrypted_hs),
                "ME diag: handshake ciphertext"
            );
        }
        wr.write_all(&encrypted_hs).await.map_err(ProxyError::Io)?;
        wr.flush().await.map_err(ProxyError::Io)?;

        // Step 5: read the encrypted reply. Bytes arrive in arbitrary chunks,
        // so accumulate ciphertext, decrypt in whole 16-byte CBC blocks, and
        // scan the plaintext for complete RPC frames.
        let deadline = Instant::now() + Duration::from_secs(ME_HANDSHAKE_TIMEOUT_SECS);
        let mut enc_buf = BytesMut::with_capacity(256);
        let mut dec_buf = BytesMut::with_capacity(256);
        let mut read_iv = ri;
        let mut negotiated_crc_mode = RpcChecksumMode::Crc32;
        let mut handshake_ok = false;

        while Instant::now() < deadline && !handshake_ok {
            let remaining = deadline - Instant::now();
            let mut tmp = [0u8; 256];
            let n = match timeout(remaining, rd.read(&mut tmp)).await {
                Ok(Ok(0)) => {
                    return Err(ProxyError::Io(std::io::Error::new(
                        std::io::ErrorKind::UnexpectedEof,
                        "ME closed during handshake",
                    )));
                }
                Ok(Ok(n)) => n,
                Ok(Err(e)) => return Err(ProxyError::Io(e)),
                Err(_) => return Err(ProxyError::TgHandshakeTimeout),
            };

            enc_buf.extend_from_slice(&tmp[..n]);

            // Decrypt only complete AES blocks; the returned IV chains into
            // the next decryption.
            let blocks = enc_buf.len() / 16 * 16;
            if blocks > 0 {
                let mut chunk = vec![0u8; blocks];
                chunk.copy_from_slice(&enc_buf[..blocks]);
                read_iv = cbc_decrypt_inplace(&rk, &read_iv, &mut chunk)?;
                dec_buf.extend_from_slice(&chunk);
                let _ = enc_buf.split_to(blocks);
            }

            while dec_buf.len() >= 4 {
                // Frame layout: u32 LE total length, 4 bytes seq, payload,
                // trailing u32 LE checksum.
                let fl = u32::from_le_bytes(dec_buf[0..4].try_into().unwrap()) as usize;

                if fl == 4 {
                    // Length-only padding frame — skip it.
                    let _ = dec_buf.split_to(4);
                    continue;
                }
                if !(12..=(1 << 24)).contains(&fl) {
                    return Err(ProxyError::InvalidHandshake(format!(
                        "Bad HS response frame len: {fl}"
                    )));
                }
                if dec_buf.len() < fl {
                    // Frame not fully decrypted yet — read more.
                    break;
                }

                let frame = dec_buf.split_to(fl);
                let pe = fl - 4;
                // The handshake reply is always validated with plain CRC32.
                let ec = u32::from_le_bytes(frame[pe..pe + 4].try_into().unwrap());
                let ac = rpc_crc(RpcChecksumMode::Crc32, &frame[..pe]);
                if ec != ac {
                    return Err(ProxyError::InvalidHandshake(format!(
                        "HS CRC mismatch: 0x{ec:08x} vs 0x{ac:08x}"
                    )));
                }

                let hs_payload = &frame[8..pe];
                if hs_payload.len() < 4 {
                    return Err(ProxyError::InvalidHandshake(
                        "Handshake payload too short".to_string(),
                    ));
                }
                let hs_type = u32::from_le_bytes(hs_payload[0..4].try_into().unwrap());
                if hs_type == RPC_HANDSHAKE_ERROR_U32 {
                    // Explicit rejection from the middle proxy, with an
                    // optional i32 error code.
                    let err_code = if hs_payload.len() >= 8 {
                        i32::from_le_bytes(hs_payload[4..8].try_into().unwrap())
                    } else {
                        -1
                    };
                    self.stats.increment_me_handshake_reject_total();
                    self.stats.increment_me_handshake_error_code(err_code);
                    return Err(ProxyError::InvalidHandshake(format!(
                        "ME rejected handshake (error={err_code})"
                    )));
                }
                let hs_flags = parse_handshake_flags(hs_payload)?;
                if hs_flags & 0xff != 0 {
                    return Err(ProxyError::InvalidHandshake(format!(
                        "Unsupported handshake flags: 0x{hs_flags:08x}"
                    )));
                }
                // Accept the peer's CRC choice only if it overlaps what we
                // advertised; otherwise fall back to CRC32 (and reject an
                // unsolicited CRC32C grant).
                negotiated_crc_mode = if (hs_flags & requested_crc_mode.advertised_flags()) != 0 {
                    RpcChecksumMode::from_handshake_flags(hs_flags)
                } else if (hs_flags & rpc_crypto_flags::USE_CRC32C) != 0 {
                    return Err(ProxyError::InvalidHandshake(format!(
                        "Peer negotiated unsupported CRC flags: 0x{hs_flags:08x}"
                    )));
                } else {
                    RpcChecksumMode::Crc32
                };

                handshake_ok = true;
                break;
            }
        }

        if !handshake_ok {
            return Err(ProxyError::TgHandshakeTimeout);
        }

        let handshake_ms = hs_start.elapsed().as_secs_f64() * 1000.0;
        info!(%addr, "RPC handshake OK");

        Ok(HandshakeOutput {
            rd,
            wr,
            read_key: rk,
            read_iv,
            write_key: wk,
            write_iv,
            crc_mode: negotiated_crc_mode,
            handshake_ms,
        })
    }
|
||||
}
|
||||
|
||||
/// Render up to the first 64 bytes of `data` as space-separated lowercase hex
/// pairs (e.g. `"de ad be ef"`), appending `" …"` when the input was truncated.
///
/// Intended for debug/trace logging of binary frames; never fails.
fn hex_dump(data: &[u8]) -> String {
    use std::fmt::Write as _;

    const MAX: usize = 64;
    let shown = data.len().min(MAX);
    // 2 hex chars per byte + 1 separator between pairs + room for " …".
    // (The original reserved only len*2+3, forcing a regrow for the spaces.)
    let mut out = String::with_capacity(shown * 3 + 4);
    for (i, b) in data.iter().take(MAX).enumerate() {
        if i > 0 {
            out.push(' ');
        }
        // write! appends into the existing String; the original's
        // `push_str(&format!(..))` allocated a temporary String per byte.
        // Writing to a String cannot fail, so the Result is ignored.
        let _ = write!(out, "{b:02x}");
    }
    if data.len() > MAX {
        out.push_str(" …");
    }
    out
}
|
||||
@@ -1,38 +1,696 @@
|
||||
use std::collections::HashMap;
|
||||
use std::collections::HashSet;
|
||||
use std::net::SocketAddr;
|
||||
use std::sync::Arc;
|
||||
use std::time::Duration;
|
||||
use std::time::{Duration, Instant};
|
||||
|
||||
use rand::Rng;
|
||||
use tracing::{debug, info, warn};
|
||||
|
||||
use crate::config::MeFloorMode;
|
||||
use crate::crypto::SecureRandom;
|
||||
use crate::protocol::constants::TG_MIDDLE_PROXIES_FLAT_V4;
|
||||
use crate::network::IpFamily;
|
||||
|
||||
use super::MePool;
|
||||
|
||||
pub async fn me_health_monitor(pool: Arc<MePool>, rng: Arc<SecureRandom>, min_connections: usize) {
|
||||
/// Cadence of one health-monitor pass over both IP families.
const HEALTH_INTERVAL_SECS: u64 = 1;
/// Jitter divisor applied to backoff delays: extra random delay up to
/// backoff / 2.
const JITTER_FRAC_NUM: u64 = 2; // jitter up to 50% of backoff
/// Default cap on concurrent reconnect attempts per DC (currently unused;
/// the runtime value comes from pool configuration).
#[allow(dead_code)]
const MAX_CONCURRENT_PER_DC_DEFAULT: usize = 1;
/// Retry delay when a single-endpoint shadow rotation could not proceed.
const SHADOW_ROTATE_RETRY_SECS: u64 = 30;
/// Base idle age after which an idle writer becomes a refresh candidate.
const IDLE_REFRESH_TRIGGER_BASE_SECS: u64 = 45;
/// Per-writer jitter (derived from the writer id) added to the base trigger
/// so writers of one DC do not all refresh in the same tick.
const IDLE_REFRESH_TRIGGER_JITTER_SECS: u64 = 5;
/// Retry delay after a failed idle-refresh connect attempt.
const IDLE_REFRESH_RETRY_SECS: u64 = 8;
/// Guard window after a successful idle refresh before considering another
/// refresh for the same (dc, family) key.
const IDLE_REFRESH_SUCCESS_GUARD_SECS: u64 = 5;
|
||||
|
||||
pub async fn me_health_monitor(pool: Arc<MePool>, rng: Arc<SecureRandom>, _min_connections: usize) {
|
||||
let mut backoff: HashMap<(i32, IpFamily), u64> = HashMap::new();
|
||||
let mut next_attempt: HashMap<(i32, IpFamily), Instant> = HashMap::new();
|
||||
let mut inflight: HashMap<(i32, IpFamily), usize> = HashMap::new();
|
||||
let mut outage_backoff: HashMap<(i32, IpFamily), u64> = HashMap::new();
|
||||
let mut outage_next_attempt: HashMap<(i32, IpFamily), Instant> = HashMap::new();
|
||||
let mut single_endpoint_outage: HashSet<(i32, IpFamily)> = HashSet::new();
|
||||
let mut shadow_rotate_deadline: HashMap<(i32, IpFamily), Instant> = HashMap::new();
|
||||
let mut idle_refresh_next_attempt: HashMap<(i32, IpFamily), Instant> = HashMap::new();
|
||||
let mut adaptive_idle_since: HashMap<(i32, IpFamily), Instant> = HashMap::new();
|
||||
let mut adaptive_recover_until: HashMap<(i32, IpFamily), Instant> = HashMap::new();
|
||||
loop {
|
||||
tokio::time::sleep(Duration::from_secs(30)).await;
|
||||
let current = pool.connection_count();
|
||||
if current < min_connections {
|
||||
warn!(
|
||||
current,
|
||||
min = min_connections,
|
||||
"ME pool below minimum, reconnecting..."
|
||||
tokio::time::sleep(Duration::from_secs(HEALTH_INTERVAL_SECS)).await;
|
||||
pool.prune_closed_writers().await;
|
||||
check_family(
|
||||
IpFamily::V4,
|
||||
&pool,
|
||||
&rng,
|
||||
&mut backoff,
|
||||
&mut next_attempt,
|
||||
&mut inflight,
|
||||
&mut outage_backoff,
|
||||
&mut outage_next_attempt,
|
||||
&mut single_endpoint_outage,
|
||||
&mut shadow_rotate_deadline,
|
||||
&mut idle_refresh_next_attempt,
|
||||
&mut adaptive_idle_since,
|
||||
&mut adaptive_recover_until,
|
||||
)
|
||||
.await;
|
||||
check_family(
|
||||
IpFamily::V6,
|
||||
&pool,
|
||||
&rng,
|
||||
&mut backoff,
|
||||
&mut next_attempt,
|
||||
&mut inflight,
|
||||
&mut outage_backoff,
|
||||
&mut outage_next_attempt,
|
||||
&mut single_endpoint_outage,
|
||||
&mut shadow_rotate_deadline,
|
||||
&mut idle_refresh_next_attempt,
|
||||
&mut adaptive_idle_since,
|
||||
&mut adaptive_recover_until,
|
||||
)
|
||||
.await;
|
||||
}
|
||||
}
|
||||
|
||||
async fn check_family(
|
||||
family: IpFamily,
|
||||
pool: &Arc<MePool>,
|
||||
rng: &Arc<SecureRandom>,
|
||||
backoff: &mut HashMap<(i32, IpFamily), u64>,
|
||||
next_attempt: &mut HashMap<(i32, IpFamily), Instant>,
|
||||
inflight: &mut HashMap<(i32, IpFamily), usize>,
|
||||
outage_backoff: &mut HashMap<(i32, IpFamily), u64>,
|
||||
outage_next_attempt: &mut HashMap<(i32, IpFamily), Instant>,
|
||||
single_endpoint_outage: &mut HashSet<(i32, IpFamily)>,
|
||||
shadow_rotate_deadline: &mut HashMap<(i32, IpFamily), Instant>,
|
||||
idle_refresh_next_attempt: &mut HashMap<(i32, IpFamily), Instant>,
|
||||
adaptive_idle_since: &mut HashMap<(i32, IpFamily), Instant>,
|
||||
adaptive_recover_until: &mut HashMap<(i32, IpFamily), Instant>,
|
||||
) {
|
||||
let enabled = match family {
|
||||
IpFamily::V4 => pool.decision.ipv4_me,
|
||||
IpFamily::V6 => pool.decision.ipv6_me,
|
||||
};
|
||||
if !enabled {
|
||||
return;
|
||||
}
|
||||
|
||||
let map = match family {
|
||||
IpFamily::V4 => pool.proxy_map_v4.read().await.clone(),
|
||||
IpFamily::V6 => pool.proxy_map_v6.read().await.clone(),
|
||||
};
|
||||
|
||||
let mut dc_endpoints = HashMap::<i32, Vec<SocketAddr>>::new();
|
||||
for (dc, addrs) in map {
|
||||
let entry = dc_endpoints.entry(dc).or_default();
|
||||
for (ip, port) in addrs {
|
||||
entry.push(SocketAddr::new(ip, port));
|
||||
}
|
||||
}
|
||||
for endpoints in dc_endpoints.values_mut() {
|
||||
endpoints.sort_unstable();
|
||||
endpoints.dedup();
|
||||
}
|
||||
|
||||
if pool.floor_mode() == MeFloorMode::Static {
|
||||
adaptive_idle_since.clear();
|
||||
adaptive_recover_until.clear();
|
||||
}
|
||||
|
||||
let mut live_addr_counts = HashMap::<SocketAddr, usize>::new();
|
||||
let mut live_writer_ids_by_addr = HashMap::<SocketAddr, Vec<u64>>::new();
|
||||
for writer in pool.writers.read().await.iter().filter(|w| {
|
||||
!w.draining.load(std::sync::atomic::Ordering::Relaxed)
|
||||
}) {
|
||||
*live_addr_counts.entry(writer.addr).or_insert(0) += 1;
|
||||
live_writer_ids_by_addr
|
||||
.entry(writer.addr)
|
||||
.or_default()
|
||||
.push(writer.id);
|
||||
}
|
||||
let writer_idle_since = pool.registry.writer_idle_since_snapshot().await;
|
||||
|
||||
for (dc, endpoints) in dc_endpoints {
|
||||
if endpoints.is_empty() {
|
||||
continue;
|
||||
}
|
||||
let key = (dc, family);
|
||||
let reduce_for_idle = should_reduce_floor_for_idle(
|
||||
pool,
|
||||
key,
|
||||
&endpoints,
|
||||
&live_writer_ids_by_addr,
|
||||
adaptive_idle_since,
|
||||
adaptive_recover_until,
|
||||
)
|
||||
.await;
|
||||
let required = pool.required_writers_for_dc_with_floor_mode(endpoints.len(), reduce_for_idle);
|
||||
let alive = endpoints
|
||||
.iter()
|
||||
.map(|addr| *live_addr_counts.get(addr).unwrap_or(&0))
|
||||
.sum::<usize>();
|
||||
|
||||
if endpoints.len() == 1 && pool.single_endpoint_outage_mode_enabled() && alive == 0 {
|
||||
if single_endpoint_outage.insert(key) {
|
||||
pool.stats.increment_me_single_endpoint_outage_enter_total();
|
||||
warn!(
|
||||
dc = %dc,
|
||||
?family,
|
||||
required,
|
||||
endpoint_count = endpoints.len(),
|
||||
"Single-endpoint DC outage detected"
|
||||
);
|
||||
}
|
||||
|
||||
recover_single_endpoint_outage(
|
||||
pool,
|
||||
rng,
|
||||
key,
|
||||
endpoints[0],
|
||||
required,
|
||||
outage_backoff,
|
||||
outage_next_attempt,
|
||||
)
|
||||
.await;
|
||||
continue;
|
||||
}
|
||||
|
||||
if single_endpoint_outage.remove(&key) {
|
||||
pool.stats.increment_me_single_endpoint_outage_exit_total();
|
||||
outage_backoff.remove(&key);
|
||||
outage_next_attempt.remove(&key);
|
||||
shadow_rotate_deadline.remove(&key);
|
||||
idle_refresh_next_attempt.remove(&key);
|
||||
adaptive_idle_since.remove(&key);
|
||||
adaptive_recover_until.remove(&key);
|
||||
info!(
|
||||
dc = %dc,
|
||||
?family,
|
||||
alive,
|
||||
required,
|
||||
endpoint_count = endpoints.len(),
|
||||
"Single-endpoint DC outage recovered"
|
||||
);
|
||||
let addrs = TG_MIDDLE_PROXIES_FLAT_V4.clone();
|
||||
for &(ip, port) in addrs.iter() {
|
||||
let needed = min_connections.saturating_sub(pool.connection_count());
|
||||
if needed == 0 {
|
||||
break;
|
||||
}
|
||||
|
||||
if alive >= required {
|
||||
maybe_refresh_idle_writer_for_dc(
|
||||
pool,
|
||||
rng,
|
||||
key,
|
||||
dc,
|
||||
family,
|
||||
&endpoints,
|
||||
alive,
|
||||
required,
|
||||
&live_writer_ids_by_addr,
|
||||
&writer_idle_since,
|
||||
idle_refresh_next_attempt,
|
||||
)
|
||||
.await;
|
||||
maybe_rotate_single_endpoint_shadow(
|
||||
pool,
|
||||
rng,
|
||||
key,
|
||||
dc,
|
||||
family,
|
||||
&endpoints,
|
||||
alive,
|
||||
required,
|
||||
&live_writer_ids_by_addr,
|
||||
shadow_rotate_deadline,
|
||||
)
|
||||
.await;
|
||||
continue;
|
||||
}
|
||||
let missing = required - alive;
|
||||
|
||||
let now = Instant::now();
|
||||
if let Some(ts) = next_attempt.get(&key)
|
||||
&& now < *ts
|
||||
{
|
||||
continue;
|
||||
}
|
||||
|
||||
let max_concurrent = pool.me_reconnect_max_concurrent_per_dc.max(1) as usize;
|
||||
if *inflight.get(&key).unwrap_or(&0) >= max_concurrent {
|
||||
continue;
|
||||
}
|
||||
if pool.has_refill_inflight_for_endpoints(&endpoints).await {
|
||||
debug!(
|
||||
dc = %dc,
|
||||
?family,
|
||||
alive,
|
||||
required,
|
||||
endpoint_count = endpoints.len(),
|
||||
"Skipping health reconnect: immediate refill is already in flight for this DC group"
|
||||
);
|
||||
continue;
|
||||
}
|
||||
*inflight.entry(key).or_insert(0) += 1;
|
||||
|
||||
let mut restored = 0usize;
|
||||
for _ in 0..missing {
|
||||
let res = tokio::time::timeout(
|
||||
pool.me_one_timeout,
|
||||
pool.connect_endpoints_round_robin(&endpoints, rng.as_ref()),
|
||||
)
|
||||
.await;
|
||||
match res {
|
||||
Ok(true) => {
|
||||
restored += 1;
|
||||
pool.stats.increment_me_reconnect_success();
|
||||
}
|
||||
for _ in 0..needed {
|
||||
let addr = SocketAddr::new(ip, port);
|
||||
match pool.connect_one(addr, &rng).await {
|
||||
Ok(()) => info!(%addr, "ME reconnected"),
|
||||
Err(e) => debug!(%addr, error = %e, "ME reconnect failed"),
|
||||
}
|
||||
Ok(false) => {
|
||||
pool.stats.increment_me_reconnect_attempt();
|
||||
debug!(dc = %dc, ?family, "ME round-robin reconnect failed")
|
||||
}
|
||||
Err(_) => {
|
||||
pool.stats.increment_me_reconnect_attempt();
|
||||
debug!(dc = %dc, ?family, "ME reconnect timed out");
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
let now_alive = alive + restored;
|
||||
if now_alive >= required {
|
||||
info!(
|
||||
dc = %dc,
|
||||
?family,
|
||||
alive = now_alive,
|
||||
required,
|
||||
endpoint_count = endpoints.len(),
|
||||
"ME writer floor restored for DC"
|
||||
);
|
||||
backoff.insert(key, pool.me_reconnect_backoff_base.as_millis() as u64);
|
||||
let jitter = pool.me_reconnect_backoff_base.as_millis() as u64 / JITTER_FRAC_NUM;
|
||||
let wait = pool.me_reconnect_backoff_base
|
||||
+ Duration::from_millis(rand::rng().random_range(0..=jitter.max(1)));
|
||||
next_attempt.insert(key, now + wait);
|
||||
} else {
|
||||
let curr = *backoff.get(&key).unwrap_or(&(pool.me_reconnect_backoff_base.as_millis() as u64));
|
||||
let next_ms = (curr.saturating_mul(2)).min(pool.me_reconnect_backoff_cap.as_millis() as u64);
|
||||
backoff.insert(key, next_ms);
|
||||
let jitter = next_ms / JITTER_FRAC_NUM;
|
||||
let wait = Duration::from_millis(next_ms)
|
||||
+ Duration::from_millis(rand::rng().random_range(0..=jitter.max(1)));
|
||||
next_attempt.insert(key, now + wait);
|
||||
if pool.is_runtime_ready() {
|
||||
warn!(
|
||||
dc = %dc,
|
||||
?family,
|
||||
alive = now_alive,
|
||||
required,
|
||||
endpoint_count = endpoints.len(),
|
||||
backoff_ms = next_ms,
|
||||
"DC writer floor is below required level, scheduled reconnect"
|
||||
);
|
||||
} else {
|
||||
info!(
|
||||
dc = %dc,
|
||||
?family,
|
||||
alive = now_alive,
|
||||
required,
|
||||
endpoint_count = endpoints.len(),
|
||||
backoff_ms = next_ms,
|
||||
"DC writer floor is below required level during startup, scheduled reconnect"
|
||||
);
|
||||
}
|
||||
}
|
||||
if let Some(v) = inflight.get_mut(&key) {
|
||||
*v = v.saturating_sub(1);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
async fn maybe_refresh_idle_writer_for_dc(
|
||||
pool: &Arc<MePool>,
|
||||
rng: &Arc<SecureRandom>,
|
||||
key: (i32, IpFamily),
|
||||
dc: i32,
|
||||
family: IpFamily,
|
||||
endpoints: &[SocketAddr],
|
||||
alive: usize,
|
||||
required: usize,
|
||||
live_writer_ids_by_addr: &HashMap<SocketAddr, Vec<u64>>,
|
||||
writer_idle_since: &HashMap<u64, u64>,
|
||||
idle_refresh_next_attempt: &mut HashMap<(i32, IpFamily), Instant>,
|
||||
) {
|
||||
if alive < required {
|
||||
return;
|
||||
}
|
||||
|
||||
let now = Instant::now();
|
||||
if let Some(next) = idle_refresh_next_attempt.get(&key)
|
||||
&& now < *next
|
||||
{
|
||||
return;
|
||||
}
|
||||
|
||||
let now_epoch_secs = MePool::now_epoch_secs();
|
||||
let mut candidate: Option<(u64, SocketAddr, u64, u64)> = None;
|
||||
for endpoint in endpoints {
|
||||
let Some(writer_ids) = live_writer_ids_by_addr.get(endpoint) else {
|
||||
continue;
|
||||
};
|
||||
for writer_id in writer_ids {
|
||||
let Some(idle_since_epoch_secs) = writer_idle_since.get(writer_id).copied() else {
|
||||
continue;
|
||||
};
|
||||
let idle_age_secs = now_epoch_secs.saturating_sub(idle_since_epoch_secs);
|
||||
let threshold_secs = IDLE_REFRESH_TRIGGER_BASE_SECS
|
||||
+ (*writer_id % (IDLE_REFRESH_TRIGGER_JITTER_SECS + 1));
|
||||
if idle_age_secs < threshold_secs {
|
||||
continue;
|
||||
}
|
||||
if candidate
|
||||
.as_ref()
|
||||
.map(|(_, _, age, _)| idle_age_secs > *age)
|
||||
.unwrap_or(true)
|
||||
{
|
||||
candidate = Some((*writer_id, *endpoint, idle_age_secs, threshold_secs));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
let Some((old_writer_id, endpoint, idle_age_secs, threshold_secs)) = candidate else {
|
||||
return;
|
||||
};
|
||||
|
||||
let rotate_ok = match tokio::time::timeout(pool.me_one_timeout, pool.connect_one(endpoint, rng.as_ref())).await {
|
||||
Ok(Ok(())) => true,
|
||||
Ok(Err(error)) => {
|
||||
debug!(
|
||||
dc = %dc,
|
||||
?family,
|
||||
%endpoint,
|
||||
old_writer_id,
|
||||
idle_age_secs,
|
||||
threshold_secs,
|
||||
%error,
|
||||
"Idle writer pre-refresh connect failed"
|
||||
);
|
||||
false
|
||||
}
|
||||
Err(_) => {
|
||||
debug!(
|
||||
dc = %dc,
|
||||
?family,
|
||||
%endpoint,
|
||||
old_writer_id,
|
||||
idle_age_secs,
|
||||
threshold_secs,
|
||||
"Idle writer pre-refresh connect timed out"
|
||||
);
|
||||
false
|
||||
}
|
||||
};
|
||||
|
||||
if !rotate_ok {
|
||||
idle_refresh_next_attempt.insert(key, now + Duration::from_secs(IDLE_REFRESH_RETRY_SECS));
|
||||
return;
|
||||
}
|
||||
|
||||
pool.mark_writer_draining_with_timeout(old_writer_id, pool.force_close_timeout(), false)
|
||||
.await;
|
||||
idle_refresh_next_attempt.insert(
|
||||
key,
|
||||
now + Duration::from_secs(IDLE_REFRESH_SUCCESS_GUARD_SECS),
|
||||
);
|
||||
info!(
|
||||
dc = %dc,
|
||||
?family,
|
||||
%endpoint,
|
||||
old_writer_id,
|
||||
idle_age_secs,
|
||||
threshold_secs,
|
||||
alive,
|
||||
required,
|
||||
"Idle writer refreshed before upstream idle timeout"
|
||||
);
|
||||
}
|
||||
|
||||
async fn should_reduce_floor_for_idle(
|
||||
pool: &Arc<MePool>,
|
||||
key: (i32, IpFamily),
|
||||
endpoints: &[SocketAddr],
|
||||
live_writer_ids_by_addr: &HashMap<SocketAddr, Vec<u64>>,
|
||||
adaptive_idle_since: &mut HashMap<(i32, IpFamily), Instant>,
|
||||
adaptive_recover_until: &mut HashMap<(i32, IpFamily), Instant>,
|
||||
) -> bool {
|
||||
if endpoints.len() != 1 || pool.floor_mode() != MeFloorMode::Adaptive {
|
||||
adaptive_idle_since.remove(&key);
|
||||
adaptive_recover_until.remove(&key);
|
||||
return false;
|
||||
}
|
||||
|
||||
let now = Instant::now();
|
||||
let endpoint = endpoints[0];
|
||||
let writer_ids = live_writer_ids_by_addr
|
||||
.get(&endpoint)
|
||||
.map(Vec::as_slice)
|
||||
.unwrap_or(&[]);
|
||||
let has_bound_clients = has_bound_clients_on_endpoint(pool, writer_ids).await;
|
||||
if has_bound_clients {
|
||||
adaptive_idle_since.remove(&key);
|
||||
adaptive_recover_until.insert(key, now + pool.adaptive_floor_recover_grace_duration());
|
||||
return false;
|
||||
}
|
||||
|
||||
if let Some(recover_until) = adaptive_recover_until.get(&key)
|
||||
&& now < *recover_until
|
||||
{
|
||||
adaptive_idle_since.remove(&key);
|
||||
return false;
|
||||
}
|
||||
adaptive_recover_until.remove(&key);
|
||||
|
||||
let idle_since = adaptive_idle_since.entry(key).or_insert(now);
|
||||
now.saturating_duration_since(*idle_since) >= pool.adaptive_floor_idle_duration()
|
||||
}
|
||||
|
||||
/// True if any of the given writers still has at least one client bound to it
/// in the connection registry.
async fn has_bound_clients_on_endpoint(pool: &Arc<MePool>, writer_ids: &[u64]) -> bool {
    for &id in writer_ids {
        let empty = pool.registry.is_writer_empty(id).await;
        if !empty {
            return true;
        }
    }
    false
}
|
||||
|
||||
async fn recover_single_endpoint_outage(
|
||||
pool: &Arc<MePool>,
|
||||
rng: &Arc<SecureRandom>,
|
||||
key: (i32, IpFamily),
|
||||
endpoint: SocketAddr,
|
||||
required: usize,
|
||||
outage_backoff: &mut HashMap<(i32, IpFamily), u64>,
|
||||
outage_next_attempt: &mut HashMap<(i32, IpFamily), Instant>,
|
||||
) {
|
||||
let now = Instant::now();
|
||||
if let Some(ts) = outage_next_attempt.get(&key)
|
||||
&& now < *ts
|
||||
{
|
||||
return;
|
||||
}
|
||||
|
||||
let (min_backoff_ms, max_backoff_ms) = pool.single_endpoint_outage_backoff_bounds_ms();
|
||||
pool.stats
|
||||
.increment_me_single_endpoint_outage_reconnect_attempt_total();
|
||||
|
||||
let bypass_quarantine = pool.single_endpoint_outage_disable_quarantine();
|
||||
let attempt_ok = if bypass_quarantine {
|
||||
pool.stats
|
||||
.increment_me_single_endpoint_quarantine_bypass_total();
|
||||
match tokio::time::timeout(pool.me_one_timeout, pool.connect_one(endpoint, rng.as_ref())).await {
|
||||
Ok(Ok(())) => true,
|
||||
Ok(Err(e)) => {
|
||||
debug!(
|
||||
dc = %key.0,
|
||||
family = ?key.1,
|
||||
%endpoint,
|
||||
error = %e,
|
||||
"Single-endpoint outage reconnect failed (quarantine bypass path)"
|
||||
);
|
||||
false
|
||||
}
|
||||
Err(_) => {
|
||||
debug!(
|
||||
dc = %key.0,
|
||||
family = ?key.1,
|
||||
%endpoint,
|
||||
"Single-endpoint outage reconnect timed out (quarantine bypass path)"
|
||||
);
|
||||
false
|
||||
}
|
||||
}
|
||||
} else {
|
||||
let one_endpoint = [endpoint];
|
||||
match tokio::time::timeout(
|
||||
pool.me_one_timeout,
|
||||
pool.connect_endpoints_round_robin(&one_endpoint, rng.as_ref()),
|
||||
)
|
||||
.await
|
||||
{
|
||||
Ok(ok) => ok,
|
||||
Err(_) => {
|
||||
debug!(
|
||||
dc = %key.0,
|
||||
family = ?key.1,
|
||||
%endpoint,
|
||||
"Single-endpoint outage reconnect timed out"
|
||||
);
|
||||
false
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
if attempt_ok {
|
||||
pool.stats
|
||||
.increment_me_single_endpoint_outage_reconnect_success_total();
|
||||
pool.stats.increment_me_reconnect_success();
|
||||
outage_backoff.insert(key, min_backoff_ms);
|
||||
let jitter = min_backoff_ms / JITTER_FRAC_NUM;
|
||||
let wait = Duration::from_millis(min_backoff_ms)
|
||||
+ Duration::from_millis(rand::rng().random_range(0..=jitter.max(1)));
|
||||
outage_next_attempt.insert(key, now + wait);
|
||||
info!(
|
||||
dc = %key.0,
|
||||
family = ?key.1,
|
||||
%endpoint,
|
||||
required,
|
||||
backoff_ms = min_backoff_ms,
|
||||
"Single-endpoint outage reconnect succeeded"
|
||||
);
|
||||
return;
|
||||
}
|
||||
|
||||
pool.stats.increment_me_reconnect_attempt();
|
||||
let current_ms = *outage_backoff.get(&key).unwrap_or(&min_backoff_ms);
|
||||
let next_ms = current_ms.saturating_mul(2).min(max_backoff_ms);
|
||||
outage_backoff.insert(key, next_ms);
|
||||
let jitter = next_ms / JITTER_FRAC_NUM;
|
||||
let wait = Duration::from_millis(next_ms)
|
||||
+ Duration::from_millis(rand::rng().random_range(0..=jitter.max(1)));
|
||||
outage_next_attempt.insert(key, now + wait);
|
||||
warn!(
|
||||
dc = %key.0,
|
||||
family = ?key.1,
|
||||
%endpoint,
|
||||
required,
|
||||
backoff_ms = next_ms,
|
||||
"Single-endpoint outage reconnect scheduled"
|
||||
);
|
||||
}
|
||||
|
||||
async fn maybe_rotate_single_endpoint_shadow(
|
||||
pool: &Arc<MePool>,
|
||||
rng: &Arc<SecureRandom>,
|
||||
key: (i32, IpFamily),
|
||||
dc: i32,
|
||||
family: IpFamily,
|
||||
endpoints: &[SocketAddr],
|
||||
alive: usize,
|
||||
required: usize,
|
||||
live_writer_ids_by_addr: &HashMap<SocketAddr, Vec<u64>>,
|
||||
shadow_rotate_deadline: &mut HashMap<(i32, IpFamily), Instant>,
|
||||
) {
|
||||
if endpoints.len() != 1 || alive < required {
|
||||
return;
|
||||
}
|
||||
|
||||
let Some(interval) = pool.single_endpoint_shadow_rotate_interval() else {
|
||||
return;
|
||||
};
|
||||
|
||||
let now = Instant::now();
|
||||
if let Some(deadline) = shadow_rotate_deadline.get(&key)
|
||||
&& now < *deadline
|
||||
{
|
||||
return;
|
||||
}
|
||||
|
||||
let endpoint = endpoints[0];
|
||||
if pool.is_endpoint_quarantined(endpoint).await {
|
||||
pool.stats
|
||||
.increment_me_single_endpoint_shadow_rotate_skipped_quarantine_total();
|
||||
shadow_rotate_deadline.insert(key, now + Duration::from_secs(SHADOW_ROTATE_RETRY_SECS));
|
||||
debug!(
|
||||
dc = %dc,
|
||||
?family,
|
||||
%endpoint,
|
||||
"Single-endpoint shadow rotation skipped: endpoint is quarantined"
|
||||
);
|
||||
return;
|
||||
}
|
||||
|
||||
let Some(writer_ids) = live_writer_ids_by_addr.get(&endpoint) else {
|
||||
shadow_rotate_deadline.insert(key, now + Duration::from_secs(SHADOW_ROTATE_RETRY_SECS));
|
||||
return;
|
||||
};
|
||||
|
||||
let mut candidate_writer_id = None;
|
||||
for writer_id in writer_ids {
|
||||
if pool.registry.is_writer_empty(*writer_id).await {
|
||||
candidate_writer_id = Some(*writer_id);
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
let Some(old_writer_id) = candidate_writer_id else {
|
||||
shadow_rotate_deadline.insert(key, now + Duration::from_secs(SHADOW_ROTATE_RETRY_SECS));
|
||||
debug!(
|
||||
dc = %dc,
|
||||
?family,
|
||||
%endpoint,
|
||||
alive,
|
||||
required,
|
||||
"Single-endpoint shadow rotation skipped: no empty writer candidate"
|
||||
);
|
||||
return;
|
||||
};
|
||||
|
||||
let rotate_ok = match tokio::time::timeout(pool.me_one_timeout, pool.connect_one(endpoint, rng.as_ref())).await {
|
||||
Ok(Ok(())) => true,
|
||||
Ok(Err(e)) => {
|
||||
debug!(
|
||||
dc = %dc,
|
||||
?family,
|
||||
%endpoint,
|
||||
error = %e,
|
||||
"Single-endpoint shadow rotation connect failed"
|
||||
);
|
||||
false
|
||||
}
|
||||
Err(_) => {
|
||||
debug!(
|
||||
dc = %dc,
|
||||
?family,
|
||||
%endpoint,
|
||||
"Single-endpoint shadow rotation connect timed out"
|
||||
);
|
||||
false
|
||||
}
|
||||
};
|
||||
|
||||
if !rotate_ok {
|
||||
shadow_rotate_deadline.insert(
|
||||
key,
|
||||
now + interval.min(Duration::from_secs(SHADOW_ROTATE_RETRY_SECS)),
|
||||
);
|
||||
return;
|
||||
}
|
||||
|
||||
pool.mark_writer_draining_with_timeout(old_writer_id, pool.force_close_timeout(), false)
|
||||
.await;
|
||||
pool.stats.increment_me_single_endpoint_shadow_rotate_total();
|
||||
shadow_rotate_deadline.insert(key, now + interval);
|
||||
info!(
|
||||
dc = %dc,
|
||||
?family,
|
||||
%endpoint,
|
||||
old_writer_id,
|
||||
rotate_every_secs = interval.as_secs(),
|
||||
"Single-endpoint shadow writer rotated"
|
||||
);
|
||||
}
|
||||
|
||||
@@ -1,21 +1,42 @@
|
||||
//! Middle Proxy RPC transport.
|
||||
|
||||
mod codec;
|
||||
mod config_updater;
|
||||
mod handshake;
|
||||
mod health;
|
||||
mod pool;
|
||||
mod pool_config;
|
||||
mod pool_init;
|
||||
mod pool_nat;
|
||||
mod pool_refill;
|
||||
mod pool_reinit;
|
||||
mod pool_runtime_api;
|
||||
mod pool_writer;
|
||||
mod ping;
|
||||
mod reader;
|
||||
mod registry;
|
||||
mod rotation;
|
||||
mod send;
|
||||
mod secret;
|
||||
mod wire;
|
||||
mod pool_status;
|
||||
|
||||
use bytes::Bytes;
|
||||
|
||||
pub use health::me_health_monitor;
|
||||
#[allow(unused_imports)]
|
||||
pub use ping::{run_me_ping, format_sample_line, format_me_route, MePingReport, MePingSample, MePingFamily};
|
||||
pub use pool::MePool;
|
||||
#[allow(unused_imports)]
|
||||
pub use pool_nat::{stun_probe, detect_public_ip};
|
||||
pub use registry::ConnRegistry;
|
||||
pub use secret::fetch_proxy_secret;
|
||||
#[allow(unused_imports)]
|
||||
pub use config_updater::{
|
||||
ProxyConfigData, fetch_proxy_config, fetch_proxy_config_with_raw, load_proxy_config_cache,
|
||||
me_config_updater, save_proxy_config_cache,
|
||||
};
|
||||
pub use rotation::{MeReinitTrigger, me_reinit_scheduler, me_rotation_task};
|
||||
pub use wire::proto_flags_for_tag;
|
||||
|
||||
#[derive(Debug)]
|
||||
|
||||
386
src/transport/middle_proxy/ping.rs
Normal file
386
src/transport/middle_proxy/ping.rs
Normal file
@@ -0,0 +1,386 @@
|
||||
use std::collections::HashMap;
|
||||
use std::net::{IpAddr, SocketAddr};
|
||||
use std::sync::Arc;
|
||||
|
||||
use tokio::net::UdpSocket;
|
||||
|
||||
use crate::config::{UpstreamConfig, UpstreamType};
|
||||
use crate::crypto::SecureRandom;
|
||||
use crate::error::ProxyError;
|
||||
use crate::transport::{UpstreamEgressInfo, UpstreamRouteKind};
|
||||
|
||||
use super::MePool;
|
||||
|
||||
/// IP family a middle-proxy ping sample was taken over.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum MePingFamily {
    /// IPv4 endpoint.
    V4,
    /// IPv6 endpoint.
    V6,
}
|
||||
|
||||
/// Result of probing one middle-proxy endpoint: TCP connect and RPC handshake
/// timings, or the error that stopped the probe.
#[derive(Debug, Clone)]
pub struct MePingSample {
    /// Datacenter id; its sign is rendered explicitly by `format_sample_line`.
    pub dc: i32,
    /// Endpoint that was probed.
    pub addr: SocketAddr,
    /// Human-readable egress route description, when known.
    pub route: Option<String>,
    /// TCP connect time in milliseconds; `None` if the connect failed.
    pub connect_ms: Option<f64>,
    /// RPC handshake time in milliseconds; `None` if the handshake failed.
    pub handshake_ms: Option<f64>,
    /// Error text for a failed probe stage, if any.
    pub error: Option<String>,
    /// IP family of `addr`.
    pub family: MePingFamily,
}
|
||||
|
||||
/// All ping samples collected for one (datacenter, IP family) pair.
#[derive(Debug, Clone)]
#[allow(dead_code)]
pub struct MePingReport {
    /// Datacenter id the samples belong to.
    pub dc: i32,
    /// IP family the samples were taken over.
    pub family: MePingFamily,
    /// Individual per-endpoint probe results.
    pub samples: Vec<MePingSample>,
}
|
||||
|
||||
pub fn format_sample_line(sample: &MePingSample) -> String {
|
||||
let sign = if sample.dc >= 0 { "+" } else { "-" };
|
||||
let addr = format!("{}:{}", sample.addr.ip(), sample.addr.port());
|
||||
|
||||
match (sample.connect_ms, sample.handshake_ms.as_ref(), sample.error.as_ref()) {
|
||||
(Some(conn), Some(hs), None) => format!(
|
||||
" {sign} {addr}\tPing: {:.0} ms / RPC: {:.0} ms / OK",
|
||||
conn, hs
|
||||
),
|
||||
(Some(conn), None, Some(err)) => format!(
|
||||
" {sign} {addr}\tPing: {:.0} ms / RPC: FAIL ({err})",
|
||||
conn
|
||||
),
|
||||
(None, _, Some(err)) => format!(" {sign} {addr}\tPing: FAIL ({err})"),
|
||||
(Some(conn), None, None) => format!(" {sign} {addr}\tPing: {:.0} ms / RPC: FAIL", conn),
|
||||
_ => format!(" {sign} {addr}\tPing: FAIL"),
|
||||
}
|
||||
}
|
||||
|
||||
/// Build a `"direct dev=… src=…"` route description from explicitly configured
/// upstream settings. Returns `None` when neither a (non-empty) interface nor
/// any bind addresses are configured.
fn format_direct_with_config(
    interface: &Option<String>,
    bind_addresses: &Option<Vec<String>>,
) -> Option<String> {
    let mut parts: Vec<String> = Vec::new();
    match interface.as_deref() {
        Some(dev) if !dev.is_empty() => parts.push(format!("dev={dev}")),
        _ => {}
    }
    match bind_addresses {
        Some(src) if !src.is_empty() => parts.push(format!("src={}", src.join(","))),
        _ => {}
    }
    if parts.is_empty() {
        return None;
    }
    Some(format!("direct {}", parts.join(" ")))
}
|
||||
|
||||
/// Address of the first sample in the given family that completed the RPC
/// handshake without error, scanning reports in order; `None` if no such
/// sample exists.
fn pick_target_for_family(reports: &[MePingReport], family: MePingFamily) -> Option<SocketAddr> {
    for report in reports {
        if report.family != family {
            continue;
        }
        let good = report
            .samples
            .iter()
            .find(|s| s.error.is_none() && s.handshake_ms.is_some());
        if let Some(sample) = good {
            return Some(sample.addr);
        }
    }
    None
}
|
||||
|
||||
fn route_from_egress(egress: Option<UpstreamEgressInfo>) -> Option<String> {
|
||||
let info = egress?;
|
||||
match info.route_kind {
|
||||
UpstreamRouteKind::Direct => {
|
||||
let src_ip = info
|
||||
.direct_bind_ip
|
||||
.or_else(|| info.local_addr.map(|addr| addr.ip()));
|
||||
let ip = src_ip?;
|
||||
let mut parts = Vec::new();
|
||||
if let Some(dev) = detect_interface_for_ip(ip) {
|
||||
parts.push(format!("dev={dev}"));
|
||||
}
|
||||
parts.push(format!("src={ip}"));
|
||||
Some(format!("direct {}", parts.join(" ")))
|
||||
}
|
||||
UpstreamRouteKind::Socks4 => {
|
||||
let route = info
|
||||
.socks_proxy_addr
|
||||
.map(|addr| format!("socks4://{addr}"))
|
||||
.unwrap_or_else(|| "socks4://unknown".to_string());
|
||||
Some(match info.socks_bound_addr {
|
||||
Some(bound) => format!("{route} bnd={bound}"),
|
||||
None => route,
|
||||
})
|
||||
}
|
||||
UpstreamRouteKind::Socks5 => {
|
||||
let route = info
|
||||
.socks_proxy_addr
|
||||
.map(|addr| format!("socks5://{addr}"))
|
||||
.unwrap_or_else(|| "socks5://unknown".to_string());
|
||||
Some(match info.socks_bound_addr {
|
||||
Some(bound) => format!("{route} bnd={bound}"),
|
||||
None => route,
|
||||
})
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Resolve the name of the local network interface that owns the given IP
/// address by scanning the system interface/address list via `getifaddrs`.
/// Returns `None` when no interface carries the address or the scan fails.
#[cfg(unix)]
fn detect_interface_for_ip(ip: IpAddr) -> Option<String> {
    use nix::ifaddrs::getifaddrs;

    if let Ok(addrs) = getifaddrs() {
        for iface in addrs {
            // Entries without an address (e.g. link-layer-only) are skipped.
            if let Some(address) = iface.address {
                // Each entry carries at most one sockaddr; compare per family.
                if let Some(v4) = address.as_sockaddr_in() {
                    if IpAddr::V4(v4.ip()) == ip {
                        return Some(iface.interface_name);
                    }
                } else if let Some(v6) = address.as_sockaddr_in6() {
                    if IpAddr::V6(v6.ip()) == ip {
                        return Some(iface.interface_name);
                    }
                }
            }
        }
    }
    None
}
|
||||
|
||||
/// Non-Unix fallback: interface detection is unsupported, always `None`.
#[cfg(not(unix))]
fn detect_interface_for_ip(_ip: IpAddr) -> Option<String> {
    None
}
|
||||
|
||||
async fn detect_direct_route_details(
|
||||
reports: &[MePingReport],
|
||||
prefer_ipv6: bool,
|
||||
v4_ok: bool,
|
||||
v6_ok: bool,
|
||||
) -> Option<String> {
|
||||
let target_addr = if prefer_ipv6 && v6_ok {
|
||||
pick_target_for_family(reports, MePingFamily::V6)
|
||||
.or_else(|| pick_target_for_family(reports, MePingFamily::V4))
|
||||
} else if v4_ok {
|
||||
pick_target_for_family(reports, MePingFamily::V4)
|
||||
.or_else(|| pick_target_for_family(reports, MePingFamily::V6))
|
||||
} else {
|
||||
pick_target_for_family(reports, MePingFamily::V6)
|
||||
.or_else(|| pick_target_for_family(reports, MePingFamily::V4))
|
||||
}?;
|
||||
|
||||
let local_ip = if target_addr.is_ipv4() {
|
||||
let sock = UdpSocket::bind("0.0.0.0:0").await.ok()?;
|
||||
sock.connect(target_addr).await.ok()?;
|
||||
sock.local_addr().ok().map(|a| a.ip())
|
||||
} else {
|
||||
let sock = UdpSocket::bind("[::]:0").await.ok()?;
|
||||
sock.connect(target_addr).await.ok()?;
|
||||
sock.local_addr().ok().map(|a| a.ip())
|
||||
};
|
||||
|
||||
let mut parts = Vec::new();
|
||||
if let Some(ip) = local_ip {
|
||||
if let Some(dev) = detect_interface_for_ip(ip) {
|
||||
parts.push(format!("dev={dev}"));
|
||||
}
|
||||
parts.push(format!("src={ip}"));
|
||||
}
|
||||
|
||||
if parts.is_empty() {
|
||||
None
|
||||
} else {
|
||||
Some(format!("direct {}", parts.join(" ")))
|
||||
}
|
||||
}
|
||||
|
||||
pub async fn format_me_route(
|
||||
upstreams: &[UpstreamConfig],
|
||||
reports: &[MePingReport],
|
||||
prefer_ipv6: bool,
|
||||
v4_ok: bool,
|
||||
v6_ok: bool,
|
||||
) -> String {
|
||||
if let Some(route) = reports
|
||||
.iter()
|
||||
.flat_map(|report| report.samples.iter())
|
||||
.find(|sample| sample.error.is_none() && sample.handshake_ms.is_some())
|
||||
.and_then(|sample| sample.route.clone())
|
||||
{
|
||||
return route;
|
||||
}
|
||||
|
||||
let enabled_upstreams: Vec<_> = upstreams.iter().filter(|u| u.enabled).collect();
|
||||
if enabled_upstreams.is_empty() {
|
||||
return detect_direct_route_details(reports, prefer_ipv6, v4_ok, v6_ok)
|
||||
.await
|
||||
.unwrap_or_else(|| "direct".to_string());
|
||||
}
|
||||
|
||||
if enabled_upstreams.len() == 1 {
|
||||
return match &enabled_upstreams[0].upstream_type {
|
||||
UpstreamType::Direct {
|
||||
interface,
|
||||
bind_addresses,
|
||||
} => {
|
||||
if let Some(route) = format_direct_with_config(interface, bind_addresses) {
|
||||
route
|
||||
} else {
|
||||
detect_direct_route_details(reports, prefer_ipv6, v4_ok, v6_ok)
|
||||
.await
|
||||
.unwrap_or_else(|| "direct".to_string())
|
||||
}
|
||||
}
|
||||
UpstreamType::Socks4 { address, .. } => format!("socks4://{address}"),
|
||||
UpstreamType::Socks5 { address, .. } => format!("socks5://{address}"),
|
||||
};
|
||||
}
|
||||
|
||||
let has_direct = enabled_upstreams
|
||||
.iter()
|
||||
.any(|u| matches!(u.upstream_type, UpstreamType::Direct { .. }));
|
||||
let has_socks4 = enabled_upstreams
|
||||
.iter()
|
||||
.any(|u| matches!(u.upstream_type, UpstreamType::Socks4 { .. }));
|
||||
let has_socks5 = enabled_upstreams
|
||||
.iter()
|
||||
.any(|u| matches!(u.upstream_type, UpstreamType::Socks5 { .. }));
|
||||
let mut kinds = Vec::new();
|
||||
if has_direct {
|
||||
kinds.push("direct");
|
||||
}
|
||||
if has_socks4 {
|
||||
kinds.push("socks4");
|
||||
}
|
||||
if has_socks5 {
|
||||
kinds.push("socks5");
|
||||
}
|
||||
format!("mixed upstreams ({})", kinds.join(", "))
|
||||
}
|
||||
|
||||
#[cfg(test)]
mod tests {
    use super::*;
    use std::net::{IpAddr, Ipv4Addr, SocketAddr};

    // NOTE: the former `sample()` helper was an identity function that added
    // nothing; the fixtures are now constructed inline.

    #[test]
    fn ok_line_contains_both_timings() {
        // 12.3 ms should render as "12 ms" and 34.7 ms as "35 ms".
        let s = MePingSample {
            dc: 4,
            addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(1, 2, 3, 4)), 8888),
            route: Some("direct src=1.2.3.4".to_string()),
            connect_ms: Some(12.3),
            handshake_ms: Some(34.7),
            error: None,
            family: MePingFamily::V4,
        };
        let line = format_sample_line(&s);
        assert!(line.contains("Ping: 12 ms"));
        assert!(line.contains("RPC: 35 ms"));
        assert!(line.contains("OK"));
    }

    #[test]
    fn error_line_mentions_reason() {
        // A failed handshake must surface both the address and the error text.
        let s = MePingSample {
            dc: -5,
            addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(5, 6, 7, 8)), 80),
            route: Some("socks5".to_string()),
            connect_ms: Some(10.0),
            handshake_ms: None,
            error: Some("handshake timeout".to_string()),
            family: MePingFamily::V4,
        };
        let line = format_sample_line(&s);
        assert!(line.contains("- 5.6.7.8:80"));
        assert!(line.contains("handshake timeout"));
    }
}
|
||||
|
||||
pub async fn run_me_ping(pool: &Arc<MePool>, rng: &SecureRandom) -> Vec<MePingReport> {
|
||||
let mut reports = Vec::new();
|
||||
|
||||
let v4_map = if pool.decision.ipv4_me {
|
||||
pool.proxy_map_v4.read().await.clone()
|
||||
} else {
|
||||
HashMap::new()
|
||||
};
|
||||
let v6_map = if pool.decision.ipv6_me {
|
||||
pool.proxy_map_v6.read().await.clone()
|
||||
} else {
|
||||
HashMap::new()
|
||||
};
|
||||
|
||||
let mut grouped: Vec<(MePingFamily, i32, Vec<(IpAddr, u16)>)> = Vec::new();
|
||||
for (dc, addrs) in v4_map {
|
||||
grouped.push((MePingFamily::V4, dc, addrs));
|
||||
}
|
||||
for (dc, addrs) in v6_map {
|
||||
grouped.push((MePingFamily::V6, dc, addrs));
|
||||
}
|
||||
|
||||
for (family, dc, addrs) in grouped {
|
||||
let mut samples = Vec::new();
|
||||
for (ip, port) in addrs {
|
||||
let addr = SocketAddr::new(ip, port);
|
||||
let mut connect_ms = None;
|
||||
let mut handshake_ms = None;
|
||||
let mut error = None;
|
||||
let mut route = None;
|
||||
|
||||
match pool.connect_tcp(addr).await {
|
||||
Ok((stream, conn_rtt, upstream_egress)) => {
|
||||
connect_ms = Some(conn_rtt);
|
||||
route = route_from_egress(upstream_egress);
|
||||
match pool.handshake_only(stream, addr, upstream_egress, rng).await {
|
||||
Ok(hs) => {
|
||||
handshake_ms = Some(hs.handshake_ms);
|
||||
// drop halves to close
|
||||
drop(hs.rd);
|
||||
drop(hs.wr);
|
||||
}
|
||||
Err(e) => {
|
||||
error = Some(short_err(&e));
|
||||
}
|
||||
}
|
||||
}
|
||||
Err(e) => {
|
||||
error = Some(short_err(&e));
|
||||
}
|
||||
}
|
||||
|
||||
samples.push(MePingSample {
|
||||
dc,
|
||||
addr,
|
||||
route,
|
||||
connect_ms,
|
||||
handshake_ms,
|
||||
error,
|
||||
family,
|
||||
});
|
||||
}
|
||||
|
||||
reports.push(MePingReport {
|
||||
dc,
|
||||
family,
|
||||
samples,
|
||||
});
|
||||
}
|
||||
|
||||
reports
|
||||
}
|
||||
|
||||
fn short_err(err: &ProxyError) -> String {
|
||||
match err {
|
||||
ProxyError::ConnectionTimeout { .. } => "connect timeout".to_string(),
|
||||
ProxyError::TgHandshakeTimeout => "handshake timeout".to_string(),
|
||||
ProxyError::InvalidHandshake(e) => format!("bad handshake: {e}"),
|
||||
ProxyError::Crypto(e) => format!("crypto: {e}"),
|
||||
ProxyError::Proxy(e) => format!("proxy: {e}"),
|
||||
ProxyError::Io(e) => format!("io: {e}"),
|
||||
_ => format!("{err}"),
|
||||
}
|
||||
}
|
||||
File diff suppressed because it is too large
Load Diff
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user