mirror of https://github.com/telemt/telemt.git
Compare commits
678 Commits
@@ -0,0 +1,15 @@
[bans]
multiple-versions = "deny"
wildcards = "allow"
highlight = "all"

# Explicitly flag the weak cryptography so the agent is forced to justify its existence
[[bans.skip]]
name = "md-5"
version = "*"
reason = "MUST VERIFY: Only allowed for legacy checksums, never for security."

[[bans.skip]]
name = "sha1"
version = "*"
reason = "MUST VERIFY: Only allowed for backwards compatibility."
@@ -0,0 +1,8 @@
.git
.github
target
.kilocode
cache
tlsfront
*.tar
*.tar.gz
@@ -7,7 +7,16 @@ queries:
   - uses: security-and-quality
   - uses: ./.github/codeql/queries
 
+paths-ignore:
+  - "**/tests/**"
+  - "**/test/**"
+  - "**/*_test.rs"
+  - "**/*/tests.rs"
 query-filters:
   - exclude:
       tags:
         - test
+
+  - exclude:
+      id:
+        - rust/unwrap-on-option
@@ -0,0 +1,126 @@
# Architecture Directives

> Companion to `Agents.md`. These are **activation directives**, not tutorials.
> You already know these patterns — apply them. When making any structural or
> design decision, run the relevant section below as a checklist.

---

## 1. Active Principles (always on)

Apply these on every non-trivial change. No exceptions.

- **SRP** — one reason to change per component. If you can't name the responsibility in one noun phrase, split it.
- **OCP** — extend by adding, not by modifying. New variants/impls over patching existing logic.
- **ISP** — traits stay minimal. More than ~5 methods is a split signal.
- **DIP** — high-level modules depend on traits, not concrete types. Infrastructure implements domain traits; it does not own domain logic.
- **DRY** — one authoritative source per piece of knowledge. Copies are bugs that haven't diverged yet.
- **YAGNI** — generic parameters, extension hooks, and pluggable strategies require an *existing* concrete use case, not a hypothetical one.
- **KISS** — two equivalent designs: choose the one with fewer concepts. Justify complexity; never assume it.

---

## 2. Layered Architecture

Dependencies point **inward only**: `Presentation → Application → Domain ← Infrastructure`.

- Domain layer: zero I/O. No network, no filesystem, no async runtime imports.
- Infrastructure: implements domain traits at the boundary. Never leaks SDK/wire types inward.
- Anti-Corruption Layer (ACL): all third-party and external-protocol types are translated here. If the external format changes, only the ACL changes.
- Presentation: translates wire/HTTP representations to domain types and back. Nothing else.
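
A minimal sketch of the DIP boundary described above: the domain declares the trait and infrastructure implements it (the `SessionStore` and `InMemoryStore` names are illustrative, not code from this repo):

```rust
// Domain layer: trait + newtype only. No I/O types, no runtime imports.
pub struct SessionId(pub u64);

#[derive(Debug)]
pub enum StoreError {
    Unavailable,
}

pub trait SessionStore {
    fn load(&self, id: &SessionId) -> Result<Option<Vec<u8>>, StoreError>;
}

// Infrastructure layer: implements the domain trait at the boundary.
// Wire/SDK types would be translated here and never leak inward.
pub struct InMemoryStore {
    map: std::collections::HashMap<u64, Vec<u8>>,
}

impl SessionStore for InMemoryStore {
    fn load(&self, id: &SessionId) -> Result<Option<Vec<u8>>, StoreError> {
        Ok(self.map.get(&id.0).cloned())
    }
}
```

If the external format ever changes, only the infrastructure impl changes; the domain trait and its callers stay untouched.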

---

## 3. Design Pattern Selection

Apply the right pattern. Do not invent a new abstraction when a named pattern fits.

| Situation | Pattern to apply |
|---|---|
| Struct with 3+ optional/dependent fields | **Builder** — `build()` returns `Result`, never panics |
| Cross-cutting behavior (logging, retry, metrics) on a trait impl | **Decorator** — implements same trait, delegates all calls |
| Subsystem with multiple internal components | **Façade** — single public entry point, internals are `pub(crate)` |
| Swappable algorithm or policy | **Strategy** — trait injection; generics for compile-time, `dyn` for runtime |
| Component notifying decoupled consumers | **Observer** — typed channels (`broadcast`, `watch`), not callback `Vec<Box<dyn Fn>>` |
| Exclusive mutable state serving concurrent callers | **Actor** — `mpsc` command channel + `oneshot` reply; no lock needed on state (sketch below) |
| Finite state with invalid transition prevention | **Typestate** — distinct types per state; invalid ops are compile errors |
| Fixed process skeleton with overridable steps | **Template Method** — defaulted trait method calls required hooks |
| Request pipeline with independent handlers | **Chain/Middleware** — generic compile-time chain for hot paths, `dyn` for runtime assembly |
| Hiding a concrete type behind a trait | **Factory Function** — returns `Box<dyn Trait>` or `impl Trait` |
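
As one worked instance of the table, a minimal sketch of the Actor row using Tokio's `mpsc` plus `oneshot` channels (the counter command set is invented for illustration):

```rust
use tokio::sync::{mpsc, oneshot};

// Each command carries a oneshot sender, so callers receive a typed reply.
enum Command {
    Increment { reply: oneshot::Sender<u64> },
}

// The actor owns its state exclusively; no Mutex is needed.
async fn counter_actor(mut rx: mpsc::Receiver<Command>) {
    let mut count: u64 = 0;
    while let Some(cmd) = rx.recv().await {
        match cmd {
            Command::Increment { reply } => {
                count += 1;
                let _ = reply.send(count); // caller may have gone away; that's fine
            }
        }
    }
}

async fn demo() {
    // Bounded at 32: callers get backpressure instead of unbounded queue growth.
    let (tx, rx) = mpsc::channel(32);
    tokio::spawn(counter_actor(rx));

    let (reply_tx, reply_rx) = oneshot::channel();
    // The actor loop runs for as long as a sender exists, so these cannot fail here.
    tx.send(Command::Increment { reply: reply_tx })
        .await
        .expect("actor is alive while tx exists");
    let count = reply_rx.await.expect("actor always replies to Increment");
    assert_eq!(count, 1);
}
```

The state never needs a lock because only the actor task touches it, and shutdown falls out naturally when all senders drop.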

---

## 4. Data Modeling Rules

- **Make illegal states unrepresentable.** Type system enforces invariants; runtime validation is a second line, not the first.
- **Newtype every primitive** that carries domain meaning. `SessionId(u64)` ≠ `UserId(u64)` — the compiler enforces it.
- **Enums over booleans** for any parameter or field with two or more named states.
- **Typed error enums** with named variants carrying full diagnostic context. `anyhow` is application-layer only; never in library code.
- **Domain types carry no I/O concerns.** No `serde`, no codec, no DB derives on domain structs. Conversions via `From`/`TryFrom` at layer boundaries.
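
A small sketch of the newtype and enums-over-booleans rules (all types invented for illustration):

```rust
// Newtypes: the compiler now rejects passing a UserId where a SessionId is expected.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
pub struct SessionId(pub u64);

#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
pub struct UserId(pub u64);

// Enum instead of a bare bool: call sites read as intent, not as a mystery flag.
#[derive(Debug, Clone, Copy)]
pub enum TlsMode {
    Required,
    Opportunistic,
}

pub fn connect(session: SessionId, tls: TlsMode) {
    // `connect(user_id, true)` would not compile; `connect(session, TlsMode::Required)` does.
    let _ = (session, tls);
}
```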

---

## 5. Concurrency Rules

- Prefer message-passing over shared memory. Shared state is a fallback.
- All channels must be **bounded**. Document the bound's rationale inline.
- Never hold a lock across an `await` unless atomicity explicitly requires it — document why.
- Document lock acquisition order wherever two locks are taken together.
- Every `async fn` is cancellation-safe unless explicitly documented otherwise. Mutate shared state *after* the `await` that may be cancelled, not before.
- High-read/low-write state: use `arc-swap` or `watch` for lock-free reads.
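
A minimal sketch of the no-lock-across-`await` rule: move the data out under the guard, drop the guard, then await (assumes Tokio and a `std::sync::Mutex`; the function is illustrative):

```rust
use std::sync::{Arc, Mutex};

async fn flush(state: Arc<Mutex<Vec<u8>>>, mut out: tokio::net::TcpStream) -> std::io::Result<()> {
    use tokio::io::AsyncWriteExt;

    // Take the pending bytes out under the lock, then release the guard
    // before awaiting. Holding a std Mutex guard across `.await` can
    // stall or deadlock the runtime.
    let pending: Vec<u8> = {
        let mut guard = state.lock().expect("mutex not poisoned");
        std::mem::take(&mut *guard)
    }; // guard dropped here

    out.write_all(&pending).await
}
```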

---

## 6. Error Handling Rules

- Errors translated at every layer boundary — low-level errors never surface unmodified.
- Add context at the propagation site: what operation failed and where.
- No `unwrap()`/`expect()` in production paths without a comment proving `None`/`Err` is impossible.
- Panics are only permitted in: tests, startup/init unrecoverable failure, and `unreachable!()` with an invariant comment.

---

## 7. API Design Rules

- **CQS**: functions that return data must not mutate; functions that mutate return only `Result`.
- **Least surprise**: a function does exactly what its name implies. Side effects are documented.
- **Idempotency**: `close()`, `shutdown()`, `unregister()` called twice must not panic or error.
- **Fallibility at the type level**: failure → `Result<T, E>`. No sentinel values.
- **Minimal public surface**: default to `pub(crate)`. Mark `pub` only deliberate API. Re-export through a single surface in `mod.rs`.
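
A sketch of the idempotency rule: `shutdown()` takes the socket out of an `Option`, so a second call is a clean no-op (illustrative type, not from this repo):

```rust
pub struct Connection {
    socket: Option<std::net::TcpStream>,
}

impl Connection {
    /// Idempotent: safe to call any number of times.
    pub fn shutdown(&mut self) -> std::io::Result<()> {
        match self.socket.take() {
            Some(sock) => sock.shutdown(std::net::Shutdown::Both),
            None => Ok(()), // already shut down; second call is a no-op
        }
    }
}
```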

---

## 8. Performance Rules (hot paths)

- Annotate hot-path functions with `// HOT PATH: <throughput requirement>`.
- Zero allocations per operation in hot paths after initialization. Preallocate in constructors, reuse buffers.
- Pass `&[u8]` / `Bytes` slices — not `Vec<u8>`. Use `BytesMut` for reusable mutable buffers.
- No `String` formatting in hot paths. No logging without a rate-limit or sampling gate.
- Any allocation in a hot path gets a comment: `// ALLOC: <reason and size>`.
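
A sketch of hot-path buffer reuse with the `bytes` crate: one up-front allocation in the constructor, then steady-state encoding without new allocations (the frame layout is invented for illustration):

```rust
use bytes::{BufMut, Bytes, BytesMut};

pub struct FrameEncoder {
    buf: BytesMut,
}

impl FrameEncoder {
    pub fn new() -> Self {
        // ALLOC: one 64 KiB buffer up front; reused for every frame afterwards.
        Self { buf: BytesMut::with_capacity(64 * 1024) }
    }

    // HOT PATH: called once per frame.
    pub fn encode(&mut self, payload: &[u8]) -> Bytes {
        // `reserve` reclaims the original allocation once previously emitted
        // frames are dropped, so steady-state encoding does not allocate.
        self.buf.reserve(4 + payload.len());
        self.buf.put_u32_le(payload.len() as u32);
        self.buf.extend_from_slice(payload);
        self.buf.split().freeze()
    }
}
```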

---

## 9. Testing Rules

- Bug fixes require a regression test that is **red before the fix, green after**. Name it after the bug.
- Property tests for: codec round-trips, state machine invariants, cryptographic protocol correctness (sketch after this list).
- No shared mutable state between tests. Each test constructs its own environment.
- Test doubles hierarchy (simplest first): Fake → Stub → Spy → Mock. Mocks couple to implementation, not behavior — use sparingly.
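
A sketch of a codec round-trip property test with the `proptest` crate; the `encode`/`decode` pair here is a stand-in for the real codec under test:

```rust
#[cfg(test)]
mod round_trip {
    use proptest::prelude::*;

    // Stand-in codec: 4-byte little-endian length prefix, then the payload.
    fn encode(payload: &[u8]) -> Vec<u8> {
        let mut out = (payload.len() as u32).to_le_bytes().to_vec();
        out.extend_from_slice(payload);
        out
    }

    fn decode(frame: &[u8]) -> Option<Vec<u8>> {
        if frame.len() < 4 {
            return None;
        }
        let len = u32::from_le_bytes(frame[..4].try_into().ok()?) as usize;
        let rest = &frame[4..];
        (rest.len() == len).then(|| rest.to_vec())
    }

    proptest! {
        #[test]
        fn decode_inverts_encode(payload in proptest::collection::vec(any::<u8>(), 0..1024)) {
            prop_assert_eq!(decode(&encode(&payload)), Some(payload));
        }
    }
}
```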

---

## 10. Pre-Change Checklist

Run this before proposing or implementing any structural decision:

- [ ] Responsibility nameable in one noun phrase?
- [ ] Layer dependencies point inward only?
- [ ] Invalid states unrepresentable in the type system?
- [ ] State transitions gated through a single interface?
- [ ] All channels bounded?
- [ ] No locks held across `await` (or documented)?
- [ ] Errors typed and translated at layer boundaries?
- [ ] No panics in production paths without invariant proof?
- [ ] Hot paths annotated and allocation-free?
- [ ] Public surface minimal — only deliberate API marked `pub`?
- [ ] Correct pattern chosen from Section 3 table?
@@ -0,0 +1,135 @@
---
description: 'Rust programming language coding conventions and best practices'
applyTo: '**/*.rs'
---

# Rust Coding Conventions and Best Practices

Follow idiomatic Rust practices and community standards when writing Rust code.

These instructions are based on [The Rust Book](https://doc.rust-lang.org/book/), [Rust API Guidelines](https://rust-lang.github.io/api-guidelines/), [RFC 430 naming conventions](https://github.com/rust-lang/rfcs/blob/master/text/0430-finalizing-naming-conventions.md), and the broader Rust community at [users.rust-lang.org](https://users.rust-lang.org).

## General Instructions

- Always prioritize readability, safety, and maintainability.
- Use strong typing and leverage Rust's ownership system for memory safety.
- Break down complex functions into smaller, more manageable functions.
- For algorithm-related code, include explanations of the approach used.
- Write code with good maintainability practices, including comments on why certain design decisions were made.
- Handle errors gracefully using `Result<T, E>` and provide meaningful error messages.
- For external dependencies, mention their usage and purpose in documentation.
- Use consistent naming conventions following [RFC 430](https://github.com/rust-lang/rfcs/blob/master/text/0430-finalizing-naming-conventions.md).
- Write idiomatic, safe, and efficient Rust code that follows the borrow checker's rules.
- Ensure code compiles without warnings.

## Patterns to Follow

- Use modules (`mod`) and public interfaces (`pub`) to encapsulate logic.
- Handle errors properly using `?`, `match`, or `if let`.
- Use `serde` for serialization and `thiserror` or `anyhow` for custom errors.
- Implement traits to abstract services or external dependencies.
- Structure async code using `async/await` and `tokio` or `async-std`.
- Prefer enums over flags and states for type safety.
- Use builders for complex object creation (see the sketch after this list).
- Split binary and library code (`main.rs` vs `lib.rs`) for testability and reuse.
- Use `rayon` for data parallelism and CPU-bound tasks.
- Use iterators instead of index-based loops as they're often faster and safer.
- Use `&str` instead of `String` for function parameters when you don't need ownership.
- Prefer borrowing and zero-copy operations to avoid unnecessary allocations.
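
A minimal sketch of the builder bullet above, with a `build()` that returns `Result` instead of panicking (the `ServerConfig` type is invented for illustration):

```rust
#[derive(Debug)]
pub struct ServerConfig {
    host: String,
    port: u16,
    max_conns: usize,
}

#[derive(Default)]
pub struct ServerConfigBuilder {
    host: Option<String>,
    port: Option<u16>,
    max_conns: Option<usize>,
}

impl ServerConfigBuilder {
    pub fn host(mut self, host: impl Into<String>) -> Self {
        self.host = Some(host.into());
        self
    }

    pub fn port(mut self, port: u16) -> Self {
        self.port = Some(port);
        self
    }

    /// Fails with a message instead of panicking on missing fields.
    pub fn build(self) -> Result<ServerConfig, String> {
        Ok(ServerConfig {
            host: self.host.ok_or("host is required")?,
            port: self.port.ok_or("port is required")?,
            max_conns: self.max_conns.unwrap_or(1024), // sensible default
        })
    }
}
```

Usage: `ServerConfigBuilder::default().host("127.0.0.1").port(8443).build()?` yields an `Err`, not a panic, when a required field is missing.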

### Ownership, Borrowing, and Lifetimes

- Prefer borrowing (`&T`) over cloning unless ownership transfer is necessary.
- Use `&mut T` when you need to modify borrowed data.
- Explicitly annotate lifetimes when the compiler cannot infer them.
- Use `Rc<T>` for single-threaded reference counting and `Arc<T>` for thread-safe reference counting.
- Use `RefCell<T>` for interior mutability in single-threaded contexts and `Mutex<T>` or `RwLock<T>` for multi-threaded contexts.
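
A small sketch of the borrowing guidance: take `&str` and `&[u8]` at function boundaries so callers can pass owned or borrowed data without cloning (functions invented for illustration):

```rust
// Borrows: callers can pass a String, &String, or a literal without cloning.
fn greeting(name: &str) -> String {
    format!("Hello, {name}!")
}

// Slice parameter: works for Vec<u8>, arrays, and sub-slices alike.
fn checksum(data: &[u8]) -> u32 {
    data.iter().fold(0u32, |acc, &b| acc.wrapping_add(u32::from(b)))
}

fn main() {
    let owned = String::from("telemt");
    println!("{}", greeting(&owned)); // &String coerces to &str
    println!("{}", checksum(b"abc"));
}
```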

## Patterns to Avoid

- Don't use `unwrap()` or `expect()` unless absolutely necessary—prefer proper error handling.
- Avoid panics in library code—return `Result` instead.
- Don't rely on global mutable state—use dependency injection or thread-safe containers.
- Avoid deeply nested logic—refactor with functions or combinators.
- Don't ignore warnings—treat them as errors during CI.
- Avoid `unsafe` unless required and fully documented.
- Don't overuse `clone()`; use borrowing instead of cloning unless ownership transfer is needed.
- Avoid premature `collect()`; keep iterators lazy until you actually need the collection.
- Avoid unnecessary allocations—prefer borrowing and zero-copy operations.

## Code Style and Formatting

- Follow the Rust Style Guide and use `rustfmt` for automatic formatting.
- Keep lines under 100 characters when possible.
- Place function and struct documentation immediately before the item using `///`.
- Use `cargo clippy` to catch common mistakes and enforce best practices.

## Error Handling

- Use `Result<T, E>` for recoverable errors and `panic!` only for unrecoverable errors.
- Prefer the `?` operator over `unwrap()` or `expect()` for error propagation.
- Create custom error types using `thiserror` or implement `std::error::Error`.
- Use `Option<T>` for values that may or may not exist.
- Provide meaningful error messages and context.
- Error types should be meaningful and well-behaved (implement standard traits).
- Validate function arguments and return appropriate errors for invalid input.
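
A sketch of a custom error type built with `thiserror` and propagated with `?` (the `ConfigError` variants are invented for illustration):

```rust
use std::path::Path;
use thiserror::Error;

#[derive(Debug, Error)]
pub enum ConfigError {
    #[error("failed to read config at {path}: {source}")]
    Io {
        path: String,
        #[source]
        source: std::io::Error,
    },
    #[error("invalid port value: {0}")]
    InvalidPort(String),
}

pub fn load_port(path: &Path) -> Result<u16, ConfigError> {
    // Translate the low-level io::Error into the typed domain error,
    // adding context (which path failed) at the propagation site.
    let raw = std::fs::read_to_string(path).map_err(|source| ConfigError::Io {
        path: path.display().to_string(),
        source,
    })?;
    raw.trim()
        .parse()
        .map_err(|_| ConfigError::InvalidPort(raw.trim().to_string()))
}
```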

## API Design Guidelines

### Common Traits Implementation

Eagerly implement common traits where appropriate:

- `Copy`, `Clone`, `Eq`, `PartialEq`, `Ord`, `PartialOrd`, `Hash`, `Debug`, `Display`, `Default`
- Use standard conversion traits: `From`, `AsRef`, `AsMut`
- Collections should implement `FromIterator` and `Extend`
- Note: `Send` and `Sync` are auto-implemented by the compiler when safe; avoid manual implementation unless using `unsafe` code

### Type Safety and Predictability

- Use newtypes to provide static distinctions
- Arguments should convey meaning through types; prefer specific types over generic `bool` parameters
- Use `Option<T>` appropriately for truly optional values
- Functions with a clear receiver should be methods
- Only smart pointers should implement `Deref` and `DerefMut`

### Future Proofing

- Use sealed traits to protect against downstream implementations
- Structs should have private fields
- Functions should validate their arguments
- All public types must implement `Debug`

## Testing and Documentation

- Write comprehensive unit tests using `#[cfg(test)]` modules and `#[test]` annotations.
- Use test modules alongside the code they test (`mod tests { ... }`).
- Write integration tests in the `tests/` directory with descriptive filenames.
- Write clear and concise comments for each function, struct, enum, and complex logic.
- Ensure functions have descriptive names and include comprehensive documentation.
- Document all public APIs with rustdoc (`///` comments) following the [API Guidelines](https://rust-lang.github.io/api-guidelines/).
- Use `#[doc(hidden)]` to hide implementation details from public documentation.
- Document error conditions, panic scenarios, and safety considerations.
- Examples should use the `?` operator, not `unwrap()` or the deprecated `try!` macro.

## Project Organization

- Use semantic versioning in `Cargo.toml`.
- Include comprehensive metadata: `description`, `license`, `repository`, `keywords`, `categories`.
- Use feature flags for optional functionality.
- Organize code into modules using `mod.rs` or named files.
- Keep `main.rs` or `lib.rs` minimal - move logic to modules.

## Quality Checklist

Before publishing or reviewing Rust code, ensure:

### Core Requirements

- [ ] **Naming**: Follows RFC 430 naming conventions
- [ ] **Traits**: Implements `Debug`, `Clone`, `PartialEq` where appropriate
- [ ] **Error Handling**: Uses `Result<T, E>` and provides meaningful error types
- [ ] **Documentation**: All public items have rustdoc comments with examples
- [ ] **Testing**: Comprehensive test coverage including edge cases

### Safety and Quality

- [ ] **Safety**: No unnecessary `unsafe` code, proper error handling
- [ ] **Performance**: Efficient use of iterators, minimal allocations
- [ ] **API Design**: Functions are predictable, flexible, and type-safe
- [ ] **Future Proofing**: Private fields in structs, sealed traits where appropriate
- [ ] **Tooling**: Code passes `cargo fmt`, `cargo clippy`, and `cargo test`
@@ -0,0 +1,162 @@
---
description: 'Guidelines for GitHub Copilot to write comments to achieve self-explanatory code with fewer comments. Examples are in JavaScript but the guidance applies to any language that has comments.'
applyTo: '**'
---

# Self-explanatory Code Commenting Instructions

## Core Principle

**Write code that speaks for itself. Comment only when necessary to explain WHY, not WHAT.**
We do not need comments most of the time.

## Commenting Guidelines

### ❌ AVOID These Comment Types

**Obvious Comments**
```javascript
// Bad: States the obvious
let counter = 0; // Initialize counter to zero
counter++; // Increment counter by one
```

**Redundant Comments**
```javascript
// Bad: Comment repeats the code
function getUserName() {
    return user.name; // Return the user's name
}
```

**Outdated Comments**
```javascript
// Bad: Comment doesn't match the code
// Calculate tax at 5% rate
const tax = price * 0.08; // Actually 8%
```

### ✅ WRITE These Comment Types

**Complex Business Logic**
```javascript
// Good: Explains WHY this specific calculation
// Apply progressive tax brackets: 10% up to 10k, 20% above
const tax = calculateProgressiveTax(income, [0.10, 0.20], [10000]);
```

**Non-obvious Algorithms**
```javascript
// Good: Explains the algorithm choice
// Using Floyd-Warshall for all-pairs shortest paths
// because we need distances between all nodes
for (let k = 0; k < vertices; k++) {
    for (let i = 0; i < vertices; i++) {
        for (let j = 0; j < vertices; j++) {
            // ... implementation
        }
    }
}
```

**Regex Patterns**
```javascript
// Good: Explains what the regex matches
// Match email format: username@domain.extension
const emailPattern = /^[a-zA-Z0-9._%+-]+@[a-zA-Z0-9.-]+\.[a-zA-Z]{2,}$/;
```

**API Constraints or Gotchas**
```javascript
// Good: Explains external constraint
// GitHub API rate limit: 5000 requests/hour for authenticated users
await rateLimiter.wait();
const response = await fetch(githubApiUrl);
```

## Decision Framework

Before writing a comment, ask:

1. **Is the code self-explanatory?** → No comment needed
2. **Would a better variable/function name eliminate the need?** → Refactor instead
3. **Does this explain WHY, not WHAT?** → Good comment
4. **Will this help future maintainers?** → Good comment

## Special Cases for Comments

### Public APIs
```javascript
/**
 * Calculate compound interest using the standard formula.
 *
 * @param {number} principal - Initial amount invested
 * @param {number} rate - Annual interest rate (as decimal, e.g., 0.05 for 5%)
 * @param {number} time - Time period in years
 * @param {number} compoundFrequency - How many times per year interest compounds (default: 1)
 * @returns {number} Final amount after compound interest
 */
function calculateCompoundInterest(principal, rate, time, compoundFrequency = 1) {
    // ... implementation
}
```

### Configuration and Constants
```javascript
// Good: Explains the source or reasoning
const MAX_RETRIES = 3; // Based on network reliability studies
const API_TIMEOUT = 5000; // AWS Lambda timeout is 15s, leaving buffer
```

### Annotations
```javascript
// TODO: Replace with proper user authentication after security review
// FIXME: Memory leak in production - investigate connection pooling
// HACK: Workaround for bug in library v2.1.0 - remove after upgrade
// NOTE: This implementation assumes UTC timezone for all calculations
// WARNING: This function modifies the original array instead of creating a copy
// PERF: Consider caching this result if called frequently in hot path
// SECURITY: Validate input to prevent SQL injection before using in query
// BUG: Edge case failure when array is empty - needs investigation
// REFACTOR: Extract this logic into separate utility function for reusability
// DEPRECATED: Use newApiFunction() instead - this will be removed in v3.0
```

## Anti-Patterns to Avoid

### Dead Code Comments
```javascript
// Bad: Don't comment out code
// const oldFunction = () => { ... };
const newFunction = () => { ... };
```

### Changelog Comments
```javascript
// Bad: Don't maintain history in comments
// Modified by John on 2023-01-15
// Fixed bug reported by Sarah on 2023-02-03
function processData() {
    // ... implementation
}
```

### Divider Comments
```javascript
// Bad: Don't use decorative comments
//=====================================
// UTILITY FUNCTIONS
//=====================================
```

## Quality Checklist

Before committing, ensure your comments:

- [ ] Explain WHY, not WHAT
- [ ] Are grammatically correct and clear
- [ ] Will remain accurate as code evolves
- [ ] Add genuine value to code understanding
- [ ] Are placed appropriately (above the code they describe)
- [ ] Use proper spelling and professional language

## Summary

Remember: **The best comment is the one you don't need to write because the code is self-documenting.**
@@ -0,0 +1,39 @@
name: Build

on:
  push:
    branches: [ "*" ]
  pull_request:
    branches: [ "*" ]

env:
  CARGO_TERM_COLOR: always

jobs:
  build:
    name: Build
    runs-on: ubuntu-latest

    permissions:
      contents: read

    steps:
      - name: Checkout repository
        uses: actions/checkout@v4

      - name: Install latest stable Rust toolchain
        uses: dtolnay/rust-toolchain@stable

      - name: Cache cargo registry & build artifacts
        uses: actions/cache@v4
        with:
          path: |
            ~/.cargo/registry
            ~/.cargo/git
            target
          key: ${{ runner.os }}-cargo-${{ hashFiles('**/Cargo.lock') }}
          restore-keys: |
            ${{ runner.os }}-cargo-

      - name: Build Release
        run: cargo build --release --verbose
@@ -5,37 +5,87 @@ on:
     tags:
       - '[0-9]+.[0-9]+.[0-9]+'
+  workflow_dispatch:
+    inputs:
+      tag:
+        description: 'Release tag (example: 3.3.15)'
+        required: true
+        type: string
+
+concurrency:
+  group: release-${{ github.ref_name }}-${{ github.event.inputs.tag || 'auto' }}
+  cancel-in-progress: true
 
 permissions:
   contents: read
   packages: write
 
 env:
   CARGO_TERM_COLOR: always
+  BINARY_NAME: telemt
 
 jobs:
-  build:
-    name: Build ${{ matrix.target }}
+  prepare:
+    name: Prepare
     runs-on: ubuntu-latest
     permissions:
       contents: read
 
+    outputs:
+      version: ${{ steps.vars.outputs.version }}
+      prerelease: ${{ steps.vars.outputs.prerelease }}
+
+    steps:
+      - name: Resolve version
+        id: vars
+        shell: bash
+        run: |
+          set -euo pipefail
+
+          if [ "${GITHUB_EVENT_NAME}" = "workflow_dispatch" ]; then
+            VERSION="${{ github.event.inputs.tag }}"
+          else
+            VERSION="${GITHUB_REF#refs/tags/}"
+          fi
+
+          VERSION="${VERSION#refs/tags/}"
+
+          if [ -z "${VERSION}" ]; then
+            echo "Release version is empty" >&2
+            exit 1
+          fi
+
+          if [[ "${VERSION}" == *-* ]]; then
+            PRERELEASE=true
+          else
+            PRERELEASE=false
+          fi
+
+          echo "version=${VERSION}" >> "${GITHUB_OUTPUT}"
+          echo "prerelease=${PRERELEASE}" >> "${GITHUB_OUTPUT}"
+
+  # ==========================
+  # GNU / glibc
+  # ==========================
+  build-gnu:
+    name: GNU ${{ matrix.asset }}
+    runs-on: ubuntu-latest
+    needs: prepare
+
+    container:
+      image: rust:slim-bookworm
 
     strategy:
       fail-fast: false
       matrix:
         include:
           - target: x86_64-unknown-linux-gnu
-            artifact_name: telemt
-            asset_name: telemt-x86_64-linux-gnu
+            asset: telemt-x86_64-linux-gnu
+            cpu: baseline
+
+          - target: x86_64-unknown-linux-gnu
+            asset: telemt-x86_64-v3-linux-gnu
+            cpu: v3
 
           - target: aarch64-unknown-linux-gnu
-            artifact_name: telemt
-            asset_name: telemt-aarch64-linux-gnu
-          - target: x86_64-unknown-linux-musl
-            artifact_name: telemt
-            asset_name: telemt-x86_64-linux-musl
-          - target: aarch64-unknown-linux-musl
-            artifact_name: telemt
-            asset_name: telemt-aarch64-linux-musl
+            asset: telemt-aarch64-linux-gnu
+            cpu: generic
 
     steps:
       - uses: actions/checkout@v4
@@ -43,47 +93,261 @@ jobs:
       - uses: dtolnay/rust-toolchain@v1
         with:
           toolchain: stable
-          targets: ${{ matrix.target }}
+          targets: |
+            x86_64-unknown-linux-gnu
+            aarch64-unknown-linux-gnu
 
-      - name: Install cross-compilation tools
+      - name: Install deps
         run: |
-          sudo apt-get update
-          sudo apt-get install -y gcc-aarch64-linux-gnu
+          apt-get update
+          apt-get install -y \
+            build-essential \
+            clang \
+            lld \
+            pkg-config \
+            gcc-aarch64-linux-gnu \
+            g++-aarch64-linux-gnu
 
       - uses: actions/cache@v4
         with:
           path: |
-            ~/.cargo/registry
-            ~/.cargo/git
+            /usr/local/cargo/registry
+            /usr/local/cargo/git
             target
-          key: ${{ runner.os }}-${{ matrix.target }}-cargo-${{ hashFiles('**/Cargo.lock') }}
+          key: gnu-${{ matrix.asset }}-${{ hashFiles('**/Cargo.lock') }}
           restore-keys: |
-            ${{ runner.os }}-${{ matrix.target }}-cargo-
+            gnu-${{ matrix.asset }}-
+            gnu-
 
-      - name: Install cross
-        run: cargo install cross --git https://github.com/cross-rs/cross
-
-      - name: Build Release
-        env:
-          RUSTFLAGS: ${{ contains(matrix.target, 'musl') && '-C target-feature=+crt-static' || '' }}
-        run: cross build --release --target ${{ matrix.target }}
-
-      - name: Package binary
+      - name: Build
         shell: bash
         run: |
-          cd target/${{ matrix.target }}/release
-          tar -czvf ${{ matrix.asset_name }}.tar.gz ${{ matrix.artifact_name }}
-          sha256sum ${{ matrix.asset_name }}.tar.gz > ${{ matrix.asset_name }}.sha256
+          set -euo pipefail
+
+          if [ "${{ matrix.target }}" = "aarch64-unknown-linux-gnu" ]; then
+            export CC=aarch64-linux-gnu-gcc
+            export CXX=aarch64-linux-gnu-g++
+            export RUSTFLAGS="-C linker=aarch64-linux-gnu-gcc -C lto=fat -C panic=abort"
+          else
+            export CC=clang
+            export CXX=clang++
+
+            if [ "${{ matrix.cpu }}" = "v3" ]; then
+              CPU_FLAGS="-C target-cpu=x86-64-v3"
+            else
+              CPU_FLAGS="-C target-cpu=x86-64"
+            fi
+
+            export RUSTFLAGS="-C linker=clang -C link-arg=-fuse-ld=lld -C lto=fat -C panic=abort ${CPU_FLAGS}"
+          fi
+
+          cargo build --release --target ${{ matrix.target }} -j "$(nproc)"
+
+      - name: Package
+        shell: bash
+        run: |
+          set -euo pipefail
+
+          mkdir -p dist
+          cp "target/${{ matrix.target }}/release/${{ env.BINARY_NAME }}" dist/telemt
+
+          if [ "${{ matrix.target }}" = "aarch64-unknown-linux-gnu" ]; then
+            STRIP_BIN=aarch64-linux-gnu-strip
+          else
+            STRIP_BIN=strip
+          fi
+
+          "${STRIP_BIN}" dist/telemt
+
+          cd dist
+          tar -czf "${{ matrix.asset }}.tar.gz" \
+            --owner=0 --group=0 --numeric-owner \
+            telemt
+
+          sha256sum "${{ matrix.asset }}.tar.gz" > "${{ matrix.asset }}.tar.gz.sha256"
 
       - uses: actions/upload-artifact@v4
         with:
-          name: ${{ matrix.asset_name }}
-          path: |
-            target/${{ matrix.target }}/release/${{ matrix.asset_name }}.tar.gz
-            target/${{ matrix.target }}/release/${{ matrix.asset_name }}.sha256
+          name: ${{ matrix.asset }}
+          path: dist/*
 
-  build-docker-image:
-    needs: build
+  # ==========================
+  # MUSL
+  # ==========================
+  build-musl:
+    name: MUSL ${{ matrix.asset }}
     runs-on: ubuntu-latest
+    needs: prepare
+
+    container:
+      image: rust:slim-bookworm
+
+    strategy:
+      fail-fast: false
+      matrix:
+        include:
+          - target: x86_64-unknown-linux-musl
+            asset: telemt-x86_64-linux-musl
+            cpu: baseline
+
+          - target: x86_64-unknown-linux-musl
+            asset: telemt-x86_64-v3-linux-musl
+            cpu: v3
+
+          - target: aarch64-unknown-linux-musl
+            asset: telemt-aarch64-linux-musl
+            cpu: generic
+
+    steps:
+      - uses: actions/checkout@v4
+
+      - name: Install deps
+        run: |
+          apt-get update
+          apt-get install -y \
+            musl-tools \
+            pkg-config \
+            curl
+
+      - uses: actions/cache@v4
+        if: matrix.target == 'aarch64-unknown-linux-musl'
+        with:
+          path: ~/.musl-aarch64
+          key: musl-toolchain-aarch64-v1
+
+      - name: Install aarch64 musl toolchain
+        if: matrix.target == 'aarch64-unknown-linux-musl'
+        shell: bash
+        run: |
+          set -euo pipefail
+
+          TOOLCHAIN_DIR="$HOME/.musl-aarch64"
+          ARCHIVE="aarch64-linux-musl-cross.tgz"
+          URL="https://github.com/telemt/telemt/releases/download/toolchains/${ARCHIVE}"
+
+          if [ -x "${TOOLCHAIN_DIR}/bin/aarch64-linux-musl-gcc" ]; then
+            echo "MUSL toolchain cached"
+          else
+            curl -fL \
+              --retry 5 \
+              --retry-delay 3 \
+              --connect-timeout 10 \
+              --max-time 120 \
+              -o "${ARCHIVE}" "${URL}"
+
+            mkdir -p "${TOOLCHAIN_DIR}"
+            tar -xzf "${ARCHIVE}" --strip-components=1 -C "${TOOLCHAIN_DIR}"
+          fi
+
+          echo "${TOOLCHAIN_DIR}/bin" >> "${GITHUB_PATH}"
+
+      - name: Add rust target
+        run: rustup target add ${{ matrix.target }}
+
+      - uses: actions/cache@v4
+        with:
+          path: |
+            /usr/local/cargo/registry
+            /usr/local/cargo/git
+            target
+          key: musl-${{ matrix.asset }}-${{ hashFiles('**/Cargo.lock') }}
+          restore-keys: |
+            musl-${{ matrix.asset }}-
+            musl-
+
+      - name: Build
+        shell: bash
+        run: |
+          set -euo pipefail
+
+          if [ "${{ matrix.target }}" = "aarch64-unknown-linux-musl" ]; then
+            export CC=aarch64-linux-musl-gcc
+            export CC_aarch64_unknown_linux_musl=aarch64-linux-musl-gcc
+            export RUSTFLAGS="-C target-feature=+crt-static -C linker=aarch64-linux-musl-gcc -C lto=fat -C panic=abort"
+          else
+            export CC=musl-gcc
+            export CC_x86_64_unknown_linux_musl=musl-gcc
+
+            if [ "${{ matrix.cpu }}" = "v3" ]; then
+              CPU_FLAGS="-C target-cpu=x86-64-v3"
+            else
+              CPU_FLAGS="-C target-cpu=x86-64"
+            fi
+
+            export RUSTFLAGS="-C target-feature=+crt-static -C lto=fat -C panic=abort ${CPU_FLAGS}"
+          fi
+
+          cargo build --release --target ${{ matrix.target }} -j "$(nproc)"
+
+      - name: Package
+        shell: bash
+        run: |
+          set -euo pipefail
+
+          mkdir -p dist
+          cp "target/${{ matrix.target }}/release/${{ env.BINARY_NAME }}" dist/telemt
+
+          if [ "${{ matrix.target }}" = "aarch64-unknown-linux-musl" ]; then
+            STRIP_BIN=aarch64-linux-musl-strip
+          else
+            STRIP_BIN=strip
+          fi
+
+          "${STRIP_BIN}" dist/telemt
+
+          cd dist
+          tar -czf "${{ matrix.asset }}.tar.gz" \
+            --owner=0 --group=0 --numeric-owner \
+            telemt
+
+          sha256sum "${{ matrix.asset }}.tar.gz" > "${{ matrix.asset }}.tar.gz.sha256"
+
+      - uses: actions/upload-artifact@v4
+        with:
+          name: ${{ matrix.asset }}
+          path: dist/*
+
+  # ==========================
+  # Release
+  # ==========================
+  release:
+    name: Release
+    runs-on: ubuntu-latest
+    needs: [prepare, build-gnu, build-musl]
+
+    permissions:
+      contents: write
+
+    steps:
+      - uses: actions/download-artifact@v4
+        with:
+          path: artifacts
+
+      - name: Flatten artifacts
+        shell: bash
+        run: |
+          set -euo pipefail
+          mkdir -p dist
+          find artifacts -type f -exec cp {} dist/ \;
+
+      - name: Create GitHub Release
+        uses: softprops/action-gh-release@v2
+        with:
+          tag_name: ${{ needs.prepare.outputs.version }}
+          target_commitish: ${{ github.sha }}
+          files: dist/*
+          generate_release_notes: true
+          prerelease: ${{ needs.prepare.outputs.prerelease == 'true' }}
+          overwrite_files: true
+
+  # ==========================
+  # Docker
+  # ==========================
+  docker:
+    name: Docker
+    runs-on: ubuntu-latest
+    needs: [prepare, release]
 
     permissions:
       contents: read
       packages: write
@@ -92,48 +356,66 @@ jobs:
       - uses: actions/checkout@v4
 
       - uses: docker/setup-qemu-action@v3
 
       - uses: docker/setup-buildx-action@v3
 
-      - name: Login to GHCR
-        uses: docker/login-action@v3
+      - uses: docker/login-action@v3
         with:
           registry: ghcr.io
           username: ${{ github.actor }}
           password: ${{ secrets.GITHUB_TOKEN }}
 
-      - name: Extract version
-        id: vars
-        run: echo "VERSION=${GITHUB_REF#refs/tags/}" >> $GITHUB_OUTPUT
+      - name: Probe release assets
+        shell: bash
+        env:
+          VERSION: ${{ needs.prepare.outputs.version }}
+        run: |
+          set -euo pipefail
 
-      - name: Build and push
+          for asset in \
+            telemt-x86_64-linux-musl.tar.gz \
+            telemt-x86_64-linux-musl.tar.gz.sha256 \
+            telemt-aarch64-linux-musl.tar.gz \
+            telemt-aarch64-linux-musl.tar.gz.sha256
+          do
+            curl -fsIL \
+              --retry 10 \
+              --retry-delay 3 \
+              "https://github.com/${GITHUB_REPOSITORY}/releases/download/${VERSION}/${asset}" \
+              > /dev/null
+          done
+
+      - name: Compute image tags
+        id: meta
+        shell: bash
+        env:
+          VERSION: ${{ needs.prepare.outputs.version }}
+        run: |
+          set -euo pipefail
+
+          IMAGE="$(echo "ghcr.io/${GITHUB_REPOSITORY}" | tr '[:upper:]' '[:lower:]')"
+          TAGS="${IMAGE}:${VERSION}"
+
+          if [[ "${VERSION}" != *-* ]]; then
+            TAGS="${TAGS}"$'\n'"${IMAGE}:latest"
+          fi
+
+          {
+            echo "tags<<EOF"
+            printf '%s\n' "${TAGS}"
+            echo "EOF"
+          } >> "${GITHUB_OUTPUT}"
+
+      - name: Build & Push
         uses: docker/build-push-action@v6
         with:
           context: .
           push: true
-          tags: |
-            ghcr.io/${{ github.repository }}:${{ steps.vars.outputs.VERSION }}
-            ghcr.io/${{ github.repository }}:latest
-
-  release:
-    name: Create Release
-    needs: build
-    runs-on: ubuntu-latest
-    permissions:
-      contents: write
-
-    steps:
-      - uses: actions/checkout@v4
-        with:
-          fetch-depth: 0
-
-      - uses: actions/download-artifact@v4
-        with:
-          path: artifacts
-
-      - name: Create Release
-        uses: softprops/action-gh-release@v2
-        with:
-          files: artifacts/**/*
-          generate_release_notes: true
-          draft: false
-          prerelease: ${{ contains(github.ref, '-rc') || contains(github.ref, '-beta') || contains(github.ref, '-alpha') }}
+          pull: true
+          platforms: linux/amd64,linux/arm64
+          tags: ${{ steps.meta.outputs.tags }}
+          build-args: |
+            TELEMT_REPOSITORY=${{ github.repository }}
+            TELEMT_VERSION=${{ needs.prepare.outputs.version }}
+          cache-from: type=gha
+          cache-to: type=gha,mode=max
@@ -1,54 +0,0 @@
name: Rust

on:
  push:
    branches: [ "*" ]
  pull_request:
    branches: [ "*" ]

env:
  CARGO_TERM_COLOR: always

jobs:
  build:
    name: Build
    runs-on: ubuntu-latest

    permissions:
      contents: read
      actions: write
      checks: write

    steps:
      - name: Checkout repository
        uses: actions/checkout@v4

      - name: Install latest stable Rust toolchain
        uses: dtolnay/rust-toolchain@stable
        with:
          components: rustfmt, clippy

      - name: Cache cargo registry & build artifacts
        uses: actions/cache@v4
        with:
          path: |
            ~/.cargo/registry
            ~/.cargo/git
            target
          key: ${{ runner.os }}-cargo-${{ hashFiles('**/Cargo.lock') }}
          restore-keys: |
            ${{ runner.os }}-cargo-

      - name: Build Release
        run: cargo build --release --verbose

      - name: Run tests
        run: cargo test --verbose

      # clippy doesn't fail on warnings because of active development of telemt
      # and many warnings
      - name: Run clippy
        run: cargo clippy -- --cap-lints warn

      - name: Check for unused dependencies
        run: cargo udeps || true
@@ -0,0 +1,139 @@
name: Check

on:
  push:
    branches: [ "*" ]
  pull_request:
    branches: [ "*" ]

env:
  CARGO_TERM_COLOR: always

concurrency:
  group: test-${{ github.ref }}
  cancel-in-progress: true

jobs:
  # ==========================
  # Formatting
  # ==========================
  fmt:
    name: Fmt
    runs-on: ubuntu-latest

    permissions:
      contents: read

    steps:
      - uses: actions/checkout@v4

      - uses: dtolnay/rust-toolchain@stable
        with:
          components: rustfmt

      - run: cargo fmt -- --check

  # ==========================
  # Tests
  # ==========================
  test:
    name: Test
    runs-on: ubuntu-latest

    permissions:
      contents: read
      actions: write
      checks: write

    steps:
      - uses: actions/checkout@v4

      - uses: dtolnay/rust-toolchain@stable

      - name: Cache cargo
        uses: actions/cache@v4
        with:
          path: |
            ~/.cargo/bin
            ~/.cargo/registry
            ~/.cargo/git
            target
          key: ${{ runner.os }}-cargo-nextest-${{ hashFiles('**/Cargo.lock') }}
          restore-keys: |
            ${{ runner.os }}-cargo-nextest-
            ${{ runner.os }}-cargo-

      - name: Install cargo-nextest
        run: cargo install --locked cargo-nextest || true

      - name: Run tests with nextest
        run: cargo nextest run -j "$(nproc)"

  # ==========================
  # Clippy
  # ==========================
  clippy:
    name: Clippy
    runs-on: ubuntu-latest

    permissions:
      contents: read
      checks: write

    steps:
      - uses: actions/checkout@v4

      - uses: dtolnay/rust-toolchain@stable
        with:
          components: clippy

      - name: Cache cargo
        uses: actions/cache@v4
        with:
          path: |
            ~/.cargo/registry
            ~/.cargo/git
            target
          key: ${{ runner.os }}-cargo-clippy-${{ hashFiles('**/Cargo.lock') }}
          restore-keys: |
            ${{ runner.os }}-cargo-clippy-
            ${{ runner.os }}-cargo-

      - name: Run clippy
        run: cargo clippy -j "$(nproc)" -- --cap-lints warn

  # ==========================
  # Udeps
  # ==========================
  udeps:
    name: Udeps
    runs-on: ubuntu-latest

    permissions:
      contents: read

    steps:
      - uses: actions/checkout@v4

      - uses: dtolnay/rust-toolchain@stable
        with:
          components: rust-src

      - name: Cache cargo
        uses: actions/cache@v4
        with:
          path: |
            ~/.cargo/bin
            ~/.cargo/registry
            ~/.cargo/git
            target
          key: ${{ runner.os }}-cargo-udeps-${{ hashFiles('**/Cargo.lock') }}
          restore-keys: |
            ${{ runner.os }}-cargo-udeps-
            ${{ runner.os }}-cargo-

      - name: Install cargo-udeps
        run: cargo install --locked cargo-udeps || true

      - name: Run udeps
        run: cargo udeps -j "$(nproc)" || true
@@ -21,3 +21,4 @@ target
 #.idea/
 
 proxy-secret
+coverage-html/
@ -1,58 +0,0 @@
# Architect Mode Rules for Telemt

## Architecture Overview

```mermaid
graph TB
    subgraph Entry
        Client[Clients] --> Listener[TCP/Unix Listener]
    end

    subgraph Proxy Layer
        Listener --> ClientHandler[ClientHandler]
        ClientHandler --> Handshake[Handshake Validator]
        Handshake -->|Valid| Relay[Relay Layer]
        Handshake -->|Invalid| Masking[Masking/TLS Fronting]
    end

    subgraph Transport
        Relay --> MiddleProxy[Middle-End Proxy Pool]
        Relay --> DirectRelay[Direct DC Relay]
        MiddleProxy --> TelegramDC[Telegram DCs]
        DirectRelay --> TelegramDC
    end
```

## Module Dependencies
- [`src/main.rs`](src/main.rs) - Entry point, spawns all async tasks
- [`src/config/`](src/config/) - Configuration loading with auto-migration
- [`src/error.rs`](src/error.rs) - Error types, must be used by all modules
- [`src/crypto/`](src/crypto/) - AES, SHA, random number generation
- [`src/protocol/`](src/protocol/) - MTProto constants, frame encoding, obfuscation
- [`src/stream/`](src/stream/) - Stream wrappers, buffer pool, frame codecs
- [`src/proxy/`](src/proxy/) - Client handling, handshake, relay logic
- [`src/transport/`](src/transport/) - Upstream management, middle-proxy, SOCKS support
- [`src/stats/`](src/stats/) - Statistics and replay protection
- [`src/ip_tracker.rs`](src/ip_tracker.rs) - Per-user IP tracking

## Key Architectural Constraints

### Middle-End Proxy Mode
- Requires a public IP on the interface OR 1:1 NAT with STUN probing
- Uses a separate `proxy-secret` from Telegram (NOT user secrets)
- Falls back to direct mode automatically on STUN mismatch (see the sketch below)
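The fallback rule can be stated as a tiny decision function. This is a minimal sketch under assumed names (`Mode` and `select_mode` are illustrative stand-ins, not Telemt's real API):

```rust
// Illustrative only: the STUN-mismatch fallback rule described above.
use std::net::IpAddr;

enum Mode { MiddleEnd, Direct }

fn select_mode(local_ip: IpAddr, stun_reflected: IpAddr, ignore_mismatch: bool) -> Mode {
    if local_ip == stun_reflected || ignore_mismatch {
        Mode::MiddleEnd // public IP, or 1:1 NAT confirmed (or explicitly trusted)
    } else {
        Mode::Direct // automatic fallback on STUN mismatch
    }
}
```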
### TLS Fronting
- Invalid handshakes are transparently proxied to `mask_host`
- This is critical for DPI evasion - do not change this behavior
- `mask_unix_sock` and `mask_host` are mutually exclusive

### Stream Architecture
- The buffer pool is shared globally via Arc - it prevents allocation storms (see the sketch below)
- Frame codecs implement tokio-util Encoder/Decoder traits
- The state machine in [`src/stream/state.rs`](src/stream/state.rs) manages stream transitions
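As a rough illustration of why an Arc-shared pool prevents allocation storms, here is a minimal sketch. The method names are assumptions; the real type lives in [`src/stream/buffer_pool.rs`](src/stream/buffer_pool.rs):

```rust
// Sketch of a globally shared buffer pool, using the crossbeam-queue crate
// already listed in Cargo.toml. Method names are hypothetical.
use bytes::BytesMut;
use crossbeam_queue::ArrayQueue;

pub struct BufferPool {
    free: ArrayQueue<BytesMut>, // lock-free freelist; the pool itself is shared via Arc
    buf_size: usize,
}

impl BufferPool {
    pub fn acquire(&self) -> BytesMut {
        // Reuse a pooled buffer when available; allocating only on a miss
        // is what prevents allocation storms under connection bursts.
        self.free
            .pop()
            .unwrap_or_else(|| BytesMut::with_capacity(self.buf_size))
    }

    pub fn release(&self, mut buf: BytesMut) {
        buf.clear();
        let _ = self.free.push(buf); // if the pool is full, just drop the buffer
    }
}
```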
### Configuration Migration
- [`ProxyConfig::load()`](src/config/mod.rs:641) mutates the config in-place
- New fields must have sensible defaults
- The DC203 override is auto-injected for CDN/media support
@ -1,23 +0,0 @@
# Code Mode Rules for Telemt

## Error Handling
- Always use [`ProxyError`](src/error.rs:168) from [`src/error.rs`](src/error.rs) for proxy operations
- [`HandshakeResult<T,R,W>`](src/error.rs:292) returns the streams on a bad client - these MUST be returned for masking, never dropped (see the sketch below)
- Use the [`Recoverable`](src/error.rs:110) trait to check whether errors are retryable
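A minimal sketch of what "returned for masking, never dropped" looks like in practice. The shape of `HandshakeResult` and the helper names here are simplified stand-ins, not the real definitions in [`src/error.rs`](src/error.rs):

```rust
// Simplified stand-in for the real HandshakeResult<T,R,W>.
enum HandshakeResult<T, R, W> {
    Ok(T),
    BadClient { read: R, write: W }, // streams preserved for the masking path
}

async fn relay<T>(_session: T) { /* payload relay (stub) */ }
async fn mask_to_fallback<R, W>(_read: R, _write: W) { /* splice to mask_host (stub) */ }

async fn handle<T, R, W>(result: HandshakeResult<T, R, W>) {
    match result {
        HandshakeResult::Ok(session) => relay(session).await,
        // Dropping `read`/`write` here would reset the connection and hand
        // DPI a detectable signature; the streams MUST flow into masking.
        HandshakeResult::BadClient { read, write } => mask_to_fallback(read, write).await,
    }
}
```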
## Configuration Changes
- [`ProxyConfig::load()`](src/config/mod.rs:641) auto-mutates the config - new fields should have defaults
- The DC203 override is auto-injected if missing - do not remove this behavior
- When adding config fields, add migration logic in [`ProxyConfig::load()`](src/config/mod.rs:641)

## Crypto Code
- [`SecureRandom`](src/crypto/random.rs) from [`src/crypto/random.rs`](src/crypto/random.rs) must be used for all crypto operations
- Never use `rand::thread_rng()` directly - use the shared `Arc<SecureRandom>` (see the sketch below)
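A minimal sketch of the shared-RNG pattern; `SecureRandom` here is a stand-in and `fill_bytes` is an assumed method name, not the module's confirmed API:

```rust
use std::sync::Arc;

struct SecureRandom; // stand-in for the real type in src/crypto/random.rs

impl SecureRandom {
    fn fill_bytes(&self, _buf: &mut [u8]) { /* CSPRNG-backed in the real code */ }
}

// Components receive the shared handle instead of constructing their own RNG
// (and never call rand::thread_rng() in crypto paths).
fn new_nonce(rng: &Arc<SecureRandom>) -> [u8; 16] {
    let mut nonce = [0u8; 16];
    rng.fill_bytes(&mut nonce);
    nonce
}
```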
## Stream Handling
- The buffer pool [`BufferPool`](src/stream/buffer_pool.rs) is shared via Arc - always use it instead of allocating
- Frame codecs in [`src/stream/frame_codec.rs`](src/stream/frame_codec.rs) implement tokio-util's Encoder/Decoder traits

## Testing
- Tests are inline in modules using `#[cfg(test)]`
- Use `cargo test --lib <module_name>` to run tests for specific modules
@ -1,27 +0,0 @@
# Debug Mode Rules for Telemt

## Logging
- The `RUST_LOG` environment variable takes absolute priority over all config log levels
- Log levels: `trace`, `debug`, `info`, `warn`, `error`
- Use `RUST_LOG=debug cargo run` for detailed operational logs
- Use `RUST_LOG=trace cargo run` for full protocol-level debugging

## Middle-End Proxy Debugging
- Set the `ME_DIAG=1` environment variable for high-precision cryptography diagnostics
- STUN probe results are logged at startup - check for a mismatch between the local and reflected IP
- If Middle-End fails, check that `proxy_secret_path` points to a valid file from https://core.telegram.org/getProxySecret

## Connection Issues
- DC connectivity is logged at startup with RTT measurements
- If a DC ping fails, check `dc_overrides` for custom addresses
- Use `prefer_ipv6 = false` in the config if IPv6 is unreliable

## TLS Fronting Issues
- Invalid handshakes are proxied to `mask_host` - check that this host is reachable
- `mask_unix_sock` and `mask_host` are mutually exclusive - only one can be set
- If `mask_unix_sock` is set, the socket must exist before connections arrive

## Common Errors
- `ReplayAttack` - the client replayed a handshake nonce; a potential attack
- `TimeSkew` - the client clock is off; can be disabled with `ignore_time_skew = true`
- `TgHandshakeTimeout` - the upstream DC connection failed; check the network
AGENTS.md
@ -5,6 +5,22 @@ Your responses are precise, minimal, and architecturally sound. You are working

---

### Context: The Telemt Project

You are working on **Telemt**, a high-performance, production-grade Telegram MTProxy implementation written in Rust. It is explicitly designed to operate in highly hostile network environments and to evade advanced network censorship.

**Adversarial Threat Model:**
The proxy operates under constant surveillance by DPI (Deep Packet Inspection) systems and active scanners (state firewalls, mobile-operator fraud controls). These entities actively probe IPs, analyze protocol handshakes, and look for known proxy signatures in order to block or throttle traffic.

**Core Architectural Pillars:**
1. **TLS-Fronting (TLS-F) & TCP-Splitting (TCP-S):** To the outside world, Telemt looks like a standard TLS server. If a client presents a valid MTProxy key, the connection is handled internally. If a censor's scanner, a web browser, or an unauthorized crawler connects, Telemt seamlessly splices the TCP connection (L4) to a real, legitimate HTTPS fallback server (e.g., Nginx) without modifying the `ClientHello` or terminating the TLS handshake.
2. **Middle-End (ME) Orchestration:** A highly concurrent, generation-based pool managing upstream connections to Telegram Datacenters (DCs). It utilizes an **Adaptive Floor** (dynamically scaling writer connections based on traffic), **Hardswaps** (zero-downtime pool reconfiguration), and **STUN/NAT** reflection mechanisms.
3. **Strict KDF Routing:** The cryptographic Key Derivation Functions (KDF) in this protocol strictly rely on the exact pairing of source IP/port and destination IP/port. Deviations or missing port logic will silently break the MTProto handshake (see the sketch below).
4. **Data Plane vs. Control Plane Isolation:** The Data Plane (readers, writers, payload relay, TCP splicing) must remain strictly non-blocking, zero-allocation in hot paths, and highly resilient to network backpressure. The Control Plane (API, metrics, pool generation swaps, config reloads) orchestrates state asynchronously without stalling the Data Plane.

Any modification you make must preserve Telemt's invisibility to censors, its strict memory-safety invariants, and its hot-path throughput.
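To make pillar 3 concrete, here is a hedged sketch of what "exact pairing" means for the KDF input. The field order and byte order below are assumptions for illustration only; the real routine must match Telegram's Middle-End specification exactly:

```rust
// Illustrative only: the KDF input must bind both socket pairs completely.
// Omitting a port (or using the wrong byte order) silently breaks the handshake.
use std::net::SocketAddrV4;

fn kdf_input(src: SocketAddrV4, dst: SocketAddrV4) -> Vec<u8> {
    let mut buf = Vec::with_capacity(12);
    buf.extend_from_slice(&src.ip().octets());        // source IP
    buf.extend_from_slice(&src.port().to_le_bytes()); // source port - must not be omitted
    buf.extend_from_slice(&dst.ip().octets());        // destination IP
    buf.extend_from_slice(&dst.port().to_le_bytes()); // destination port
    buf
}
```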
### 0. Priority Resolution — Scope Control

This section resolves conflicts between code quality enforcement and scope limitation.
@ -374,6 +390,12 @@ you MUST explain why existing invariants remain valid.
- Do not modify existing tests unless the task explicitly requires it.
- Do not weaken assertions.
- Preserve determinism in testable components.
- Bug-first forces the discipline of proving you understand a bug before you fix it. Tests written after a fix almost always pass trivially and catch nothing new.
- Invariants over scenarios is the core shift. The route_mode table alone would have caught both BUG-1 and BUG-2 before they were written — "snapshot equals watch state after any transition burst" is a two-line property test that fails immediately on the current diverged-atomics code.
- Differential/model testing catches logic drift over time.
- Scheduler pressure is aimed specifically at the concurrent state bugs that keep reappearing. A single-threaded happy-path test of set_mode will never find subtle bugs; 10,000 concurrent calls will find them on the first run (see the sketch after this list).
- The mutation gate answers your original complaint directly: it measures test power. If you can remove a bounds check and nothing breaks, the suite isn't covering that branch yet — it just says so explicitly.
- A dead parameter is a code-smell rule.
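A compressed, self-contained illustration of the scheduler-pressure and invariant ideas above. `Router` is a hypothetical stand-in that deliberately reproduces the diverged-atomics pattern, so the invariant assertion can actually fail under contention:

```rust
use std::sync::Arc;
use std::sync::atomic::{AtomicU32, Ordering};

// Hypothetical stand-in: snapshot and watch state stored in two separate
// atomics, updated one after the other (the buggy "diverged atomics" shape).
struct Router {
    snapshot: AtomicU32,
    watch: AtomicU32,
}

impl Router {
    fn new() -> Self {
        Self { snapshot: AtomicU32::new(0), watch: AtomicU32::new(0) }
    }
    fn set_mode(&self, mode: u32) {
        self.snapshot.store(mode, Ordering::SeqCst);
        self.watch.store(mode, Ordering::SeqCst);
    }
}

#[tokio::test(flavor = "multi_thread", worker_threads = 8)]
async fn set_mode_burst_keeps_snapshot_and_watch_consistent() {
    let router = Arc::new(Router::new());
    let tasks: Vec<_> = (0..10_000u32)
        .map(|i| {
            let r = router.clone();
            tokio::spawn(async move { r.set_mode(i % 3) })
        })
        .collect();
    for t in tasks {
        t.await.unwrap();
    }
    // The invariant, not a scenario: after any transition burst the snapshot
    // must equal the watch state. Interleaved stores make this fail quickly.
    assert_eq!(
        router.snapshot.load(Ordering::SeqCst),
        router.watch.load(Ordering::SeqCst)
    );
}
```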
### 15. Security Constraints
@ -0,0 +1,212 @@
# Code of Conduct

## Purpose

**Telemt exists to solve technical problems.**

Telemt is open to contributors who want to learn, improve, and build meaningful systems together.

It is a place for building, testing, reasoning, documenting, and improving systems.

Discussions that advance this work are in scope. Discussions that divert it are not.

Technology has consequences. Responsibility is inherent.

> **Zweck bestimmt die Form.**
> Purpose defines form.

---

## Principles

* **Technical over emotional**

  Arguments are grounded in data, logs, reproducible cases, or clear reasoning.

* **Clarity over noise**

  Communication is structured, concise, and relevant.

* **Openness with standards**

  Participation is open. The work remains disciplined.

* **Independence of judgment**

  Claims are evaluated on technical merit, not affiliation or posture.

* **Responsibility over capability**

  Capability does not justify careless use.

* **Cooperation over friction**

  Progress depends on coordination, mutual support, and honest review.

* **Good intent, rigorous method**

  Assume good intent, but require rigor.

> **Aussagen gelten nach ihrer Begründung.**
> Claims are weighed by their evidence.

---

## Expected Behavior

Participants are expected to:

* Communicate directly and respectfully
* Support claims with evidence
* Stay within technical scope
* Accept critique and provide it constructively
* Reduce noise, duplication, and ambiguity
* Help others reach correct and reproducible outcomes
* Act in a way that improves the system as a whole

Precision is learned.

New contributors are welcome. They are expected to grow into these standards. Existing contributors are expected to make that growth possible.

> **Wer behauptet, belegt.**
> Whoever claims, proves.

---

## Unacceptable Behavior

The following is not allowed:

* Personal attacks, insults, harassment, or intimidation
* Repeatedly derailing discussion away from Telemt's purpose
* Spam, flooding, or repeated low-quality input
* Misinformation presented as fact
* Attempts to degrade, destabilize, or exhaust Telemt or its participants
* Use of Telemt or its spaces to enable harm

Telemt is not a venue for disputes that displace technical work.
Such discussions may be closed, removed, or redirected.

> **Störung ist kein Beitrag.**
> Disruption is not contribution.

---

## Security and Misuse

Telemt is intended for responsible use.

* Do not use it to plan, coordinate, or execute harm
* Do not publish vulnerabilities without responsible disclosure
* Report security issues privately where possible

Security is both technical and behavioral.

> **Verantwortung endet nicht am Code.**
> Responsibility does not end at the code.

---

## Openness

Telemt is open to contributors of different backgrounds, experience levels, and working styles.

- Standards are public, legible, and applied to the work itself.
- Questions are welcome. Careful disagreement is welcome. Honest correction is welcome.
- Gatekeeping by obscurity, status signaling, or hostility is not.

---

## Scope

This Code of Conduct applies to all official spaces:

* Source repositories (issues, pull requests, discussions)
* Documentation
* Communication channels associated with Telemt

---

## Maintainer Stewardship

Maintainers are responsible for final decisions in matters of conduct, scope, and direction.

This responsibility is stewardship:
- preserving continuity,
- protecting signal,
- maintaining standards,
- keeping Telemt workable for others.

Judgment should be exercised with restraint, consistency, and institutional responsibility.
- Not every decision requires extended debate.
- Not every intervention requires public explanation.

All decisions are expected to serve the durability, clarity, and integrity of Telemt.

> **Ordnung ist Voraussetzung der Funktion.**
> Order is the precondition of function.

---

## Enforcement

Maintainers may act to preserve the integrity of Telemt, including by:

* Removing content
* Locking discussions
* Rejecting contributions
* Restricting or banning participants

Actions are taken to maintain function, continuity, and signal quality.
- Where possible, correction is preferred to exclusion.
- Where necessary, exclusion is preferred to decay.

---

## Final

Telemt is built on discipline, structure, and shared intent.
- Signal over noise.
- Facts over opinion.
- Systems over rhetoric.

- Work is collective.
- Outcomes are shared.
- Responsibility is distributed.

- Precision is learned.
- Rigor is expected.
- Help is part of the work.

> **Ordnung ist Voraussetzung der Freiheit.**
> Order is the precondition of freedom.

- If you contribute — contribute with care.
- If you speak — speak with substance.
- If you engage — engage constructively.

---

## After All

Systems outlive intentions.
- What is built will be used.
- What is released will propagate.
- What is maintained will define the future state.

There is no neutral infrastructure, only infrastructure shaped well or poorly.

> **Jedes System trägt Verantwortung.**
> Every system carries responsibility.

- Stability requires discipline.
- Freedom requires structure.
- Trust requires honesty.

In the end: the system reflects its contributors.
@ -1,14 +1,82 @@
# Issues
## Warning
Before opening an Issue: if it is more a question than a problem or a bug, ask about it [in our chat](https://t.me/telemtrs)

## What it is not
- NOT a question-and-answer board
- NOT a helpdesk

***Each of your Issues triggers attempts to reproduce and analyze the problem, which are done manually by people***

---

# Pull Requests

## General
- ONLY signed and verified commits
- ONLY from your name
- DO NOT commit with `codex`, `claude`, or other AI tools as author/committer
- PREFER the `flow` branch for development, not `main`

## AI
We are not against modern tools like AI, where you act as a principal or architect, but we consider it important that:

- you really understand what you're doing
- you understand the relationships and dependencies of the components being modified
- you understand the architecture of Telegram MTProto, MTProxy, and the Middle-End KDF, at least generically
- you DO NOT commit for the sake of commits, but to help the community, the core developers, and ordinary users

---

## Definition of Ready (MANDATORY)

A Pull Request WILL be ignored or closed if:

- it does NOT build
- it does NOT pass tests
- it does NOT follow the formatting rules
- it contains unrelated or excessive changes
- the author cannot clearly explain the change

---

## Blessed Principles
- A PR must build
- A PR must pass tests
- A PR must be understood by its author

---

## AI Usage Policy

AI tools (Claude, ChatGPT, Codex, DeepSeek, etc.) are allowed as **assistants**, NOT as decision-makers.

By submitting a PR, you confirm that:

- you fully understand the code you submit
- you verified its correctness manually
- you reviewed the architecture and dependencies
- you take full responsibility for the change

AI-generated code is treated as a **draft** and must be validated like any other external contribution.

PRs that look like unverified AI dumps WILL be closed

---

## Maintainer Policy

Maintainers reserve the right to:

- close PRs that do not meet basic quality requirements
- request explanations before review
- ignore low-effort contributions

Respect the reviewers' time

---

## Enforcement

Pull Requests that violate project standards may be closed without review.

This includes (but is not limited to):

- non-building code
- failing tests
- unverified or low-effort changes
- inability to explain the change

These actions follow the Code of Conduct and are intended to preserve signal, quality, and Telemt's integrity
File diff suppressed because it is too large

Cargo.toml
@ -1,8 +1,11 @@
[package]
name = "telemt"
version = "3.1.5"
version = "3.3.38"
edition = "2024"

[features]
redteam_offline_expected_fail = []

[dependencies]
# C
libc = "0.2"

@ -22,26 +25,37 @@ hmac = "0.12"
crc32fast = "1.4"
crc32c = "0.6"
zeroize = { version = "1.8", features = ["derive"] }
subtle = "2.6"
static_assertions = "1.1"

# Network
socket2 = { version = "0.5", features = ["all"] }
nix = { version = "0.28", default-features = false, features = ["net"] }
socket2 = { version = "0.6", features = ["all"] }
nix = { version = "0.31", default-features = false, features = [
    "net",
    "user",
    "process",
    "fs",
    "signal",
] }
shadowsocks = { version = "1.24", features = ["aead-cipher-2022"] }

# Serialization
serde = { version = "1.0", features = ["derive"] }
serde_json = "1.0"
toml = "0.8"
x509-parser = "0.15"
toml = "1.0"
x509-parser = "0.18"

# Utils
bytes = "1.9"
thiserror = "2.0"
tracing = "0.1"
tracing-subscriber = { version = "0.3", features = ["env-filter"] }
tracing-appender = "0.2"
parking_lot = "0.12"
dashmap = "5.5"
dashmap = "6.1"
arc-swap = "1.7"
lru = "0.16"
rand = "0.9"
rand = "0.10"
chrono = { version = "0.4", features = ["serde"] }
hex = "0.4"
base64 = "0.22"

@ -50,26 +64,38 @@ regex = "1.11"
crossbeam-queue = "0.3"
num-bigint = "0.4"
num-traits = "0.2"
x25519-dalek = "2"
anyhow = "1.0"

# HTTP
reqwest = { version = "0.12", features = ["rustls-tls"], default-features = false }
notify = { version = "6", features = ["macos_fsevent"] }
ipnetwork = "0.20"
reqwest = { version = "0.13", features = ["rustls"], default-features = false }
notify = "8.2"
ipnetwork = { version = "0.21", features = ["serde"] }
hyper = { version = "1", features = ["server", "http1"] }
hyper-util = { version = "0.1", features = ["tokio", "server-auto"] }
http-body-util = "0.1"
httpdate = "1.0"
tokio-rustls = { version = "0.26", default-features = false, features = ["tls12"] }
rustls = { version = "0.23", default-features = false, features = ["std", "tls12", "ring"] }
webpki-roots = "0.26"
tokio-rustls = { version = "0.26", default-features = false, features = [
    "tls12",
] }
rustls = { version = "0.23", default-features = false, features = [
    "std",
    "tls12",
    "ring",
] }
webpki-roots = "1.0"

[dev-dependencies]
tokio-test = "0.4"
criterion = "0.5"
criterion = "0.8"
proptest = "1.4"
futures = "0.3"

[[bench]]
name = "crypto_bench"
harness = false

[profile.release]
lto = "fat"
codegen-units = 1
Dockerfile
@ -1,43 +1,98 @@
# ==========================
# Stage 1: Build
# ==========================
FROM rust:1.88-slim-bookworm AS builder
# syntax=docker/dockerfile:1

RUN apt-get update && apt-get install -y --no-install-recommends \
    pkg-config \
    && rm -rf /var/lib/apt/lists/*

WORKDIR /build

COPY Cargo.toml Cargo.lock* ./
RUN mkdir src && echo 'fn main() {}' > src/main.rs && \
    cargo build --release 2>/dev/null || true && \
    rm -rf src

COPY . .
RUN cargo build --release && strip target/release/telemt
ARG TELEMT_REPOSITORY=telemt/telemt
ARG TELEMT_VERSION=latest

# ==========================
# Stage 2: Runtime
# Minimal Image
# ==========================
FROM debian:bookworm-slim
FROM debian:12-slim AS minimal

RUN apt-get update && apt-get install -y --no-install-recommends \
    ca-certificates \
    && rm -rf /var/lib/apt/lists/*
ARG TARGETARCH
ARG TELEMT_REPOSITORY
ARG TELEMT_VERSION

RUN useradd -r -s /usr/sbin/nologin telemt
RUN set -eux; \
    apt-get update; \
    apt-get install -y --no-install-recommends \
        binutils \
        ca-certificates \
        curl \
        tar; \
    rm -rf /var/lib/apt/lists/*

RUN set -eux; \
    case "${TARGETARCH}" in \
        amd64) ASSET="telemt-x86_64-linux-musl.tar.gz" ;; \
        arm64) ASSET="telemt-aarch64-linux-musl.tar.gz" ;; \
        *) echo "Unsupported TARGETARCH: ${TARGETARCH}" >&2; exit 1 ;; \
    esac; \
    VERSION="${TELEMT_VERSION#refs/tags/}"; \
    if [ -z "${VERSION}" ] || [ "${VERSION}" = "latest" ]; then \
        BASE_URL="https://github.com/${TELEMT_REPOSITORY}/releases/latest/download"; \
    else \
        BASE_URL="https://github.com/${TELEMT_REPOSITORY}/releases/download/${VERSION}"; \
    fi; \
    curl -fL \
        --retry 5 \
        --retry-delay 3 \
        --connect-timeout 10 \
        --max-time 120 \
        -o "/tmp/${ASSET}" \
        "${BASE_URL}/${ASSET}"; \
    curl -fL \
        --retry 5 \
        --retry-delay 3 \
        --connect-timeout 10 \
        --max-time 120 \
        -o "/tmp/${ASSET}.sha256" \
        "${BASE_URL}/${ASSET}.sha256"; \
    cd /tmp; \
    sha256sum -c "${ASSET}.sha256"; \
    tar -xzf "${ASSET}" -C /tmp; \
    test -f /tmp/telemt; \
    install -m 0755 /tmp/telemt /telemt; \
    strip --strip-unneeded /telemt || true; \
    rm -f "/tmp/${ASSET}" "/tmp/${ASSET}.sha256" /tmp/telemt

# ==========================
# Debug Image
# ==========================
FROM debian:12-slim AS debug

RUN set -eux; \
    apt-get update; \
    apt-get install -y --no-install-recommends \
        ca-certificates \
        tzdata \
        curl \
        iproute2 \
        busybox; \
    rm -rf /var/lib/apt/lists/*

WORKDIR /app

COPY --from=builder /build/target/release/telemt /app/telemt
COPY --from=minimal /telemt /app/telemt
COPY config.toml /app/config.toml

RUN chown -R telemt:telemt /app
USER telemt

EXPOSE 443
EXPOSE 9090
EXPOSE 443 9090 9091

ENTRYPOINT ["/app/telemt"]
CMD ["config.toml"]

# ==========================
# Production Distroless on MUSL
# ==========================
FROM gcr.io/distroless/static-debian12 AS prod

WORKDIR /app

COPY --from=minimal /telemt /app/telemt
COPY config.toml /app/config.toml

USER nonroot:nonroot

EXPOSE 443 9090 9091

ENTRYPOINT ["/app/telemt"]
CMD ["config.toml"]
File diff suppressed because it is too large
@ -0,0 +1,165 @@
###### TELEMT Public License 3 ######
##### Copyright (c) 2026 Telemt #####

Permission is hereby granted, free of charge, to any person obtaining a copy
of this Software and associated documentation files (the "Software"),
to use, reproduce, modify, prepare derivative works of, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to permit
persons to whom the Software is furnished to do so, provided that all
copyright notices, license terms, and conditions set forth in this License
are preserved and complied with.

### Official Translations

The canonical version of this License is the English version.
Official translations are provided for informational purposes only
and for convenience, and do not have legal force. In case of any
discrepancy, the English version of this License shall prevail.
Available versions:
- English in Markdown: docs/LICENSE/LICENSE.md
- German: docs/LICENSE/LICENSE.de.md
- Russian: docs/LICENSE/LICENSE.ru.md

### License Versioning Policy

This License is version 3 of the TELEMT Public License.
Each version of the Software is licensed under the License that
accompanies its corresponding source code distribution.

Future versions of the Software may be distributed under a different
version of the TELEMT Public License or under a different license,
as determined by the Telemt maintainers.

Any such change of license applies only to the versions of the
Software distributed with the new license and SHALL NOT retroactively
affect any previously released versions of the Software.

Recipients of the Software are granted rights only under the License
provided with the version of the Software they received.

Redistributions of the Software, including Modified Versions, MUST
preserve the copyright notices, license text, and conditions of this
License for all portions of the Software derived from Telemt.

Additional terms or licenses may be applied to modifications or
additional code added by a redistributor, provided that such terms
do not restrict or alter the rights granted under this License for
the original Telemt Software.

Nothing in this section limits the rights granted under this License
for versions of the Software already released.

### Definitions

For the purposes of this License:
- "Software" means the Telemt software, including source code, documentation,
  and any associated files distributed under this License.
- "Contributor" means any person or entity that submits code, patches,
  documentation, or other contributions to the Software that are accepted
  into the Software by the maintainers.
- "Contribution" means any work of authorship intentionally submitted
  to the Software for inclusion in the Software.
- "Modified Version" means any version of the Software that has been
  changed, adapted, extended, or otherwise modified from the original
  Software.
- "Maintainers" means the individuals or entities responsible for
  the official Telemt project and its releases.

#### 1 Attribution

Redistributions of the Software, in source or binary form, MUST RETAIN the
above copyright notice, this license text, and any existing attribution
notices.

#### 2 Modification Notice

If you modify the Software, you MUST clearly state that the Software has been
modified and include a brief description of the changes made.

Modified versions MUST NOT be presented as the original Telemt.

#### 3 Trademark and Branding

This license DOES NOT grant permission to use the name "Telemt",
the Telemt logo, or any Telemt trademarks or branding.

Redistributed or modified versions of the Software MAY NOT use the Telemt
name in a way that suggests endorsement or official origin without explicit
permission from the Telemt maintainers.

Use of the name "Telemt" to describe a modified version of the Software
is permitted only if the modified version is clearly identified as a
modified or unofficial version.

Any distribution that could reasonably confuse users into believing that
the software is an official Telemt release is prohibited.

#### 4 Binary Distribution Transparency

If you distribute compiled binaries of the Software,
you are ENCOURAGED to provide access to the corresponding
source code and build instructions where reasonably possible.

This helps preserve transparency and allows recipients to verify the
integrity and reproducibility of distributed builds.

#### 5 Patent Grant and Defensive Termination Clause

Each contributor grants you a perpetual, worldwide, non-exclusive,
no-charge, royalty-free, irrevocable patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Software.

This patent license applies only to those patent claims necessarily
infringed by the contributor's contribution alone or by combination of
their contribution with the Software.

If you initiate or participate in any patent litigation, including
cross-claims or counterclaims, alleging that the Software or any
contribution incorporated within the Software constitutes patent
infringement, then **all rights granted to you under this license shall
terminate immediately** as of the date such litigation is filed.

Additionally, if you initiate legal action alleging that the
Software itself infringes your patent or other intellectual
property rights, then all rights granted to you under this
license SHALL TERMINATE automatically.

#### 6 Contributions

Unless you explicitly state otherwise, any Contribution intentionally
submitted for inclusion in the Software shall be licensed under the terms
of this License.

By submitting a Contribution, you grant the Telemt maintainers and all
recipients of the Software the rights described in this License with
respect to that Contribution.

#### 7 Network Use Attribution

If the Software is used to provide a publicly accessible network service,
the operator of such service SHOULD provide attribution to Telemt in at least
one of the following locations:

- service documentation
- service description
- an "About" or similar informational page
- other user-visible materials reasonably associated with the service

Such attribution MUST NOT imply endorsement by the Telemt project or its
maintainers.

#### 8 Disclaimer of Warranty and Severability Clause

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.

IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
USE OR OTHER DEALINGS IN THE SOFTWARE.

IF ANY PROVISION OF THIS LICENSE IS HELD TO BE INVALID OR UNENFORCEABLE,
SUCH PROVISION SHALL BE INTERPRETED TO REFLECT THE ORIGINAL INTENT
OF THE PARTIES AS CLOSELY AS POSSIBLE, AND THE REMAINING PROVISIONS
SHALL REMAIN IN FULL FORCE AND EFFECT.
LICENSING.md
@ -1,17 +1,12 @@
# LICENSING
## Licenses for Versions
| Version | License |
|---------|---------------|
| 1.0 | NO LICENSE |
| 1.1 | NO LICENSE |
| 1.2 | NO LICENSE |
| 2.0 | NO LICENSE |
| 3.0 | TELEMT UL 1 |
| Version ≥ | Version ≤ | License |
|-----------|-----------|---------------|
| 1.0 | 3.3.17 | NO LICENSE |
| 3.3.18 | 3.4.0 | TELEMT PL 3 |

### License Types
- **NO LICENSE** = ***ALL RIGHTS RESERVED***
- **TELEMT UL 1** - a work-in-progress license for the source code of `telemt`, which encourages:
  - fair use,
  - contributions,
  - distribution,
  - but prohibits NOT mentioning the authors
- **TELEMT PL** - the special Telemt Public License, based on Apache License 2 principles

## [Telemt Public License 3](https://github.com/telemt/telemt/blob/main/LICENSE)
README.md
@ -1,104 +1,23 @@
# Telemt - MTProxy on Rust + Tokio

**Telemt** is a fast, secure, and feature-rich server written in Rust: it fully implements the official Telegram proxy algorithm and adds many production-ready improvements such as connection pooling, replay protection, detailed statistics, and masking from "prying" eyes
***Löst Probleme, bevor andere überhaupt wissen, dass sie existieren*** / ***It solves problems before others even realize they exist***

[**Telemt Chat in Telegram**](https://t.me/telemtrs)
### [**Telemt Chat in Telegram**](https://t.me/telemtrs)
#### A fixed TLS ClientHello is now available in Telegram Desktop starting from version 6.7.2: to work with EE-MTProxy, please update your client;
#### A fixed TLS ClientHello for the Telegram Android client is available in [our chat](https://t.me/telemtrs/30234/36441); official releases for Android and iOS are a "work in progress";

## NEWS and EMERGENCY
### ✈️ Telemt 3 is released!
<table>
<tr>
<td width="50%" valign="top">

### 🇷🇺 RU

#### Релиз 3.0.15 — 25 февраля

25 февраля мы выпустили версию **3.0.15**

Мы предполагаем, что она станет завершающей версией поколения 3.0 и уже сейчас мы рассматриваем её как **LTS-кандидата** для версии **3.1.0**!

После нескольких дней детального анализа особенностей работы Middle-End мы спроектировали и реализовали продуманный режим **ротации ME Writer**. Данный режим позволяет поддерживать стабильно высокую производительность в long-run сценариях без возникновения ошибок, связанных с некорректной конфигурацией прокси

Будем рады вашему фидбеку и предложениям по улучшению — особенно в части **статистики** и **UX**

Релиз:
[3.0.15](https://github.com/telemt/telemt/releases/tag/3.0.15)

---

Если у вас есть компетенции в:

- Асинхронных сетевых приложениях
- Анализе трафика
- Реверс-инжиниринге
- Сетевых расследованиях

Мы открыты к архитектурным предложениям, идеям и pull requests
</td>
<td width="50%" valign="top">

### 🇬🇧 EN

#### Release 3.0.15 — February 25

On February 25, we released version **3.0.15**

We expect this to become the final release of the 3.0 generation, and at this point we already see it as a strong **LTS candidate** for the upcoming **3.1.0** release!

After several days of deep analysis of Middle-End behavior, we designed and implemented a well-engineered **ME Writer rotation mode**. This mode enables sustained high throughput in long-run scenarios while preventing proxy misconfiguration errors

We are looking forward to your feedback and improvement proposals — especially regarding **statistics** and **UX**

Release:
[3.0.15](https://github.com/telemt/telemt/releases/tag/3.0.15)

---

If you have expertise in:

- Asynchronous network applications
- Traffic analysis
- Reverse engineering
- Network forensics

We welcome ideas, architectural feedback, and pull requests.
</td>
</tr>
</table>

# Features
💥 The configuration structure has changed since version 1.1.0.0 - update it in your environment!
**Telemt** is a fast, secure, and feature-rich server written in Rust: it fully implements the official Telegram proxy algorithm and adds many production-ready improvements such as:
- [ME Pool + Reader/Writer + Registry + Refill + Adaptive Floor + Trio-State + Generation Lifecycle](https://github.com/telemt/telemt/blob/main/docs/model/MODEL.en.md)
- [Fully-covered API w/ management](https://github.com/telemt/telemt/blob/main/docs/API.md)
- Anti-replay on a sliding window
- Prometheus-format metrics
- TLS-Fronting and TCP-Splicing for masking from "prying" eyes

⚓ Our implementation of **TLS-fronting** is one of the most deeply debugged, focused, and advanced, and is *almost* **"behaviorally consistent with the real thing"**: we are confident we have it right - [see the evidence in our validation and traces](#recognizability-for-dpi-and-crawler)

⚓ Our ***Middle-End Pool*** is the fastest by design in standard scenarios, compared to other implementations of connecting to the Middle-End Proxy: not dramatically, but consistently

# GOTO
- [Features](#features)
- [Quick Start Guide](#quick-start-guide)
- [How to use?](#how-to-use)
  - [Systemd Method](#telemt-via-systemd)
- [Configuration](#configuration)
  - [Minimal Configuration](#minimal-configuration-for-first-start)
  - [Advanced](#advanced)
    - [Adtag](#adtag)
    - [Listening and Announce IPs](#listening-and-announce-ips)
    - [Upstream Manager](#upstream-manager)
      - [IP](#bind-on-ip)
      - [SOCKS](#socks45-as-upstream)
- [FAQ](#faq)
  - [Recognizability for DPI + crawler](#recognizability-for-dpi-and-crawler)
  - [Telegram Calls](#telegram-calls-via-mtproxy)
  - [DPI](#how-does-dpi-see-mtproxy-tls)
  - [Whitelist on Network Level](#whitelist-on-ip)
  - [Too many open files](#too-many-open-files)
- [Build](#build)
- [Docker](#docker)
- [Why Rust?](#why-rust)

## Features

- Full support for all official MTProto proxy modes:
  - Classic
  - Secure - with `dd` prefix
@ -109,161 +28,38 @@ We welcome ideas, architectural feedback, and pull requests.
- Graceful shutdown on Ctrl+C
- Extensive logging via `trace` and `debug` with the `RUST_LOG` method

# GOTO
- [Quick Start Guide](#quick-start-guide)
- [FAQ](#faq)
  - [Recognizability for DPI and crawler](#recognizability-for-dpi-and-crawler)
    - [Client WITH secret-key accesses the MTProxy resource:](#client-with-secret-key-accesses-the-mtproxy-resource)
    - [Client WITHOUT secret-key gets transparent access to the specified resource:](#client-without-secret-key-gets-transparent-access-to-the-specified-resource)
  - [Telegram Calls via MTProxy](#telegram-calls-via-mtproxy)
  - [How does DPI see MTProxy TLS?](#how-does-dpi-see-mtproxy-tls)
  - [Whitelist on IP](#whitelist-on-ip)
  - [Too many open files](#too-many-open-files)
- [Build](#build)
- [Why Rust?](#why-rust)
- [Issues](#issues)
- [Roadmap](#roadmap)


## Quick Start Guide
**This software is designed for Debian-based OS: in addition to Debian, these are Ubuntu, Mint, Kali, MX, and many other Linux distributions**
1. Download the release
```bash
wget -qO- "https://github.com/telemt/telemt/releases/latest/download/telemt-$(uname -m)-linux-$(ldd --version 2>&1 | grep -iq musl && echo musl || echo gnu).tar.gz" | tar -xz
```
2. Move it to the bin folder
```bash
mv telemt /bin
```
3. Make it executable
```bash
chmod +x /bin/telemt
```
4. Go to the [How to use?](#how-to-use) section for further steps

## How to use?
### Telemt via Systemd
**These instructions assume that you:**
- are logged in as root or executed `su -` / `sudo su`
- already have an assembled and executable `telemt` in the /bin folder as a result of the [Quick Start Guide](#quick-start-guide) or [Build](#build)

**0. Check the port and generate secrets**

The port you have selected for use should be MISSING from the list when you run:
```bash
netstat -lnp
```

Generate 16 bytes / 32 hex characters with OpenSSL or another tool:
```bash
openssl rand -hex 16
```
OR
```bash
xxd -l 16 -p /dev/urandom
```
OR
```bash
python3 -c 'import os; print(os.urandom(16).hex())'
```

**1. Place your config at /etc/telemt.toml**

Open nano
```bash
nano /etc/telemt.toml
```
paste your config from the [Configuration](#configuration) section,

then Ctrl+X -> Y -> Enter to save

**2. Create the service at /etc/systemd/system/telemt.service**

Open nano
```bash
nano /etc/systemd/system/telemt.service
```
paste this systemd unit
```ini
[Unit]
Description=Telemt
After=network.target

[Service]
Type=simple
WorkingDirectory=/bin
ExecStart=/bin/telemt /etc/telemt.toml
Restart=on-failure
LimitNOFILE=65536

[Install]
WantedBy=multi-user.target
```
then Ctrl+X -> Y -> Enter to save

**3.** In the shell, type `systemctl start telemt` - it must start with a zero exit code

**4.** In the shell, type `systemctl status telemt` - there you can see info about the current MTProxy status

**5.** In the shell, type `systemctl enable telemt` - telemt will then start on system startup, after the network is up

**6.** In the shell, type `journalctl -u telemt -n -g "links" --no-pager -o cat | tac` - to get the connection links

## Configuration
### Minimal Configuration for First Start
```toml
# === General Settings ===
[general]
# ad_tag = "00000000000000000000000000000000"

[general.modes]
classic = false
secure = false
tls = true

# === Anti-Censorship & Masking ===
[censorship]
tls_domain = "petrovich.ru"

[access.users]
# format: "username" = "32_hex_chars_secret"
hello = "00000000000000000000000000000000"

```
### Advanced
#### Adtag (per-user)
To use channel advertising and usage statistics from Telegram, get an Adtag from [@mtproxybot](https://t.me/mtproxybot). Set it per user in `[access.user_ad_tags]` (32 hex chars):
```toml
[access.user_ad_tags]
username1 = "11111111111111111111111111111111" # Replace with your tag from @mtproxybot
username2 = "22222222222222222222222222222222"
```
#### Listening and Announce IPs
To specify the listening address and/or the address in links, add to the `[[server.listeners]]` section of config.toml:
```toml
[[server.listeners]]
ip = "0.0.0.0"          # 0.0.0.0 = all IPs; your IP = specific listening
announce_ip = "1.2.3.4" # IP in links; comment out with # if not used
```
#### Upstream Manager
To specify an upstream, add to the `[[upstreams]]` section of config.toml:
##### Bind on IP
```toml
[[upstreams]]
type = "direct"
weight = 1
enabled = true
interface = "192.168.1.100" # Change to your outgoing IP
```
##### SOCKS4/5 as Upstream
- Without auth:
```toml
[[upstreams]]
type = "socks5"          # Specify SOCKS4 or SOCKS5
address = "1.2.3.4:1234" # SOCKS-server address
weight = 1               # Set weight for scenarios
enabled = true
```

- With auth:
```toml
[[upstreams]]
type = "socks5"          # Specify SOCKS4 or SOCKS5
address = "1.2.3.4:1234" # SOCKS-server address
username = "user"        # Username for auth on the SOCKS server
password = "pass"        # Password for auth on the SOCKS server
weight = 1               # Set weight for scenarios
enabled = true
```
- [Quick Start Guide RU](docs/QUICK_START_GUIDE.ru.md)
- [Quick Start Guide EN](docs/QUICK_START_GUIDE.en.md)

## FAQ

- [FAQ RU](docs/FAQ.ru.md)
- [FAQ EN](docs/FAQ.en.md)

### Recognizability for DPI and crawler
Since version 1.1.0.0, we have debugged the masking perfectly: for all clients that do not "present" a key,
we transparently direct the traffic to the target host!

On April 1, 2026, we became aware of a method for detecting MTProxy Fake-TLS,
based on the ECH extension and the ordering of cipher suites,
as well as an overall unique JA3/JA4 fingerprint
that does not occur in modern browsers:
we have already submitted initial changes to the Telegram Desktop developers and are working on updates for other clients.

- We consider this a breakthrough aspect, which has no stable analogues today
- Based on this: if `telemt` is configured correctly, **TLS mode is completely identical to a real-life handshake + communication** with the specified host
@ -397,6 +193,11 @@ git clone https://github.com/telemt/telemt
cd telemt
# Starting Release Build
cargo build --release

# Low-RAM devices (1 GB, e.g. NanoPi Neo3 / Raspberry Pi Zero 2):
# release profile uses lto = "thin" to reduce peak linker memory.
# If your custom toolchain overrides profiles, avoid enabling fat LTO.

# Move to /bin
mv ./target/release/telemt /bin
# Make executable
@ -405,40 +206,11 @@ chmod +x /bin/telemt
telemt config.toml
```

## Docker
**Quick start (Docker Compose)**

1. Edit `config.toml` in the repo root (at least: port, user secrets, tls_domain)
2. Start the container:
```bash
docker compose up -d --build
```
3. Check the logs:
```bash
docker compose logs -f telemt
```
4. Stop:
```bash
docker compose down
```

**Notes**
- `docker-compose.yml` maps `./config.toml` to `/app/config.toml` (read-only)
- By default it publishes `443:443` and runs with dropped capabilities (only `NET_BIND_SERVICE` is added)
- If you really need host networking (usually only for some IPv6 setups), uncomment `network_mode: host`

**Run without Compose**
```bash
docker build -t telemt:local .
docker run --name telemt --restart unless-stopped \
  -p 443:443 \
  -e RUST_LOG=info \
  -v "$PWD/config.toml:/app/config.toml:ro" \
  --read-only \
  --cap-drop ALL --cap-add NET_BIND_SERVICE \
  --ulimit nofile=65536:65536 \
  telemt:local
```

### OpenBSD
- Build and service setup guide: [OpenBSD Guide (EN)](docs/OPENBSD.en.md)
- Example rc.d script: [contrib/openbsd/telemt.rcd](contrib/openbsd/telemt.rcd)
- Status: OpenBSD sandbox hardening with `pledge(2)` and `unveil(2)` is not implemented yet.

## Why Rust?
- Long-running reliability and idempotent behavior
@ -1,5 +1,5 @@
// Cryptobench
use criterion::{black_box, criterion_group, Criterion};
use criterion::{Criterion, black_box, criterion_group};

fn bench_aes_ctr(c: &mut Criterion) {
    c.bench_function("aes_ctr_encrypt_64kb", |b| {

@ -9,4 +9,4 @@ fn bench_aes_ctr(c: &mut Criterion) {
        black_box(enc.encrypt(&data))
    })
    });
}
}
config.full.toml
@ -1,697 +0,0 @@
# ==============================================================================
#
# TELEMT — Advanced Rust-based Telegram MTProto Proxy
# Full Configuration Reference
#
# This file is both a working config and complete documentation.
# Every parameter is explained. Read it top to bottom before deploying.
#
# Quick Start:
#   1. Set [server].port to your desired port (443 recommended)
#   2. Generate a secret: openssl rand -hex 16
#   3. Put it in [access.users] under a name you choose
#   4. Set [censorship].tls_domain to a popular unblocked HTTPS site
#   5. Set your public IP in [general].middle_proxy_nat_ip
#      and [general.links].public_host
#   6. Set the announce IP in [[server.listeners]]
#   7. Run Telemt. It prints a tg:// link. Send it to your users.
#
# Modes of Operation:
#   Direct Mode (use_middle_proxy = false)
#     Connects straight to Telegram DCs via TCP. Simple, fast, low overhead.
#     No ad_tag support. No CDN DC support (203, etc).
#
#   Middle-Proxy Mode (use_middle_proxy = true)
#     Connects to Telegram Middle-End servers via the RPC protocol.
#     Required for ad_tag monetization and CDN support.
#     Requires proxy_secret_path and a valid public IP.
#
# ==============================================================================


# ==============================================================================
# LEGACY TOP-LEVEL FIELDS
# ==============================================================================

# Deprecated. Use [general.links].show instead.
# Accepts "*" for all users, or an array like ["alice", "bob"].
show_link = ["0"]

# Fallback Datacenter index (1-5) when a client requests an unknown DC ID.
# DC 2 is Amsterdam (Europe), closest for most CIS users.
# default_dc = 2


# ==============================================================================
# GENERAL SETTINGS
# ==============================================================================

[general]

# ------------------------------------------------------------------------------
# Core Protocol
# ------------------------------------------------------------------------------

# Coalesce the MTProto handshake and first data payload into a single TCP packet.
# Significantly reduces connection latency. No reason to disable.
fast_mode = true

# How the proxy connects to Telegram servers.
#   false = Direct TCP to Telegram DCs (simple, low overhead)
#   true  = Middle-End RPC protocol (required for ad_tag and CDN DCs)
use_middle_proxy = true

# 32-char hex Ad-Tag from @MTProxybot for sponsored channel injection.
# Only works when use_middle_proxy = true.
# Obtain yours: message @MTProxybot on Telegram, register your proxy.
# ad_tag = "00000000000000000000000000000000"

# ------------------------------------------------------------------------------
# Middle-End Authentication
# ------------------------------------------------------------------------------

# Path to the Telegram infrastructure AES key file.
# Auto-downloaded from https://core.telegram.org/getProxySecret on first run.
# This key authenticates your proxy with Middle-End servers.
proxy_secret_path = "proxy-secret"

# ------------------------------------------------------------------------------
# Public IP Configuration (Critical for Middle-Proxy Mode)
# ------------------------------------------------------------------------------

# Your server's PUBLIC IPv4 address.
# Middle-End servers need this for the cryptographic Key Derivation Function.
# If your server has a direct public IP, set it here.
# If behind NAT (AWS, Docker, etc.), this MUST be your external IP.
# If omitted, Telemt uses STUN to auto-detect (see middle_proxy_nat_probe).
# middle_proxy_nat_ip = "203.0.113.10"

# Auto-detect the public IP via the STUN servers defined in [network].
# Set to false if you hardcoded middle_proxy_nat_ip above.
# Set to true if you want automatic detection.
middle_proxy_nat_probe = true

# ------------------------------------------------------------------------------
# Middle-End Connection Pool
# ------------------------------------------------------------------------------

# Number of persistent multiplexed RPC connections to ME servers.
# All client traffic is routed through these "fat pipes".
# 8 handles thousands of concurrent users comfortably.
middle_proxy_pool_size = 8

# Legacy field. Connections kept initialized but idle as warm standby.
middle_proxy_warm_standby = 16

# ------------------------------------------------------------------------------
# Middle-End Keepalive
# Telegram ME servers aggressively kill idle TCP connections.
# These settings send periodic RPC_PING frames to keep the pipes alive.
# ------------------------------------------------------------------------------

me_keepalive_enabled = true

# Base interval between pings, in seconds.
me_keepalive_interval_secs = 25

# Random jitter added to the interval so that connections do not all ping
# simultaneously (see the sketch below).
me_keepalive_jitter_secs = 5

# Randomize ping payload bytes to prevent DPI from fingerprinting ping patterns.
me_keepalive_payload_random = true
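For illustration, the jittered schedule described by the two fields above could be computed like this (a sketch, not Telemt's actual code; timing jitter is not a cryptographic use, so the `rand` crate is fine here):

```rust
use rand::Rng;
use std::time::Duration;

// Uniform jitter in [0, jitter_secs] spreads pings across connections, so
// the pool's connections do not emit RPC_PING frames in lockstep.
fn next_keepalive_delay(interval_secs: u64, jitter_secs: u64) -> Duration {
    let jitter = rand::rng().random_range(0..=jitter_secs);
    Duration::from_secs(interval_secs + jitter)
}
```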
# ------------------------------------------------------------------------------
|
||||
# Client-Side Limits
|
||||
# ------------------------------------------------------------------------------
|
||||
|
||||
# Max buffered ciphertext per client (bytes) when upstream is slow.
|
||||
# Acts as backpressure to prevent memory exhaustion. 256KB is safe.
|
||||
crypto_pending_buffer = 262144
|
||||
|
||||
# Maximum single MTProto frame size from client. 16MB is protocol standard.
|
||||
max_client_frame = 16777216
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# Crypto Desynchronization Logging
|
||||
# Desync errors usually mean DPI/GFW is tampering with connections.
|
||||
# ------------------------------------------------------------------------------
|
||||
|
||||
# true = full forensics (trace ID, IP hash, hex dumps) for EVERY desync event
|
||||
# false = deduplicated logging, one entry per time window (prevents log spam)
|
||||
# Set true if you are actively debugging DPI interference.
|
||||
desync_all_full = true
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# Beobachten — Built-in Honeypot / Active Probe Tracker
|
||||
# Tracks IPs that fail handshakes or behave like TLS scanners.
|
||||
# Output file can be fed into fail2ban or iptables for auto-blocking.
|
||||
# ------------------------------------------------------------------------------
|
||||
|
||||
beobachten = true
|
||||
|
||||
# How long (minutes) to remember a suspicious IP before expiring it.
|
||||
beobachten_minutes = 30
|
||||
|
||||
# How often (seconds) to flush tracker state to disk.
|
||||
beobachten_flush_secs = 15
|
||||
|
||||
# File path for the tracker output.
|
||||
beobachten_file = "cache/beobachten.txt"
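# Example: feeding the tracker file into iptables. A minimal sketch; it assumes
# the file lists one suspicious IPv4 address per line (the on-disk format is
# not specified here) and that you run it from Telemt's working directory:
#
#   while read -r ip; do
#       # -C checks whether the rule already exists; add it only if missing.
#       iptables -C INPUT -s "$ip" -j DROP 2>/dev/null \
#           || iptables -A INPUT -s "$ip" -j DROP
#   done < cache/beobachten.txt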

# ------------------------------------------------------------------------------
# Hardswap — Zero-Downtime ME Pool Rotation
# When Telegram updates ME server IPs, Hardswap creates a completely new pool,
# waits until it is fully ready, migrates traffic, then kills the old pool.
# Users experience zero interruption.
# ------------------------------------------------------------------------------

hardswap = true

# ------------------------------------------------------------------------------
# ME Pool Warmup Staggering
# When creating a new pool, connections are opened one by one with delays
# to avoid a burst of SYN packets that could trigger ISP flood protection.
# ------------------------------------------------------------------------------

me_warmup_stagger_enabled = true

# Delay between each connection creation (milliseconds).
me_warmup_step_delay_ms = 500

# Random jitter added to the delay (milliseconds).
me_warmup_step_jitter_ms = 300

# ------------------------------------------------------------------------------
# ME Reconnect Backoff
# If an ME server drops the connection, Telemt retries with this strategy.
# ------------------------------------------------------------------------------

# Max simultaneous reconnect attempts per DC.
me_reconnect_max_concurrent_per_dc = 8

# Exponential backoff base (milliseconds).
me_reconnect_backoff_base_ms = 500

# Backoff ceiling (milliseconds). Will never wait longer than this.
me_reconnect_backoff_cap_ms = 30000

# Number of instant retries before switching to exponential backoff.
me_reconnect_fast_retry_count = 12
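# For intuition: assuming the usual doubling strategy (the exact curve Telemt
# uses is not spelled out here), after the 12 instant retries the waits grow
# from the 500 ms base and are clamped at the 30000 ms cap:
#   500, 1000, 2000, 4000, 8000, 16000, 30000 (capped), 30000, ... ms
# which this shell one-liner reproduces:
#   for n in 0 1 2 3 4 5 6 7; do d=$((500 * (1 << n))); [ $d -gt 30000 ] && d=30000; echo "$d ms"; done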

# ------------------------------------------------------------------------------
# NAT Mismatch Behavior
# Applies when the STUN-detected IP differs from the local interface IP
# (i.e., you are behind NAT).
# false = abort ME mode (safe default)
# true  = force ME mode anyway (use if you know your NAT setup is correct)
# ------------------------------------------------------------------------------

stun_iface_mismatch_ignore = false

# ------------------------------------------------------------------------------
# Logging
# ------------------------------------------------------------------------------

# File to log unknown DC requests (DC IDs outside the standard 1-5).
unknown_dc_log_path = "unknown-dc.txt"

# Verbosity: "debug" | "verbose" | "normal" | "silent"
log_level = "normal"

# Disable ANSI color codes in log output (useful for file logging).
disable_colors = false

# ------------------------------------------------------------------------------
# FakeTLS Record Sizing
# Buffer small MTProto packets into larger TLS records to mimic real HTTPS.
# Real HTTPS servers send records close to MTU size (~1400 bytes).
# A stream of tiny TLS records is a strong DPI signal.
# Set to 0 to disable. Set to 1400 for realistic HTTPS emulation.
# ------------------------------------------------------------------------------

fast_mode_min_tls_record = 1400

# ------------------------------------------------------------------------------
# Periodic Updates
# ------------------------------------------------------------------------------

# How often (seconds) to re-fetch ME server lists and proxy secrets
# from core.telegram.org. Keeps your proxy in sync with Telegram infrastructure.
update_every = 300

# How often (seconds) to force a Hardswap even if the ME map is unchanged.
# Shorter intervals mean shorter-lived TCP flows, which are harder for DPI to profile.
me_reinit_every_secs = 600

# ------------------------------------------------------------------------------
# Hardswap Warmup Tuning
# Fine-grained control over how the new pool is warmed up before the traffic switch.
# ------------------------------------------------------------------------------

me_hardswap_warmup_delay_min_ms = 1000
me_hardswap_warmup_delay_max_ms = 2000
me_hardswap_warmup_extra_passes = 3
me_hardswap_warmup_pass_backoff_base_ms = 500

# ------------------------------------------------------------------------------
# Config Update Debouncing
# Telegram sometimes pushes transient/broken configs. Debouncing requires
# N consecutive identical fetches before applying a change.
# ------------------------------------------------------------------------------

# The ME server list must be identical for this many fetches before applying.
me_config_stable_snapshots = 2

# Minimum seconds between config applications.
me_config_apply_cooldown_secs = 300

# The proxy secret must be identical for this many fetches before applying.
proxy_secret_stable_snapshots = 2

# ------------------------------------------------------------------------------
# Proxy Secret Rotation
# ------------------------------------------------------------------------------

# Apply newly downloaded secrets at runtime without a restart.
proxy_secret_rotate_runtime = true

# Maximum acceptable secret length (bytes). Rejects abnormally large secrets.
proxy_secret_len_max = 256

# ------------------------------------------------------------------------------
# Hardswap Drain Settings
# Controls graceful shutdown of old ME connections during pool rotation.
# ------------------------------------------------------------------------------

# Seconds to keep old connections alive for in-flight data before force-closing.
me_pool_drain_ttl_secs = 90

# Minimum ratio of healthy connections in the new pool before draining the old pool.
# 0.8 = at least 80% of the new pool must be ready.
me_pool_min_fresh_ratio = 0.8

# Maximum seconds to wait for the drain to complete before force-killing.
me_reinit_drain_timeout_secs = 120

# ------------------------------------------------------------------------------
# NTP Clock Check
# MTProto uses timestamps. Clock drift > 30 seconds breaks handshakes.
# Telemt checks on startup and warns if the clock is out of sync.
# ------------------------------------------------------------------------------

ntp_check = true
ntp_servers = ["pool.ntp.org"]
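# To verify the host clock by hand before starting the proxy (assuming
# ntpdate is installed; any NTP client works):
#   ntpdate -q pool.ntp.org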

# ------------------------------------------------------------------------------
# Auto-Degradation
# If ME servers become completely unreachable (ISP blocking),
# automatically fall back to Direct Mode so users stay connected.
# ------------------------------------------------------------------------------

auto_degradation_enabled = true

# Number of DC groups that must be unreachable before triggering the fallback.
degradation_min_unavailable_dc_groups = 2


# ==============================================================================
# ALLOWED CLIENT PROTOCOLS
# Only enable what you need. In censored regions, TLS-only is safest.
# ==============================================================================

[general.modes]

# Classic MTProto. Unobfuscated length prefixes. Trivially detected by DPI.
# No reason to enable this unless you have ancient clients.
classic = false

# Obfuscated MTProto with randomized padding. Better than classic, but
# still detectable by statistical analysis of packet sizes.
secure = false

# FakeTLS (ee-secrets). Wraps MTProto in TLS 1.3 framing.
# To DPI, it looks like a normal HTTPS connection.
# This should be the ONLY enabled mode in censored environments.
tls = true


# ==============================================================================
# STARTUP LINK GENERATION
# Controls which tg:// invite links are printed to the console on startup.
# ==============================================================================

[general.links]

# Which users to generate links for.
# "*" = all users, or an array like ["alice", "bob"].
show = "*"

# IP or domain to embed in the tg:// link.
# If omitted, Telemt uses STUN to auto-detect.
# Set this to your server's public IP or domain for reliable links.
# public_host = "proxy.example.com"

# Port to embed in the tg:// link.
# If omitted, uses [server].port.
# public_port = 443
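# For reference, a hypothetical link shape (all values invented; Telemt prints
# the real links at startup). With FakeTLS, the secret in the link is
# conventionally "ee" + the 32-hex user secret + the hex-encoded tls_domain:
#   tg://proxy?server=proxy.example.com&port=443&secret=ee<32-hex-secret><hex-encoded-sni>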

# ==============================================================================
# NETWORK & IP RESOLUTION
# ==============================================================================

[network]

# Enable IPv4 for outbound connections to Telegram.
ipv4 = true

# Enable IPv6 for outbound connections to Telegram.
ipv6 = false

# Prefer IPv4 (4) or IPv6 (6) when both are available.
prefer = 4

# Experimental: use both IPv4 and IPv6 ME servers simultaneously.
# May improve reliability but doubles connection count.
multipath = false

# STUN servers for external IP discovery.
# Used for Middle-Proxy KDF (if nat_probe=true) and link generation.
stun_servers = [
    "stun.l.google.com:5349",
    "stun1.l.google.com:3478",
    "stun.gmx.net:3478",
    "stun.l.google.com:19302"
]

# If UDP STUN is blocked, attempt TCP-based STUN as fallback.
stun_tcp_fallback = true

# If all STUN fails, use HTTP APIs to discover public IP.
http_ip_detect_urls = [
    "https://ifconfig.me/ip",
    "https://api.ipify.org"
]

# Cache discovered public IP to this file to survive restarts.
cache_public_ip_path = "cache/public_ip.txt"
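# To check the HTTP fallback endpoints by hand (both return the caller's
# public IP as plain text):
#   curl -s https://ifconfig.me/ip
#   curl -s https://api.ipify.org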

# ==============================================================================
# SERVER BINDING & METRICS
# ==============================================================================

[server]

# TCP port to listen on.
# 443 is recommended (it looks like normal HTTPS traffic).
port = 443

# IPv4 bind address. "0.0.0.0" = all interfaces.
listen_addr_ipv4 = "0.0.0.0"

# IPv6 bind address. "::" = all interfaces.
listen_addr_ipv6 = "::"

# Unix socket listener (for reverse proxy setups with Nginx/HAProxy).
# listen_unix_sock = "/var/run/telemt.sock"
# listen_unix_sock_perm = "0660"

# Enable PROXY protocol header parsing.
# Set to true ONLY if Telemt is behind HAProxy/Nginx that injects PROXY headers.
# If enabled without a proxy in front, clients will fail to connect.
proxy_protocol = false

# Prometheus metrics HTTP endpoint port.
# Uncomment to enable. Access at http://your-server:9090/metrics
# metrics_port = 9090

# IP ranges allowed to access the metrics endpoint.
metrics_whitelist = [
    "127.0.0.1/32",
    "::1/128"
]
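# Once metrics_port is uncommented, a whitelisted host can scrape the
# endpoint directly, e.g.:
#   curl -s http://127.0.0.1:9090/metrics | head -n 20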

# ------------------------------------------------------------------------------
# Listener Overrides
# Define explicit listeners with specific bind IPs and announce IPs.
# The announce IP is what gets embedded in tg:// links and sent to ME servers.
# You MUST set announce to your server's public IP for ME mode to work.
# ------------------------------------------------------------------------------

# [[server.listeners]]
# ip = "0.0.0.0"
# announce = "203.0.113.10"
# reuse_allow = false


# ==============================================================================
# TIMEOUTS (seconds unless noted)
# ==============================================================================

[timeouts]

# Maximum time for the client to complete the FakeTLS + MTProto handshake.
client_handshake = 15

# Maximum time to establish a TCP connection to an upstream Telegram DC.
tg_connect = 10

# TCP keepalive interval for client connections.
client_keepalive = 60

# Maximum client inactivity before dropping the connection.
client_ack = 300

# Instant retry count for a single ME endpoint before giving up on it.
me_one_retry = 3

# Timeout (milliseconds) for a single ME endpoint connection attempt.
me_one_timeout_ms = 1500


# ==============================================================================
# ANTI-CENSORSHIP / FAKETLS / MASKING
# This is where Telemt becomes invisible to Deep Packet Inspection.
# ==============================================================================

[censorship]

# ------------------------------------------------------------------------------
# TLS Domain Fronting
# The SNI (Server Name Indication) your proxy presents to connecting clients.
# Must be a popular, unblocked HTTPS website in your target country.
# DPI sees traffic to this domain. Choose carefully.
# Good choices: major CDNs, banks, government sites, search engines.
# Bad choices: obscure sites, already-blocked domains.
# ------------------------------------------------------------------------------

tls_domain = "www.google.com"
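# Before committing to a front domain, confirm it is reachable from this
# server and inspect its certificate, e.g. with openssl:
#   openssl s_client -connect www.google.com:443 -servername www.google.com \
#       </dev/null 2>/dev/null | openssl x509 -noout -subject -dates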

# ------------------------------------------------------------------------------
# Active Probe Masking
# When someone connects but fails the MTProto handshake (wrong secret),
# they might be an ISP active prober testing whether this is a proxy.
#
# mask = false: drop the connection (the prober knows something is here)
# mask = true:  transparently proxy them to mask_host (the prober sees a real website)
#
# With mask enabled, your server is indistinguishable from a real web server
# to anyone who doesn't have the correct secret.
# ------------------------------------------------------------------------------

mask = true

# The real web server to forward failed handshakes to.
# If omitted, defaults to tls_domain.
# mask_host = "www.google.com"

# Port on the mask host to connect to.
mask_port = 443

# Inject a PROXY protocol header when forwarding to the mask host.
# 0 = disabled, 1 = v1, 2 = v2. Leave disabled unless mask_host expects it.
# mask_proxy_protocol = 0

# ------------------------------------------------------------------------------
# TLS Certificate Emulation
# ------------------------------------------------------------------------------

# Size (bytes) of the locally generated fake TLS certificate.
# Only used when tls_emulation is disabled.
fake_cert_len = 2048

# KILLER FEATURE: Real-Time TLS Emulation.
# Telemt connects to tls_domain, fetches its actual TLS 1.3 certificate chain,
# and exactly replicates the byte sizes of the ServerHello and Certificate records.
# Defeats DPI that uses TLS record length heuristics to detect proxies.
# Strongly recommended in censored environments.
tls_emulation = true

# Directory in which to cache fetched TLS certificates.
tls_front_dir = "tlsfront"

# ------------------------------------------------------------------------------
# ServerHello Timing
# Real web servers take 30-150 ms to respond to a ClientHello due to network
# latency and crypto processing. A proxy responding in <1 ms is suspicious.
# These settings add realistic delay to mimic genuine server behavior.
# ------------------------------------------------------------------------------

# Minimum delay before sending the ServerHello (milliseconds).
server_hello_delay_min_ms = 50

# Maximum delay before sending the ServerHello (milliseconds).
server_hello_delay_max_ms = 150

# ------------------------------------------------------------------------------
# TLS Session Tickets
# Real TLS 1.3 servers send 1-2 NewSessionTicket messages after the handshake.
# A server that sends zero tickets is anomalous and may trigger DPI flags.
# Set this to match your tls_domain's behavior (usually 2).
# ------------------------------------------------------------------------------

# tls_new_session_tickets = 0

# ------------------------------------------------------------------------------
# Full Certificate Frequency
# When tls_emulation is enabled, this controls how often (per client IP)
# to send the complete emulated certificate chain.
#
# > 0: Subsequent connections within TTL seconds get a smaller cached version.
#      Saves bandwidth but creates a detectable size difference between
#      first and repeat connections.
#
# = 0: Every connection gets the full certificate. More bandwidth, but
#      perfectly consistent behavior, with no anomalies for DPI to detect.
# ------------------------------------------------------------------------------

tls_full_cert_ttl_secs = 0

# ------------------------------------------------------------------------------
# ALPN Enforcement
# Ensure the ServerHello responds with the exact ALPN protocol the client requested.
# Mismatched ALPN (e.g., the client asks for h2, the server says http/1.1) is a DPI red flag.
# ------------------------------------------------------------------------------

alpn_enforce = true


# ==============================================================================
# ACCESS CONTROL & USERS
# ==============================================================================

[access]

# ------------------------------------------------------------------------------
# Replay Attack Protection
# DPI can record a legitimate user's handshake and replay it later to probe
# whether the server is a proxy. Telemt remembers recent handshake nonces
# and rejects duplicates.
# ------------------------------------------------------------------------------

# Number of nonce slots in the replay detection buffer.
replay_check_len = 65536

# How long (seconds) to remember nonces before expiring them.
replay_window_secs = 1800

# Allow clients with incorrect system clocks to connect.
# false = reject clients with significant time skew (more secure)
# true  = accept anyone regardless of clock (more permissive)
ignore_time_skew = false

# ------------------------------------------------------------------------------
# User Secrets
# Each user needs a unique 32-character hex string as their secret.
# Generate with: openssl rand -hex 16
#
# This secret is embedded in the tg:// link. Anyone with it can connect.
# Format: username = "hex_secret"
# ------------------------------------------------------------------------------

[access.users]
# alice = "0123456789abcdef0123456789abcdef"
# bob = "fedcba9876543210fedcba9876543210"
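# A quick way to mint a user entry using the openssl command mentioned above
# (prints a ready-to-paste line; the name "alice" is just an example):
#   secret=$(openssl rand -hex 16); echo "alice = \"$secret\""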

# ------------------------------------------------------------------------------
# Per-User Connection Limits
# Limits concurrent TCP connections per user to prevent secret sharing.
# Uncomment and set for each user as needed.
# ------------------------------------------------------------------------------

[access.user_max_tcp_conns]
# alice = 100
# bob = 50

# ------------------------------------------------------------------------------
# Per-User Expiration Dates
# Automatically revoke access after the specified date (ISO 8601 format).
# ------------------------------------------------------------------------------

[access.user_expirations]
# alice = "2025-12-31T23:59:59Z"
# bob = "2026-06-15T00:00:00Z"

# ------------------------------------------------------------------------------
# Per-User Data Quotas
# Maximum total bytes transferred per user. Connections are refused after the limit.
# ------------------------------------------------------------------------------

[access.user_data_quota]
# alice = 107374182400
# bob = 53687091200
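# The commented values above are plain byte counts: 100 GiB and 50 GiB.
#   echo $((100 * 1024 * 1024 * 1024))   # 107374182400 (100 GiB)
#   echo $((50 * 1024 * 1024 * 1024))    # 53687091200  (50 GiB)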

# ------------------------------------------------------------------------------
# Per-User Unique IP Limits
# Maximum number of different IP addresses that can use this secret
# at the same time. Highly effective against secret leaking/sharing.
# Set to 1 for single-device, 2-3 for phone+desktop, etc.
# ------------------------------------------------------------------------------

[access.user_max_unique_ips]
# alice = 3
# bob = 2


# ==============================================================================
# UPSTREAM ROUTING
# Controls how Telemt connects to Telegram servers (or ME servers).
# If omitted entirely, uses the OS default route.
# ==============================================================================

# ------------------------------------------------------------------------------
# Direct upstream: use the server's own network interface.
# You can optionally bind to a specific interface or local IP.
# ------------------------------------------------------------------------------

# [[upstreams]]
# type = "direct"
# interface = "eth0"
# bind_addresses = ["192.0.2.10"]
# weight = 1
# enabled = true
# scopes = "*"

# ------------------------------------------------------------------------------
# SOCKS5 upstream: route Telegram traffic through a SOCKS5 proxy.
# Useful if your server's IP is blocked from reaching Telegram DCs.
# ------------------------------------------------------------------------------

# [[upstreams]]
# type = "socks5"
# address = "198.51.100.30:1080"
# username = "proxy-user"
# password = "proxy-pass"
# weight = 1
# enabled = true


# ==============================================================================
# DATACENTER OVERRIDES
# Force specific DC IDs to route to specific IP:Port combinations.
# DC 203 (CDN) is auto-injected by Telemt if not specified here.
# ==============================================================================

# [dc_overrides]
# "201" = "149.154.175.50:443"
# "202" = ["149.154.167.51:443", "149.154.175.100:443"]
config.toml

@ -4,7 +4,7 @@
 # === General Settings ===
 [general]
-use_middle_proxy = false
+use_middle_proxy = true
 # Global ad_tag fallback when user has no per-user tag in [access.user_ad_tags]
 # ad_tag = "00000000000000000000000000000000"
 # Per-user ad_tag in [access.user_ad_tags] (32 hex from @MTProxybot)

@ -32,8 +32,16 @@ show = "*"
 port = 443
 # proxy_protocol = false # Enable if behind HAProxy/nginx with PROXY protocol
 # metrics_port = 9090
+# metrics_listen = "0.0.0.0:9090" # Listen address for metrics (overrides metrics_port)
 # metrics_whitelist = ["127.0.0.1", "::1", "0.0.0.0/0"]
 
+[server.api]
+enabled = true
+listen = "0.0.0.0:9091"
+whitelist = ["127.0.0.0/8"]
+minimal_runtime_enabled = false
+minimal_runtime_cache_ttl_ms = 1000
+
 # Listen on multiple interfaces/IPs - IPv4
 [[server.listeners]]
 ip = "0.0.0.0"
@ -0,0 +1,16 @@
#!/bin/ksh
# /etc/rc.d/telemt
#
# rc.d(8) script for Telemt MTProxy daemon.
# Tokio runtime does not daemonize itself, so rc_bg=YES is used.

daemon="/usr/local/bin/telemt"
daemon_user="_telemt"
daemon_flags="/etc/telemt/config.toml"

. /etc/rc.d/rc.subr

rc_bg=YES
rc_reload=NO

rc_cmd $1
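Once this script is installed as /etc/rc.d/telemt on OpenBSD, the daemon is managed through rcctl as usual (a usage sketch, not part of the file above):

```bash
rcctl enable telemt   # start at boot
rcctl start telemt
rcctl check telemt    # verify the daemon is running
```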

@ -0,0 +1,3 @@
u telemt - "telemt user" /var/lib/telemt -
g telemt - -
m telemt telemt
@ -0,0 +1,21 @@
[Unit]
Description=Telemt
Wants=network-online.target
After=multi-user.target network.target network-online.target

[Service]
Type=simple
User=telemt
Group=telemt
WorkingDirectory=/var/lib/telemt
ExecStart=/usr/bin/telemt /etc/telemt/telemt.toml
Restart=on-failure
RestartSec=10
LimitNOFILE=65536
AmbientCapabilities=CAP_NET_BIND_SERVICE
CapabilityBoundingSet=CAP_NET_BIND_SERVICE
NoNewPrivileges=true


[Install]
WantedBy=multi-user.target
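Once the unit is installed (for example as /etc/systemd/system/telemt.service, a path assumed here), the standard systemd workflow applies:

```bash
systemctl daemon-reload
systemctl enable --now telemt
systemctl status telemt --no-pager
```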

@ -0,0 +1 @@
d /var/lib/telemt 700 telemt telemt

@ -6,7 +6,8 @@ services:
     restart: unless-stopped
     ports:
       - "443:443"
-      - "9090:9090"
+      - "127.0.0.1:9090:9090"
+      - "127.0.0.1:9091:9091"
     # Allow caching 'proxy-secret' in read-only container
     working_dir: /run/telemt
     volumes:

File diff suppressed because it is too large
@ -0,0 +1,151 @@
## How to set up a "proxy sponsor" channel and statistics via @MTProxybot

1. Go to @MTProxybot.
2. Enter the `/newproxy` command.
3. Send your server's IP address and port. For example: `1.2.3.4:443`.
4. Open the configuration file: `nano /etc/telemt/telemt.toml`.
5. Copy and send the user secret from the `[access.users]` section to the bot.
6. Copy the tag provided by the bot. For example: `1234567890abcdef1234567890abcdef`.
> [!WARNING]
> The link provided by the bot will not work. Do not copy or use it!
7. Uncomment the `ad_tag` parameter and enter the tag received from the bot.
8. Uncomment or add the `use_middle_proxy = true` parameter.

Configuration example:
```toml
[general]
ad_tag = "1234567890abcdef1234567890abcdef"
use_middle_proxy = true
```
9. Save the changes (in nano: Ctrl+S -> Ctrl+X).
10. Restart the telemt service: `systemctl restart telemt`.
11. Send the `/myproxies` command to the bot and select the added server.
12. Click the "Set promotion" button.
13. Send a **public link** to the channel. Private channels cannot be added!
14. Wait about 1 hour for the information to update on Telegram servers.
> [!WARNING]
> The sponsored channel will not be displayed to you if you are already subscribed to it.

**You can also configure different sponsored channels for different users:**
```toml
[access.user_ad_tags]
hello = "ad_tag"
hello2 = "ad_tag2"
```

## Why do you need a middle proxy (ME)
https://github.com/telemt/telemt/discussions/167


## How many people can use one link

By default, an unlimited number of people can use a single link.
However, you can limit the number of unique IP addresses for each user:
```toml
[access.user_max_unique_ips]
hello = 1
```
This parameter sets the maximum number of unique IP addresses from which a single link can be used simultaneously. If the first user disconnects, a second one can connect. At the same time, multiple users can connect from a single IP address simultaneously (for example, devices on the same Wi-Fi network).

## How to create multiple different links

1. Generate the required number of secrets using the command: `openssl rand -hex 16`.
2. Open the configuration file: `nano /etc/telemt/telemt.toml`.
3. Add new users to the `[access.users]` section:
```toml
[access.users]
user1 = "00000000000000000000000000000001"
user2 = "00000000000000000000000000000002"
user3 = "00000000000000000000000000000003"
```
4. Save the configuration (Ctrl+S -> Ctrl+X). There is no need to restart the telemt service.
5. Get the ready-to-use links using the command:
```bash
curl -s http://127.0.0.1:9091/v1/users | jq
```

## "Unknown TLS SNI" error
Usually, this error occurs if you have changed the `tls_domain` parameter, but users continue to connect using old links with the previous domain.

If you need to allow connections with any domain (ignoring SNI mismatches), add the following parameters:
```toml
[censorship]
unknown_sni_action = "mask"
```

## How to view metrics

1. Open the configuration file: `nano /etc/telemt/telemt.toml`.
2. Add the following parameters:
```toml
[server]
metrics_port = 9090
metrics_whitelist = ["127.0.0.1/32", "::1/128", "0.0.0.0/0"]
```
3. Save the changes (Ctrl+S -> Ctrl+X).
4. After that, metrics will be available at: `SERVER_IP:9090/metrics`.
> [!WARNING]
> The value `"0.0.0.0/0"` in `metrics_whitelist` opens access to metrics from any IP address. It is recommended to replace it with your personal IP, for example: `"1.2.3.4/32"`.

## Additional parameters

### Domain in the link instead of IP
To display a domain instead of an IP address in the connection links, add the following lines to the configuration file:
```toml
[general.links]
public_host = "proxy.example.com"
```

### Total server connection limit
This parameter limits the total number of active connections to the server:
```toml
[server]
max_connections = 10000 # 0 = unlimited, 10000 = default
```

### Upstream Manager
To configure outbound connections (upstreams), add the corresponding parameters to the `[[upstreams]]` section of the configuration file:

#### Binding to an outbound IP address
```toml
[[upstreams]]
type = "direct"
weight = 1
enabled = true
interface = "192.168.1.100" # Replace with your outbound IP
```

#### Using SOCKS4/5 as an upstream
- Without authorization:
```toml
[[upstreams]]
type = "socks5"           # "socks4" or "socks5"
address = "1.2.3.4:1234"  # SOCKS server address
weight = 1                # Relative upstream weight
enabled = true
```

- With authorization:
```toml
[[upstreams]]
type = "socks5"           # "socks4" or "socks5"
address = "1.2.3.4:1234"  # SOCKS server address
username = "user"         # Username for SOCKS authentication
password = "pass"         # Password for SOCKS authentication
weight = 1                # Relative upstream weight
enabled = true
```

#### Using Shadowsocks as an upstream
For this method to work, the `use_middle_proxy = false` parameter must be set.

```toml
[general]
use_middle_proxy = false

[[upstreams]]
type = "shadowsocks"
url = "ss://2022-blake3-aes-256-gcm:BASE64_KEY@1.2.3.4:8388"
weight = 1
enabled = true
```
docs/FAQ.ru.md
@ -1,64 +1,151 @@
-## How to set up a "proxy sponsor" channel
+## How to set up a "proxy sponsor" channel and statistics via @MTProxybot

-1. Go to @MTProxybot.
-2. Enter the `/newproxy` command.
-3. Send the server's IP and port. For example: 1.2.3.4:443
-4. Open the config: `nano /etc/telemt.toml`.
-5. Copy and send the bot the user secret from the [access.users] section.
-6. Copy the tag received from the bot. For example 1234567890abcdef1234567890abcdef.
-7. Uncomment the ad_tag parameter and enter the tag received from the bot.
-8. Uncomment/add the parameter use_middle_proxy = true.
+1. Go to @MTProxybot.
+2. Enter the `/newproxy` command.
+3. Send your server's IP address and port. For example: `1.2.3.4:443`.
+4. Open the configuration file: `nano /etc/telemt/telemt.toml`.
+5. Copy and send the user secret from the `[access.users]` section to the bot.
+6. Copy the tag provided by the bot. For example: `1234567890abcdef1234567890abcdef`.
+> [!WARNING]
+> The link provided by the bot will not work. Do not copy or use it!
+7. Uncomment the `ad_tag` parameter and enter the tag received from the bot.
+8. Uncomment or add the `use_middle_proxy = true` parameter.

-Config example:
+Configuration example:
 ```toml
 [general]
 ad_tag = "1234567890abcdef1234567890abcdef"
 use_middle_proxy = true
 ```
-9. Save the config. Ctrl+S -> Ctrl+X.
-10. Restart telemt: `systemctl restart telemt`.
-11. Send the /myproxies command to the bot and select the added server.
-12. Click the "Set promotion" button.
-13. Send a **public link** to the channel. A private channel cannot be added!
-14. Wait about 1 hour while the information updates on Telegram servers.
+9. Save the changes (in nano: Ctrl+S -> Ctrl+X).
+10. Restart the telemt service: `systemctl restart telemt`.
+11. Send the `/myproxies` command to the bot and select the added server.
+12. Click the "Set promotion" button.
+13. Send a **public link** to the channel. Private channels cannot be added!
+14. Wait about 1 hour for the information to update on Telegram servers.
 > [!WARNING]
-> The "proxy sponsor" will not be displayed to you if you are already subscribed to the channel.
+> The sponsored channel will not be displayed to you if you are already subscribed to it.

-## How many people can use 1 link
+**You can also configure different sponsored channels for different users:**
+```toml
+[access.user_ad_tags]
+hello = "ad_tag"
+hello2 = "ad_tag2"
+```

-By default, any number of people can use 1 link.
-You can limit the number of IPs using the proxy.
+## Why do you need a middle proxy (ME)
+https://github.com/telemt/telemt/discussions/167
+
+
+## How many people can use one link
+
+By default, an unlimited number of people can use a single link.
+However, you can limit the number of unique IP addresses for each user:
 ```toml
 [access.user_max_unique_ips]
 hello = 1
 ```
-This parameter limits how many unique IPs can use one link simultaneously. If one user disconnects, a second can connect. Several users can also connect from a single IP.
+This parameter sets the maximum number of unique IP addresses from which a single link can be used simultaneously. If the first user disconnects, a second one can connect. At the same time, multiple users can connect from a single IP address simultaneously (for example, devices on the same Wi-Fi network).

-## How to make several different links
+## How to create multiple different links

-1. Generate the required number of secrets: `openssl rand -hex 16`
-2. Open the config: `nano /etc/telemt.toml`
-3. Add new users.
+1. Generate the required number of secrets using the command: `openssl rand -hex 16`.
+2. Open the configuration file: `nano /etc/telemt/telemt.toml`.
+3. Add new users to the `[access.users]` section:
 ```toml
 [access.users]
 user1 = "00000000000000000000000000000001"
 user2 = "00000000000000000000000000000002"
 user3 = "00000000000000000000000000000003"
 ```
-4. Save the config. Ctrl+S -> Ctrl+X. There is no need to restart telemt.
-5. Get the links via `journalctl -u telemt -n -g "links" --no-pager -o cat | tac`
+4. Save the configuration (Ctrl+S -> Ctrl+X). There is no need to restart the telemt service.
+5. Get the ready-to-use links using the command:
+```bash
+curl -s http://127.0.0.1:9091/v1/users | jq
+```
+
+## "Unknown TLS SNI" error
+Usually, this error occurs if you have changed the `tls_domain` parameter, but users continue to connect using old links with the previous domain.
+
+If you need to allow connections with any domain (ignoring SNI mismatches), add the following parameters:
+```toml
+[censorship]
+unknown_sni_action = "mask"
+```

 ## How to view metrics

-1. Open the config: `nano /etc/telemt.toml`
-2. Add the following parameters
+1. Open the configuration file: `nano /etc/telemt/telemt.toml`.
+2. Add the following parameters:
 ```toml
 [server]
 metrics_port = 9090
 metrics_whitelist = ["127.0.0.1/32", "::1/128", "0.0.0.0/0"]
 ```
-3. Save the config. Ctrl+S -> Ctrl+X.
-4. Metrics are available at SERVER_IP:9090/metrics.
+3. Save the changes (Ctrl+S -> Ctrl+X).
+4. After that, metrics will be available at: `SERVER_IP:9090/metrics`.
 > [!WARNING]
-> "0.0.0.0/0" in metrics_whitelist opens access from any IP. Replace it with your own IP, for example "1.2.3.4".
+> The value `"0.0.0.0/0"` in `metrics_whitelist` opens access to metrics from any IP address. It is recommended to replace it with your personal IP, for example: `"1.2.3.4/32"`.
+
+## Additional parameters
+
+### Domain in the link instead of IP
+To display a domain instead of an IP address in the connection links, add the following lines to the configuration file:
+```toml
+[general.links]
+public_host = "proxy.example.com"
+```
+
+### Total server connection limit
+This parameter limits the total number of active connections to the server:
+```toml
+[server]
+max_connections = 10000 # 0 = unlimited, 10000 = default
+```
+
+### Upstream Manager
+To configure outbound connections (upstreams), add the corresponding parameters to the `[[upstreams]]` section of the configuration file:
+
+#### Binding to an outbound IP address
+```toml
+[[upstreams]]
+type = "direct"
+weight = 1
+enabled = true
+interface = "192.168.1.100" # Replace with your outbound IP
+```
+
+#### Using SOCKS4/5 as an upstream
+- Without authorization:
+```toml
+[[upstreams]]
+type = "socks5"           # "socks4" or "socks5"
+address = "1.2.3.4:1234"  # SOCKS server address
+weight = 1                # Relative upstream weight
+enabled = true
+```
+
+- With authorization:
+```toml
+[[upstreams]]
+type = "socks5"           # "socks4" or "socks5"
+address = "1.2.3.4:1234"  # SOCKS server address
+username = "user"         # Username for SOCKS authentication
+password = "pass"         # Password for SOCKS authentication
+weight = 1                # Relative upstream weight
+enabled = true
+```
+
+#### Using Shadowsocks as an upstream
+For this method to work, the `use_middle_proxy = false` parameter must be set.
+
+```toml
+[general]
+use_middle_proxy = false
+
+[[upstreams]]
+type = "shadowsocks"
+url = "ss://2022-blake3-aes-256-gcm:BASE64_KEY@1.2.3.4:8388"
+weight = 1
+enabled = true
+```
@ -0,0 +1,92 @@
# TELEMT Public License 3

***All rights reserved (c) 2026 Telemt***

Permission is hereby granted, free of charge, to any person obtaining a copy of this software and the accompanying documentation (the "Software"), to use the Software without restriction, including the rights to use, reproduce, modify, create derivative works of, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to grant these rights to persons to whom the Software is furnished, provided that all copyright notices and the terms and conditions of this License are complied with.

### Definitions

For the purposes of this License, the following definitions apply:

**"Software"**: the Telemt software, including source code, documentation, and all associated files distributed under the terms of this License.

**"Contributor"**: any natural or legal person who has submitted code, patches, documentation, or other materials that were accepted by the project maintainers and incorporated into the Software.

**"Contribution"**: any work of authorship intentionally submitted for inclusion in the Software.

**"Modified Version"**: any version of the Software that has been changed, adapted, extended, or otherwise modified from the original Software.

**"Maintainers"**: the natural or legal persons responsible for the official Telemt project and its official releases.

### 1 Attribution

When redistributing the Software, in source or binary form, the following MUST be retained:

- the above copyright notice;
- the full text of this License;
- all existing attribution notices.

### 2 Modification Notice

If changes are made to the Software, the person who made those changes MUST clearly state that the Software has been modified and include a brief description of the changes made.

Modified versions of the Software MUST NOT be presented as the original version of Telemt.

### 3 Trademarks and Branding

This License DOES NOT grant any rights to use the name **"Telemt"**, the Telemt logo, or any other Telemt trademarks, marks, or branding elements.

Redistributed or modified versions of the Software MAY NOT use the Telemt name in a way that could give users the impression of official origin or endorsement by the Telemt project, unless expressly permitted by the maintainers.

Use of the name **Telemt** to describe a modified version of the Software is permitted only if that version is clearly identified as modified or unofficial.

Any distribution that could reasonably mislead users into believing it is an official Telemt release is prohibited.

### 4 Binary Distribution Transparency

When distributing compiled binary versions of the Software, the distributor is HEREBY ENCOURAGED, where reasonably possible, to provide access to the corresponding source code and build instructions.

This practice promotes transparency and allows recipients to verify the integrity and reproducibility of distributed builds.

### 5 Patent Grant and Termination of Rights

Each Contributor grants recipients of the Software a perpetual, worldwide, non-exclusive, free-of-charge, royalty-free, and irrevocable patent license to:

- make,
- have made,
- use,
- offer to sell,
- sell,
- import,
- and otherwise distribute the Software.

This patent license extends only to those patent claims that would necessarily be infringed by the Contributor's respective Contribution alone or in combination with the Software.

If a person initiates or participates in patent litigation, including counterclaims or cross-claims, alleging that the Software or a Contribution contained within it infringes a patent, **all rights granted to that person under this License terminate immediately upon the filing of the claim**.

Furthermore, all rights granted under this License terminate **automatically** if a person initiates legal proceedings alleging that the Software itself infringes a patent or other intellectual property rights.

### 6 Participation and Contributions

Unless a Contributor expressly states otherwise, any Contribution intentionally submitted for inclusion in the Software is deemed licensed under the terms of this License.

By submitting a Contribution, the Contributor grants the maintainers of the Telemt project and all recipients of the Software the rights described in this License with respect to that Contribution.

### 7 Attribution for Network and Service Use

If the Software is used to provide a publicly accessible network service, the operator of that service MUST provide attribution to Telemt in at least one of the following locations:

* in the service documentation;
* in the service description;
* on an "About" or comparable informational page;
* in other user-accessible materials reasonably associated with the service.

Such attribution MUST NOT give the impression that the service is supported or officially endorsed by the Telemt project or its maintainers.

### 8 Disclaimer of Warranty and Severability Clause

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, AND NONINFRINGEMENT.

IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES, OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT, OR OTHERWISE, ARISING FROM, OUT OF, OR IN CONNECTION WITH THE SOFTWARE OR THE USE OF THE SOFTWARE.

IF ANY PROVISION OF THIS LICENSE IS HELD TO BE INVALID OR UNENFORCEABLE, SUCH PROVISION SHALL BE INTERPRETED TO REFLECT THE ORIGINAL INTENT OF THE PARTIES AS CLOSELY AS POSSIBLE, AND THE REMAINING PROVISIONS SHALL REMAIN UNAFFECTED AND IN FULL FORCE AND EFFECT.
@ -0,0 +1,143 @@
###### TELEMT Public License 3 ######
##### Copyright (c) 2026 Telemt #####

Permission is hereby granted, free of charge, to any person obtaining a copy
of this Software and associated documentation files (the "Software"),
to use, reproduce, modify, prepare derivative works of, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to permit
persons to whom the Software is furnished to do so, provided that all
copyright notices, license terms, and conditions set forth in this License
are preserved and complied with.

### Official Translations

The canonical version of this License is the English version.

Official translations are provided for informational purposes only
and for convenience, and do not have legal force. In case of any
discrepancy, the English version of this License shall prevail.

Available versions:
- English in Markdown: docs/LICENSE/LICENSE.md
- German: docs/LICENSE/LICENSE.de.md
- Russian: docs/LICENSE/LICENSE.ru.md

### Definitions

For the purposes of this License:

"Software" means the Telemt software, including source code, documentation,
and any associated files distributed under this License.

"Contributor" means any person or entity that submits code, patches,
documentation, or other contributions to the Software that are accepted
into the Software by the maintainers.

"Contribution" means any work of authorship intentionally submitted
to the Software for inclusion in the Software.

"Modified Version" means any version of the Software that has been
changed, adapted, extended, or otherwise modified from the original
Software.

"Maintainers" means the individuals or entities responsible for
the official Telemt project and its releases.

#### 1 Attribution

Redistributions of the Software, in source or binary form, MUST RETAIN the
above copyright notice, this license text, and any existing attribution
notices.

#### 2 Modification Notice

If you modify the Software, you MUST clearly state that the Software has been
modified and include a brief description of the changes made.

Modified versions MUST NOT be presented as the original Telemt.

#### 3 Trademark and Branding

This license DOES NOT grant permission to use the name "Telemt",
the Telemt logo, or any Telemt trademarks or branding.

Redistributed or modified versions of the Software MAY NOT use the Telemt
name in a way that suggests endorsement or official origin without explicit
permission from the Telemt maintainers.

Use of the name "Telemt" to describe a modified version of the Software
is permitted only if the modified version is clearly identified as a
modified or unofficial version.

Any distribution that could reasonably confuse users into believing that
the software is an official Telemt release is prohibited.

#### 4 Binary Distribution Transparency

If you distribute compiled binaries of the Software,
you are ENCOURAGED to provide access to the corresponding
source code and build instructions where reasonably possible.

This helps preserve transparency and allows recipients to verify the
integrity and reproducibility of distributed builds.

#### 5 Patent Grant and Defensive Termination Clause

Each contributor grants you a perpetual, worldwide, non-exclusive,
no-charge, royalty-free, irrevocable patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Software.

This patent license applies only to those patent claims necessarily
infringed by the contributor's contribution alone or by combination of
their contribution with the Software.

If you initiate or participate in any patent litigation, including
cross-claims or counterclaims, alleging that the Software or any
contribution incorporated within the Software constitutes patent
infringement, then **all rights granted to you under this license shall
terminate immediately** as of the date such litigation is filed.

Additionally, if you initiate legal action alleging that the
Software itself infringes your patent or other intellectual
property rights, then all rights granted to you under this
license SHALL TERMINATE automatically.

#### 6 Contributions

Unless you explicitly state otherwise, any Contribution intentionally
submitted for inclusion in the Software shall be licensed under the terms
of this License.

By submitting a Contribution, you grant the Telemt maintainers and all
recipients of the Software the rights described in this License with
respect to that Contribution.

#### 7 Network Use Attribution

If the Software is used to provide a publicly accessible network service,
the operator of such service MUST provide attribution to Telemt in at least
one of the following locations:

- service documentation
- service description
- an "About" or similar informational page
- other user-visible materials reasonably associated with the service

Such attribution MUST NOT imply endorsement by the Telemt project or its
maintainers.

#### 8 Disclaimer of Warranty and Severability Clause

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.

IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
USE OR OTHER DEALINGS IN THE SOFTWARE.

IF ANY PROVISION OF THIS LICENSE IS HELD TO BE INVALID OR UNENFORCEABLE,
SUCH PROVISION SHALL BE INTERPRETED TO REFLECT THE ORIGINAL INTENT
OF THE PARTIES AS CLOSELY AS POSSIBLE, AND THE REMAINING PROVISIONS
SHALL REMAIN IN FULL FORCE AND EFFECT.

@ -0,0 +1,90 @@
# TELEMT Public License 3

***All rights reserved (c) 2026 Telemt***

Permission is hereby granted, free of charge, to any person obtaining a copy of this software and the accompanying documentation (hereinafter, the "Software") to use the Software without restriction, including the rights to use, reproduce, modify, create derivative works, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to grant such rights to persons to whom the Software is furnished, subject to compliance with all copyright notices and the terms and conditions of this License.

### Definitions

For the purposes of this License, the following definitions apply:

**"Software"**: the Telemt software, including source code, documentation, and any associated files distributed under the terms of this License.

**"Contributor"**: any natural or legal person who has submitted code, patches, documentation, or other materials that were accepted by the project maintainers and included in the Software.

**"Contribution"**: any work of authorship intentionally submitted for inclusion in the Software.

**"Modified Version"**: any version of the Software that has been changed, adapted, extended, or otherwise modified from the original Software.

**"Maintainers"**: the natural or legal persons responsible for the official Telemt project and its official releases.

### 1 Attribution

When the Software is distributed, in source or binary form, the following MUST be preserved:

- the above copyright notice;
- the text of this License;
- any existing attribution notices.

### 2 Modification Notice

If changes are made to the Software, the person who made them MUST clearly state that the Software has been modified and include a brief description of the changes.

Modified versions of the Software MUST NOT be presented as the original version of Telemt.

### 3 Trademarks and Designations

This License DOES NOT grant the right to use the name **"Telemt"**, the Telemt logo, or any Telemt trademarks, brand designations, or branding elements.

Distributed or modified versions of the Software MUST NOT use the Telemt name in a way that may give users the impression of official origin or endorsement by the Telemt project without the explicit permission of the project maintainers.

Use of the name **Telemt** to describe a modified version of the Software is permitted only if that version is clearly marked as modified or unofficial.

Any distribution that could reasonably mislead users into believing that the software is an official Telemt release is prohibited.

### 4 Binary Distribution Transparency

When distributing compiled binary versions of the Software, the distributor is HEREBY ENCOURAGED to provide access to the corresponding source code and build instructions, where reasonably possible.

This practice promotes transparency and allows recipients to verify the integrity and reproducibility of distributed builds.

### 5 Patent Grant and Termination of Rights

Each contributor grants recipients of the Software a perpetual, worldwide, non-exclusive, free-of-charge, royalty-free, and irrevocable patent license to:

- make,
- have made,
- use,
- offer to sell,
- sell,
- import,
- and otherwise distribute the Software.

Such a patent license extends only to those patent claims that are necessarily infringed by the contributor's Contribution as such, or by its combination with the Software.

If a person initiates or participates in any patent litigation, including counterclaims or cross-claims, alleging that the Software or any Contribution included in it infringes a patent, **all rights granted to that person by this License terminate immediately** as of the date the claim is filed.

Furthermore, if a person initiates litigation alleging that the Software itself infringes their patent or other intellectual property rights, all rights granted by this License **terminate automatically**.
|
||||
|
||||
### 6 Участие и вклад в разработку
|
||||
|
||||
Если контрибьютор явно не указал иное, любой Вклад, намеренно представленный для включения в Программное обеспечение, считается лицензированным на условиях настоящей Лицензии.
|
||||
Путём предоставления Вклада контрибьютор предоставляет мейнтейнерам проекта Telemt и всем получателям Программного обеспечения права, предусмотренные настоящей Лицензией, в отношении такого Вклада.
|
||||
|
||||
### 7 Указание авторства при сетевом и сервисном использовании
|
||||
|
||||
В случае использования Программного обеспечения для предоставления публично доступного сетевого сервиса оператор такого сервиса ОБЯЗАН обеспечить указание авторства Telemt как минимум в одном из следующих мест:
|
||||
- документация сервиса;
|
||||
- описание сервиса;
|
||||
- страница "О программе" или аналогичная информационная страница;
|
||||
- иные материалы, доступные пользователям и разумно связанные с данным сервисом.
|
||||
|
||||
Такое указание авторства НЕ ДОЛЖНО создавать впечатление одобрения или официальной поддержки со стороны проекта Telemt либо его мейнтейнеров.
|
||||
|
||||
### 8 Отказ от гарантий и делимость положений
|
||||
|
||||
ПРОГРАММНОЕ ОБЕСПЕЧЕНИЕ ПРЕДОСТАВЛЯЕТСЯ "КАК ЕСТЬ", БЕЗ КАКИХ-ЛИБО ГАРАНТИЙ, ЯВНЫХ ИЛИ ПОДРАЗУМЕВАЕМЫХ, ВКЛЮЧАЯ, НО НЕ ОГРАНИЧИВАЯСЬ ГАРАНТИЯМИ КОММЕРЧЕСКОЙ ПРИГОДНОСТИ, ПРИГОДНОСТИ ДЛЯ КОНКРЕТНОЙ ЦЕЛИ И НЕНАРУШЕНИЯ ПРАВ.
|
||||
|
||||
НИ ПРИ КАКИХ ОБСТОЯТЕЛЬСТВАХ АВТОРЫ ИЛИ ПРАВООБЛАДАТЕЛИ НЕ НЕСУТ ОТВЕТСТВЕННОСТИ ПО КАКИМ-ЛИБО ТРЕБОВАНИЯМ, УБЫТКАМ ИЛИ ИНОЙ ОТВЕТСТВЕННОСТИ, ВОЗНИКАЮЩЕЙ В РЕЗУЛЬТАТЕ ДОГОВОРА, ДЕЛИКТА ИЛИ ИНЫМ ОБРАЗОМ, СВЯЗАННЫМ С ПРОГРАММНЫМ ОБЕСПЕЧЕНИЕМ ИЛИ ЕГО ИСПОЛЬЗОВАНИЕМ.
|
||||
|
||||
В СЛУЧАЕ ЕСЛИ КАКОЕ-ЛИБО ПОЛОЖЕНИЕ НАСТОЯЩЕЙ ЛИЦЕНЗИИ ПРИЗНАЁТСЯ НЕДЕЙСТВИТЕЛЬНЫМ ИЛИ НЕПРИМЕНИМЫМ, ТАКОЕ ПОЛОЖЕНИЕ ПОДЛЕЖИТ ТОЛКОВАНИЮ МАКСИМАЛЬНО БЛИЗКО К ИСХОДНОМУ НАМЕРЕНИЮ СТОРОН, ПРИ ЭТОМ ОСТАЛЬНЫЕ ПОЛОЖЕНИЯ СОХРАНЯЮТ ПОЛНУЮ ЮРИДИЧЕСКУЮ СИЛУ.
|
||||
|
|
@ -0,0 +1,132 @@
|
|||
# Telemt on OpenBSD (Build, Run, and rc.d)
|
||||
|
||||
This guide covers a practical OpenBSD deployment flow for Telemt:
|
||||
- build from source,
|
||||
- install binary and config,
|
||||
- run as an rc.d daemon,
|
||||
- verify basic runtime behavior.
|
||||
|
||||
## 1. Prerequisites
|
||||
|
||||
Install required packages:
|
||||
|
||||
```sh
|
||||
doas pkg_add rust git
|
||||
```
|
||||
|
||||
Notes:
|
||||
- The Telemt release installer (`install.sh`) is Linux-only.
- On OpenBSD, build from source with `cargo`.
|
||||
|
||||
## 2. Build from source
|
||||
|
||||
```sh
|
||||
git clone https://github.com/telemt/telemt
|
||||
cd telemt
|
||||
cargo build --release
|
||||
./target/release/telemt --version
|
||||
```
|
||||
|
||||
For low-RAM systems, this repository already uses `lto = "thin"` in the release profile.
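
For reference, this is the kind of `Cargo.toml` profile entry that statement refers to (a sketch; consult the repository's actual `Cargo.toml` for the authoritative settings):

```toml
[profile.release]
lto = "thin"   # cheaper link-time optimization than full "fat" LTO
```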
|
||||
|
||||
## 3. Install binary and config
|
||||
|
||||
```sh
|
||||
doas install -d -m 0755 /usr/local/bin
|
||||
doas install -m 0755 ./target/release/telemt /usr/local/bin/telemt
|
||||
|
||||
doas install -d -m 0750 /etc/telemt
|
||||
doas install -m 0640 ./config.toml /etc/telemt/config.toml
|
||||
```
|
||||
|
||||
## 4. Create runtime user
|
||||
|
||||
```sh
|
||||
doas useradd -L daemon -s /sbin/nologin -d /var/empty _telemt
|
||||
```
|
||||
|
||||
If `_telemt` already exists, continue.
|
||||
|
||||
## 5. Install rc.d service
|
||||
|
||||
Install the provided script:
|
||||
|
||||
```sh
|
||||
doas install -m 0555 ./contrib/openbsd/telemt.rcd /etc/rc.d/telemt
|
||||
```
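
For orientation only — the bundled `contrib/openbsd/telemt.rcd` is authoritative — a minimal OpenBSD rc.d script for a daemon invoked this way would look roughly like:

```sh
#!/bin/ksh
#
# Minimal rc.d sketch for telemt; the shipped contrib/openbsd/telemt.rcd
# is the source of truth.

daemon="/usr/local/bin/telemt"
daemon_flags="/etc/telemt/config.toml"
daemon_user="_telemt"

. /etc/rc.d/rc.subr

rc_bg=YES       # telemt does not daemonize itself
rc_reload=NO    # no SIGHUP reload; use restart

rc_cmd $1
```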
|
||||
|
||||
Enable and start:
|
||||
|
||||
```sh
|
||||
doas rcctl enable telemt
|
||||
# Optional: send daemon output to syslog
|
||||
#doas rcctl set telemt logger daemon.info
|
||||
|
||||
doas rcctl start telemt
|
||||
```
|
||||
|
||||
Service controls:
|
||||
|
||||
```sh
|
||||
doas rcctl check telemt
|
||||
doas rcctl restart telemt
|
||||
doas rcctl stop telemt
|
||||
```
|
||||
|
||||
## 6. Resource limits (recommended)
|
||||
|
||||
OpenBSD rc.d can apply resource limits via a login class. Add a `telemt` class to `/etc/login.conf` and assign it to the `_telemt` user.
|
||||
|
||||
Example class entry:
|
||||
|
||||
```text
|
||||
telemt:\
|
||||
:openfiles-cur=8192:openfiles-max=16384:\
|
||||
:datasize-cur=768M:datasize-max=1024M:\
|
||||
:coredumpsize=0:\
|
||||
:tc=daemon:
|
||||
```
|
||||
|
||||
These values are conservative defaults for small and medium deployments.
|
||||
Increase `openfiles-*` only if logs show descriptor exhaustion under load.
|
||||
|
||||
Then rebuild the login.conf database and assign the class:
|
||||
|
||||
```sh
|
||||
doas cap_mkdb /etc/login.conf
|
||||
#doas usermod -L telemt _telemt
|
||||
```
|
||||
|
||||
Uncomment the `usermod` line if you want the class bound to the `_telemt` user.
|
||||
|
||||
## 7. Functional smoke test
|
||||
|
||||
1. Validate service state:
|
||||
|
||||
```sh
|
||||
doas rcctl check telemt
|
||||
```
|
||||
|
||||
2. Check listener is present (replace 443 if needed):
|
||||
|
||||
```sh
|
||||
netstat -n -f inet -p tcp | grep LISTEN | grep '\.443'
|
||||
```
|
||||
|
||||
3. Verify process user:
|
||||
|
||||
```sh
|
||||
ps -o user,pid,command -ax | grep telemt | grep -v grep
|
||||
```
|
||||
|
||||
4. If startup fails, debug in foreground:
|
||||
|
||||
```sh
|
||||
RUST_LOG=debug /usr/local/bin/telemt /etc/telemt/config.toml
|
||||
```
|
||||
|
||||
## 8. OpenBSD-specific caveats
|
||||
|
||||
- OpenBSD does not support per-socket keepalive retries/interval tuning in the same way as Linux.
|
||||
- Telemt source already uses target-aware cfg gates for keepalive setup (see the sketch after this list).
|
||||
- Use rc.d/rcctl, not systemd.
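
To illustrate what such target gating means in practice, here is a hypothetical Rust sketch (not Telemt's actual code): Linux-only TCP keepalive options are compiled only for Linux targets, while other platforms fall back to plain `SO_KEEPALIVE`.

```rust
// Hypothetical illustration of target-aware keepalive gating; not Telemt's
// actual code. TCP_KEEPIDLE / TCP_KEEPINTVL / TCP_KEEPCNT exist on Linux,
// while OpenBSD only exposes the SO_KEEPALIVE switch itself.
#[cfg(target_os = "linux")]
fn tune_keepalive(fd: i32) {
    // here one would set TCP_KEEPIDLE / TCP_KEEPINTVL / TCP_KEEPCNT
    // on `fd` via setsockopt(2)
    let _ = fd;
}

#[cfg(not(target_os = "linux"))]
fn tune_keepalive(fd: i32) {
    // only SO_KEEPALIVE is portable here; no per-socket retry/interval knobs
    let _ = fd;
}

fn main() {
    tune_keepalive(0); // placeholder fd for the sketch
}
```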
|
||||
|
|
@ -27,12 +27,12 @@ chmod +x /bin/telemt
|
|||
|
||||
**0. Check port and generate secrets**
|
||||
|
||||
The port you have selected for use should be MISSING from the list, when:
|
||||
The port you have selected for use should not be in the list:
|
||||
```bash
|
||||
netstat -lnp
|
||||
```
|
||||
|
||||
Generate 16 bytes/32 characters HEX with OpenSSL or another way:
|
||||
Generate 16 bytes (32 hex characters) with OpenSSL or another tool:
|
||||
```bash
|
||||
openssl rand -hex 16
|
||||
```
|
||||
|
|
@ -48,24 +48,39 @@ Save the obtained result somewhere. You will need it later!
|
|||
|
||||
---
|
||||
|
||||
**1. Place your config to /etc/telemt.toml**
|
||||
**1. Place your config to /etc/telemt/telemt.toml**
|
||||
|
||||
Create the config directory:
|
||||
```bash
|
||||
mkdir /etc/telemt
|
||||
```
|
||||
|
||||
Open nano
|
||||
```bash
|
||||
nano /etc/telemt.toml
|
||||
nano /etc/telemt/telemt.toml
|
||||
```
|
||||
paste your config
|
||||
Insert your configuration:
|
||||
|
||||
```toml
|
||||
# === General Settings ===
|
||||
[general]
|
||||
# ad_tag = "00000000000000000000000000000000"
|
||||
use_middle_proxy = false
|
||||
|
||||
[general.modes]
|
||||
classic = false
|
||||
secure = false
|
||||
tls = true
|
||||
|
||||
[server]
|
||||
port = 443
|
||||
|
||||
[server.api]
|
||||
enabled = true
|
||||
# listen = "127.0.0.1:9091"
|
||||
# whitelist = ["127.0.0.1/32"]
|
||||
# read_only = true
|
||||
|
||||
# === Anti-Censorship & Masking ===
|
||||
[censorship]
|
||||
tls_domain = "petrovich.ru"
|
||||
|
|
@ -74,47 +89,74 @@ tls_domain = "petrovich.ru"
|
|||
# format: "username" = "32_hex_chars_secret"
|
||||
hello = "00000000000000000000000000000000"
|
||||
```
|
||||
|
||||
Then press Ctrl+S -> Ctrl+X to save.
|
||||
|
||||
> [!WARNING]
|
||||
> Replace the value of the hello parameter with the value you obtained in step 0.
|
||||
> Replace the value of the tls_domain parameter with another website.
|
||||
> Replace the value of the hello parameter with the value you obtained in step 0.
|
||||
> Additionally, change the value of the tls_domain parameter to a different website.
|
||||
> Changing the tls_domain parameter will break all links that use the old domain!
|
||||
|
||||
---
|
||||
|
||||
**2. Create service on /etc/systemd/system/telemt.service**
|
||||
**2. Create telemt user**
|
||||
|
||||
```bash
|
||||
useradd -d /opt/telemt -m -r -U telemt
|
||||
chown -R telemt:telemt /etc/telemt
|
||||
```
|
||||
|
||||
**3. Create service in /etc/systemd/system/telemt.service**
|
||||
|
||||
Open nano
|
||||
```bash
|
||||
nano /etc/systemd/system/telemt.service
|
||||
```
|
||||
|
||||
paste this Systemd Module
|
||||
Insert this Systemd module:
|
||||
```ini
|
||||
[Unit]
|
||||
Description=Telemt
|
||||
After=network.target
|
||||
After=network-online.target
|
||||
Wants=network-online.target
|
||||
|
||||
[Service]
|
||||
Type=simple
|
||||
WorkingDirectory=/bin
|
||||
ExecStart=/bin/telemt /etc/telemt.toml
|
||||
User=telemt
|
||||
Group=telemt
|
||||
WorkingDirectory=/opt/telemt
|
||||
ExecStart=/bin/telemt /etc/telemt/telemt.toml
|
||||
Restart=on-failure
|
||||
LimitNOFILE=65536
|
||||
AmbientCapabilities=CAP_NET_ADMIN CAP_NET_BIND_SERVICE
|
||||
CapabilityBoundingSet=CAP_NET_ADMIN CAP_NET_BIND_SERVICE
|
||||
NoNewPrivileges=true
|
||||
|
||||
[Install]
|
||||
WantedBy=multi-user.target
|
||||
```
|
||||
Then press Ctrl+S -> Ctrl+X to save.
|
||||
|
||||
Reload the systemd units:
|
||||
```bash
|
||||
systemctl daemon-reload
|
||||
```
|
||||
|
||||
**3.** To start it, enter the command `systemctl start telemt`
|
||||
**4.** To start it, enter the command `systemctl start telemt`
|
||||
|
||||
**4.** To get status information, enter `systemctl status telemt`
|
||||
**5.** To get status information, enter `systemctl status telemt`
|
||||
|
||||
**5.** For automatic startup at system boot, enter `systemctl enable telemt`
|
||||
**6.** For automatic startup at system boot, enter `systemctl enable telemt`
|
||||
|
||||
**6.** To get the links, enter `journalctl -u telemt -n -g "links" --no-pager -o cat | tac`
|
||||
**7.** To get the link(s), enter:
|
||||
```bash
|
||||
curl -s http://127.0.0.1:9091/v1/users | jq
|
||||
```
|
||||
|
||||
> Any number of people can use one link.
|
||||
|
||||
> [!WARNING]
|
||||
> Only the command from step 7 can provide a working link. Do not try to create it yourself or copy it from anywhere if you are not sure what you are doing!
|
||||
|
||||
---
|
||||
|
||||
|
|
@ -143,6 +185,8 @@ docker compose down
|
|||
docker build -t telemt:local .
|
||||
docker run --name telemt --restart unless-stopped \
|
||||
-p 443:443 \
|
||||
-p 9090:9090 \
|
||||
-p 9091:9091 \
|
||||
-e RUST_LOG=info \
|
||||
-v "$PWD/config.toml:/app/config.toml:ro" \
|
||||
--read-only \
|
||||
|
|
|
|||
|
|
@ -48,11 +48,16 @@ python3 -c 'import os; print(os.urandom(16).hex())'
|
|||
|
||||
---
|
||||
|
||||
**1. Поместите свою конфигурацию в файл /etc/telemt.toml**
|
||||
**1. Поместите свою конфигурацию в файл /etc/telemt/telemt.toml**
|
||||
|
||||
Создаём директорию для конфига:
|
||||
```bash
|
||||
mkdir /etc/telemt
|
||||
```
|
||||
|
||||
Открываем nano
|
||||
```bash
|
||||
nano /etc/telemt.toml
|
||||
nano /etc/telemt/telemt.toml
|
||||
```
|
||||
Вставьте свою конфигурацию
|
||||
|
||||
|
|
@ -60,12 +65,22 @@ nano /etc/telemt.toml
|
|||
# === General Settings ===
|
||||
[general]
|
||||
# ad_tag = "00000000000000000000000000000000"
|
||||
use_middle_proxy = false
|
||||
|
||||
[general.modes]
|
||||
classic = false
|
||||
secure = false
|
||||
tls = true
|
||||
|
||||
[server]
|
||||
port = 443
|
||||
|
||||
[server.api]
|
||||
enabled = true
|
||||
# listen = "127.0.0.1:9091"
|
||||
# whitelist = ["127.0.0.1/32"]
|
||||
# read_only = true
|
||||
|
||||
# === Anti-Censorship & Masking ===
|
||||
[censorship]
|
||||
tls_domain = "petrovich.ru"
|
||||
|
|
@ -74,15 +89,24 @@ tls_domain = "petrovich.ru"
|
|||
# format: "username" = "32_hex_chars_secret"
|
||||
hello = "00000000000000000000000000000000"
|
||||
```
|
||||
|
||||
Затем нажмите Ctrl+S -> Ctrl+X, чтобы сохранить
|
||||
|
||||
> [!WARNING]
|
||||
> Замените значение параметра hello на значение, которое вы получили в пункте 0.
|
||||
> Так же замените значение параметра tls_domain на другой сайт.
|
||||
> Изменение параметра tls_domain сделает нерабочими все ссылки, использующие старый домен!
|
||||
|
||||
---
|
||||
|
||||
**2. Создайте службу в /etc/systemd/system/telemt.service**
|
||||
**2. Создайте пользователя для telemt**
|
||||
|
||||
```bash
|
||||
useradd -d /opt/telemt -m -r -U telemt
|
||||
chown -R telemt:telemt /etc/telemt
|
||||
```
|
||||
|
||||
**3. Создайте службу в /etc/systemd/system/telemt.service**
|
||||
|
||||
Открываем nano
|
||||
```bash
|
||||
|
|
@ -93,28 +117,45 @@ nano /etc/systemd/system/telemt.service
|
|||
```ini
|
||||
[Unit]
|
||||
Description=Telemt
|
||||
After=network.target
|
||||
After=network-online.target
|
||||
Wants=network-online.target
|
||||
|
||||
[Service]
|
||||
Type=simple
|
||||
WorkingDirectory=/bin
|
||||
ExecStart=/bin/telemt /etc/telemt.toml
|
||||
User=telemt
|
||||
Group=telemt
|
||||
WorkingDirectory=/opt/telemt
|
||||
ExecStart=/bin/telemt /etc/telemt/telemt.toml
|
||||
Restart=on-failure
|
||||
LimitNOFILE=65536
|
||||
AmbientCapabilities=CAP_NET_ADMIN CAP_NET_BIND_SERVICE
|
||||
CapabilityBoundingSet=CAP_NET_ADMIN CAP_NET_BIND_SERVICE
|
||||
NoNewPrivileges=true
|
||||
|
||||
[Install]
|
||||
WantedBy=multi-user.target
|
||||
```
|
||||
Затем нажмите Ctrl+S -> Ctrl+X, чтобы сохранить
|
||||
|
||||
перезагрузите конфигурацию systemd
|
||||
```bash
|
||||
systemctl daemon-reload
|
||||
```
|
||||
|
||||
**3.** Для запуска введите команду `systemctl start telemt`
|
||||
**4.** Для запуска введите команду `systemctl start telemt`
|
||||
|
||||
**4.** Для получения информации о статусе введите `systemctl status telemt`
|
||||
**5.** Для получения информации о статусе введите `systemctl status telemt`
|
||||
|
||||
**5.** Для автоматического запуска при запуске системы в введите `systemctl enable telemt`
|
||||
**6.** Для автоматического запуска при запуске системы введите `systemctl enable telemt`
|
||||
|
||||
**6.** Для получения ссылки введите `journalctl -u telemt -n -g "links" --no-pager -o cat | tac`
|
||||
**7.** Для получения ссылки/ссылок введите:
|
||||
```bash
|
||||
curl -s http://127.0.0.1:9091/v1/users | jq
|
||||
```
|
||||
> Одной ссылкой может пользоваться сколько угодно человек.
|
||||
|
||||
> [!WARNING]
|
||||
> Рабочую ссылку может выдать только команда из 7 пункта. Не пытайтесь делать ее самостоятельно или копировать откуда-либо если вы не уверены в том, что делаете!
|
||||
|
||||
---
|
||||
|
||||
|
|
@ -138,11 +179,13 @@ docker compose down
|
|||
> - По умолчанию публикуются порты 443:443, а контейнер запускается со сброшенными привилегиями (добавлена только `NET_BIND_SERVICE`)
|
||||
> - Если вам действительно нужна сеть хоста (обычно это требуется только для некоторых конфигураций IPv6), раскомментируйте `network_mode: host`
|
||||
|
||||
**Запуск в Docker Compose**
|
||||
**Запуск без Docker Compose**
|
||||
```bash
|
||||
docker build -t telemt:local .
|
||||
docker run --name telemt --restart unless-stopped \
|
||||
-p 443:443 \
|
||||
-p 9090:9090 \
|
||||
-p 9091:9091 \
|
||||
-e RUST_LOG=info \
|
||||
-v "$PWD/config.toml:/app/config.toml:ro" \
|
||||
--read-only \
|
||||
|
|
|
|||
|
|
@ -82,7 +82,7 @@ Die unten angegebenen `Default`-Werte sind Code-Defaults (bei fehlendem Schlüss
|
|||
|
||||
| Feld | Gilt für | Typ | Pflicht | Default | Bedeutung |
|
||||
|---|---|---|---|---|---|
|
||||
| `[[upstreams]].type` | alle Upstreams | `"direct" \| "socks4" \| "socks5"` | ja | n/a | Upstream-Transporttyp. |
|
||||
| `[[upstreams]].type` | alle Upstreams | `"direct" \| "socks4" \| "socks5" \| "shadowsocks"` | ja | n/a | Upstream-Transporttyp. |
|
||||
| `[[upstreams]].weight` | alle Upstreams | `u16` | nein | `1` | Basisgewicht für weighted-random Auswahl. |
|
||||
| `[[upstreams]].enabled` | alle Upstreams | `bool` | nein | `true` | Deaktivierte Einträge werden beim Start ignoriert. |
|
||||
| `[[upstreams]].scopes` | alle Upstreams | `String` | nein | `""` | Komma-separierte Scope-Tags für Request-Routing. |
|
||||
|
|
@ -95,6 +95,8 @@ Die unten angegebenen `Default`-Werte sind Code-Defaults (bei fehlendem Schlüss
|
|||
| `interface` | `socks5` | `Option<String>` | nein | `null` | Wird nur genutzt, wenn `address` als `ip:port` angegeben ist. |
|
||||
| `username` | `socks5` | `Option<String>` | nein | `null` | SOCKS5 Benutzername. |
|
||||
| `password` | `socks5` | `Option<String>` | nein | `null` | SOCKS5 Passwort. |
|
||||
| `url` | `shadowsocks` | `String` | ja | n/a | Shadowsocks-SIP002-URL (`ss://...`). In Runtime-APIs wird nur `host:port` offengelegt. |
|
||||
| `interface` | `shadowsocks` | `Option<String>` | nein | `null` | Optionales ausgehendes Bind-Interface oder lokale Literal-IP. |
|
||||
|
||||
### Runtime-Regeln (wichtig)
|
||||
|
||||
|
|
@ -115,6 +117,7 @@ Die unten angegebenen `Default`-Werte sind Code-Defaults (bei fehlendem Schlüss
|
|||
8. Im ME-Modus wird der gewählte Upstream auch für den ME-TCP-Dial-Pfad verwendet.
|
||||
9. Im ME-Modus ist bei `direct` mit bind/interface die STUN-Reflection bind-aware für KDF-Adressmaterial.
|
||||
10. Im ME-Modus werden bei SOCKS-Upstream `BND.ADDR/BND.PORT` für KDF verwendet, wenn gültig/öffentlich und gleiche IP-Familie.
|
||||
11. `shadowsocks`-Upstreams erfordern `general.use_middle_proxy = false`. Mit aktiviertem ME-Modus schlägt das Laden der Config sofort fehl.
|
||||
|
||||
## Upstream-Konfigurationsbeispiele
|
||||
|
||||
|
|
@ -150,7 +153,20 @@ weight = 2
|
|||
enabled = true
|
||||
```
|
||||
|
||||
### Beispiel 4: Gemischte Upstreams mit Scopes
|
||||
### Beispiel 4: Shadowsocks-Upstream
|
||||
|
||||
```toml
|
||||
[general]
|
||||
use_middle_proxy = false
|
||||
|
||||
[[upstreams]]
|
||||
type = "shadowsocks"
|
||||
url = "ss://2022-blake3-aes-256-gcm:BASE64_KEY@198.51.100.50:8388"
|
||||
weight = 2
|
||||
enabled = true
|
||||
```
|
||||
|
||||
### Beispiel 5: Gemischte Upstreams mit Scopes
|
||||
|
||||
```toml
|
||||
[[upstreams]]
|
||||
|
|
|
|||
|
|
@ -82,7 +82,7 @@ Defaults below are code defaults (used when a key is omitted), not necessarily v
|
|||
|
||||
| Field | Applies to | Type | Required | Default | Meaning |
|
||||
|---|---|---|---|---|---|
|
||||
| `[[upstreams]].type` | all upstreams | `"direct" \| "socks4" \| "socks5"` | yes | n/a | Upstream transport type. |
|
||||
| `[[upstreams]].type` | all upstreams | `"direct" \| "socks4" \| "socks5" \| "shadowsocks"` | yes | n/a | Upstream transport type. |
|
||||
| `[[upstreams]].weight` | all upstreams | `u16` | no | `1` | Base weight for weighted-random selection. |
|
||||
| `[[upstreams]].enabled` | all upstreams | `bool` | no | `true` | Disabled entries are ignored at startup. |
|
||||
| `[[upstreams]].scopes` | all upstreams | `String` | no | `""` | Comma-separated scope tags for request-level routing. |
|
||||
|
|
@ -95,6 +95,8 @@ Defaults below are code defaults (used when a key is omitted), not necessarily v
|
|||
| `interface` | `socks5` | `Option<String>` | no | `null` | Used only for SOCKS server `ip:port` dial path. |
|
||||
| `username` | `socks5` | `Option<String>` | no | `null` | SOCKS5 username auth. |
|
||||
| `password` | `socks5` | `Option<String>` | no | `null` | SOCKS5 password auth. |
|
||||
| `url` | `shadowsocks` | `String` | yes | n/a | Shadowsocks SIP002 URL (`ss://...`). Only `host:port` is exposed in runtime APIs. |
|
||||
| `interface` | `shadowsocks` | `Option<String>` | no | `null` | Optional outgoing bind interface or literal local IP. |
|
||||
|
||||
### Runtime rules (important)
|
||||
|
||||
|
|
@ -115,6 +117,7 @@ Defaults below are code defaults (used when a key is omitted), not necessarily v
|
|||
8. In ME mode, the selected upstream is also used for ME TCP dial path.
|
||||
9. In ME mode for `direct` upstream with bind/interface, STUN reflection logic is bind-aware for KDF source material.
|
||||
10. In ME mode for SOCKS upstream, SOCKS `BND.ADDR/BND.PORT` is used for KDF when it is valid/public and of the same IP family.
|
||||
11. `shadowsocks` upstreams require `general.use_middle_proxy = false`. Config load fails fast if ME mode is enabled.
|
||||
|
||||
## Upstream Configuration Examples
|
||||
|
||||
|
|
@ -150,7 +153,20 @@ weight = 2
|
|||
enabled = true
|
||||
```
|
||||
|
||||
### Example 4: Mixed upstreams with scopes
|
||||
### Example 4: Shadowsocks upstream
|
||||
|
||||
```toml
|
||||
[general]
|
||||
use_middle_proxy = false
|
||||
|
||||
[[upstreams]]
|
||||
type = "shadowsocks"
|
||||
url = "ss://2022-blake3-aes-256-gcm:BASE64_KEY@198.51.100.50:8388"
|
||||
weight = 2
|
||||
enabled = true
|
||||
```
|
||||
|
||||
### Example 5: Mixed upstreams with scopes
|
||||
|
||||
```toml
|
||||
[[upstreams]]
|
||||
|
|
|
|||
|
|
@ -82,7 +82,7 @@
|
|||
|
||||
| Поле | Применимость | Тип | Обязательно | Default | Назначение |
|
||||
|---|---|---|---|---|---|
|
||||
| `[[upstreams]].type` | все upstream | `"direct" \| "socks4" \| "socks5"` | да | n/a | Тип upstream транспорта. |
|
||||
| `[[upstreams]].type` | все upstream | `"direct" \| "socks4" \| "socks5" \| "shadowsocks"` | да | n/a | Тип upstream транспорта. |
|
||||
| `[[upstreams]].weight` | все upstream | `u16` | нет | `1` | Базовый вес в weighted-random выборе. |
|
||||
| `[[upstreams]].enabled` | все upstream | `bool` | нет | `true` | Выключенные записи игнорируются на старте. |
|
||||
| `[[upstreams]].scopes` | все upstream | `String` | нет | `""` | Список scope-токенов через запятую для маршрутизации. |
|
||||
|
|
@ -95,6 +95,8 @@
|
|||
| `interface` | `socks5` | `Option<String>` | нет | `null` | Используется только если `address` задан как `ip:port`. |
|
||||
| `username` | `socks5` | `Option<String>` | нет | `null` | Логин SOCKS5 auth. |
|
||||
| `password` | `socks5` | `Option<String>` | нет | `null` | Пароль SOCKS5 auth. |
|
||||
| `url` | `shadowsocks` | `String` | да | n/a | Shadowsocks SIP002 URL (`ss://...`). В runtime API раскрывается только `host:port`. |
|
||||
| `interface` | `shadowsocks` | `Option<String>` | нет | `null` | Необязательный исходящий bind-интерфейс или literal локальный IP. |
|
||||
|
||||
### Runtime-правила
|
||||
|
||||
|
|
@ -115,6 +117,7 @@
|
|||
8. В ME-режиме выбранный upstream также используется для ME TCP dial path.
|
||||
9. В ME-режиме для `direct` upstream с bind/interface STUN-рефлексия выполняется bind-aware для KDF материала.
|
||||
10. В ME-режиме для SOCKS upstream используются `BND.ADDR/BND.PORT` для KDF, если адрес валиден/публичен и соответствует IP family.
|
||||
11. `shadowsocks` upstream требует `general.use_middle_proxy = false`. При включенном ME-режиме конфиг отклоняется при загрузке.
|
||||
|
||||
## Примеры конфигурации Upstreams
|
||||
|
||||
|
|
@ -150,7 +153,20 @@ weight = 2
|
|||
enabled = true
|
||||
```
|
||||
|
||||
### Пример 4: смешанные upstream с scopes
|
||||
### Пример 4: Shadowsocks upstream
|
||||
|
||||
```toml
|
||||
[general]
|
||||
use_middle_proxy = false
|
||||
|
||||
[[upstreams]]
|
||||
type = "shadowsocks"
|
||||
url = "ss://2022-blake3-aes-256-gcm:BASE64_KEY@198.51.100.50:8388"
|
||||
weight = 2
|
||||
enabled = true
|
||||
```
|
||||
|
||||
### Пример 5: смешанные upstream с scopes
|
||||
|
||||
```toml
|
||||
[[upstreams]]
|
||||
|
|
|
|||
|
|
@ -0,0 +1,287 @@
|
|||
<img src="https://gist.githubusercontent.com/avbor/1f8a128e628f47249aae6e058a57610b/raw/19013276c035e91058e0a9799ab145f8e70e3ff5/scheme.svg">
|
||||
|
||||
## Concept
|
||||
- **Server A** (_conditionally Russian Federation_):\
Entry point, receives Telegram proxy user traffic via **HAProxy** (port `443`)\
and sends it into the tunnel to Server **B**.\
Internal IP in the tunnel — `10.10.10.2`\
Port for HAProxy clients — `443/tcp`
- **Server B** (_conditionally Netherlands_):\
Exit point, runs **telemt** and accepts client connections through Server **A**.\
The server must have unrestricted access to Telegram servers.\
Internal IP in the tunnel — `10.10.10.1`\
AmneziaWG port — `8443/udp`\
Port for telemt clients — `443/tcp`
|
||||
|
||||
---
|
||||
|
||||
## Step 1. Setting up the AmneziaWG tunnel (A <-> B)
|
||||
[AmneziaWG](https://github.com/amnezia-vpn/amneziawg-linux-kernel-module) must be installed on all servers.\
|
||||
All following commands are given for **Ubuntu 24.04**.\
|
||||
For RHEL-based distributions, installation instructions are available at the link above.
|
||||
|
||||
### Installing AmneziaWG (Servers A and B)
|
||||
The following steps must be performed on each server:
|
||||
|
||||
#### 1. Adding the AmneziaWG repository and installing required packages:
|
||||
```bash
|
||||
sudo apt install -y software-properties-common python3-launchpadlib gnupg2 linux-headers-$(uname -r) && \
|
||||
sudo add-apt-repository ppa:amnezia/ppa && \
|
||||
sudo apt-get install -y amneziawg
|
||||
```
|
||||
|
||||
#### 2. Generating a unique key pair:
|
||||
```bash
|
||||
cd /etc/amnezia/amneziawg && \
|
||||
awg genkey | tee private.key | awg pubkey > public.key
|
||||
```
|
||||
|
||||
As a result, you will get two files in the `/etc/amnezia/amneziawg` folder:\
|
||||
`private.key` - private, and\
|
||||
`public.key` - public server keys
|
||||
|
||||
#### 3. Configuring network interfaces:
|
||||
Obfuscation parameters `S1`, `S2`, `H1`, `H2`, `H3`, `H4` must be strictly identical on both servers.\
|
||||
Parameters `Jc`, `Jmin` and `Jmax` can differ.\
|
||||
Parameters `I1-I5` ([Custom Protocol Signature](https://docs.amnezia.org/documentation/amnezia-wg/)) must be specified on the client side (Server **A**).
|
||||
|
||||
Recommendations for choosing values:
|
||||
|
||||
```text
|
||||
Jc — 1 ≤ Jc ≤ 128; from 4 to 12 inclusive
|
||||
Jmin — Jmax > Jmin < 1280*; recommended 8
|
||||
Jmax — Jmin < Jmax ≤ 1280*; recommended 80
|
||||
S1 — S1 ≤ 1132* (1280* - 148 = 1132); S1 + 56 ≠ S2;
|
||||
recommended range from 15 to 150 inclusive
|
||||
S2 — S2 ≤ 1188* (1280* - 92 = 1188);
|
||||
recommended range from 15 to 150 inclusive
|
||||
H1/H2/H3/H4 — must be unique and differ from each other;
|
||||
recommended range from 5 to 2147483647 inclusive
|
||||
|
||||
* It is assumed that the Internet connection has an MTU of 1280.
|
||||
```
|
||||
|
||||
> [!IMPORTANT]
|
||||
> It is recommended to use your own, unique values.\
|
||||
> You can use the [generator](https://htmlpreview.github.io/?https://gist.githubusercontent.com/avbor/955782b5c37b06240b243aa375baeac5/raw/13f5517ca473b47c412b9a99407066de973732bd/awg-gen.html) to select parameters.
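
If you prefer the shell to the linked generator, one possible way to draw candidate values in the recommended ranges (assuming GNU coreutils `shuf`, present on Ubuntu) is:

```bash
# Four candidates for H1-H4 (must all differ; re-roll on collision)
for i in 1 2 3 4; do shuf -i 5-2147483647 -n 1; done
# Candidates for S1 and S2 (re-roll if S1 + 56 happens to equal S2)
shuf -i 15-150 -n 2
```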
|
||||
|
||||
#### Server B Configuration (Netherlands):
|
||||
|
||||
Create the interface configuration file (`awg0`)
|
||||
```bash
|
||||
nano /etc/amnezia/amneziawg/awg0.conf
|
||||
```
|
||||
|
||||
File content
|
||||
```ini
|
||||
[Interface]
|
||||
Address = 10.10.10.1/24
|
||||
ListenPort = 8443
|
||||
PrivateKey = <PRIVATE_KEY_SERVER_B>
|
||||
SaveConfig = true
|
||||
Jc = 4
|
||||
Jmin = 8
|
||||
Jmax = 80
|
||||
S1 = 29
|
||||
S2 = 15
|
||||
S3 = 18
|
||||
S4 = 0
|
||||
H1 = 2087563914
|
||||
H2 = 188817757
|
||||
H3 = 101784570
|
||||
H4 = 432174303
|
||||
|
||||
[Peer]
|
||||
PublicKey = <PUBLIC_KEY_SERVER_A>
|
||||
AllowedIPs = 10.10.10.2/32
|
||||
```
|
||||
`ListenPort` - the port on which the server will listen for connections; you can choose any free one.\
|
||||
`<PRIVATE_KEY_SERVER_B>` - the content of the `private.key` file from Server **B**.\
|
||||
`<PUBLIC_KEY_SERVER_A>` - the content of the `public.key` file from Server **A**.
|
||||
|
||||
Open the port on the firewall (if enabled):
|
||||
```bash
|
||||
sudo ufw allow from <PUBLIC_IP_SERVER_A> to any port 8443 proto udp
|
||||
```
|
||||
|
||||
`<PUBLIC_IP_SERVER_A>` - the external IP address of Server **A**.
|
||||
|
||||
#### Server A Configuration (Russian Federation):
|
||||
Create the interface configuration file (awg0)
|
||||
|
||||
```bash
|
||||
nano /etc/amnezia/amneziawg/awg0.conf
|
||||
```
|
||||
|
||||
File content
|
||||
```ini
|
||||
[Interface]
|
||||
Address = 10.10.10.2/24
|
||||
PrivateKey = <PRIVATE_KEY_SERVER_A>
|
||||
Jc = 4
|
||||
Jmin = 8
|
||||
Jmax = 80
|
||||
S1 = 29
|
||||
S2 = 15
|
||||
S3 = 18
|
||||
S4 = 0
|
||||
H1 = 2087563914
|
||||
H2 = 188817757
|
||||
H3 = 101784570
|
||||
H4 = 432174303
|
||||
I1 = <b 0xc10000000108981eba846e21f74e00>
|
||||
I2 = <b 0xc20000000108981eba846e21f74e00>
|
||||
I3 = <b 0xc30000000108981eba846e21f74e00>
|
||||
I4 = <b 0x43981eba846e21f74e>
|
||||
I5 = <b 0x43981eba846e21f74e>
|
||||
|
||||
[Peer]
|
||||
PublicKey = <PUBLIC_KEY_SERVER_B>
|
||||
Endpoint = <PUBLIC_IP_SERVER_B>:8443
|
||||
AllowedIPs = 10.10.10.1/32
|
||||
PersistentKeepalive = 25
|
||||
```
|
||||
|
||||
`<PRIVATE_KEY_SERVER_A>` - the content of the `private.key` file from Server **A**.\
|
||||
`<PUBLIC_KEY_SERVER_B>` - the content of the `public.key` file from Server **B**.\
|
||||
`<PUBLIC_IP_SERVER_B>` - the public IP address of Server **B**.
|
||||
|
||||
Enable the tunnel on both servers:
|
||||
```bash
|
||||
sudo systemctl enable --now awg-quick@awg0
|
||||
```
|
||||
|
||||
Make sure Server B is accessible from Server A through the tunnel.
|
||||
```bash
|
||||
ping 10.10.10.1
|
||||
PING 10.10.10.1 (10.10.10.1) 56(84) bytes of data.
|
||||
64 bytes from 10.10.10.1: icmp_seq=1 ttl=64 time=35.1 ms
|
||||
64 bytes from 10.10.10.1: icmp_seq=2 ttl=64 time=35.0 ms
|
||||
64 bytes from 10.10.10.1: icmp_seq=3 ttl=64 time=35.1 ms
|
||||
^C
|
||||
```
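
You can additionally confirm handshakes and transfer counters on either side; the `awg` utility mirrors the standard WireGuard `wg` tool (assuming `amneziawg-tools` was installed alongside the module):

```bash
sudo awg show awg0
```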
|
||||
---
|
||||
|
||||
## Step 2. Installing telemt on Server B (conditionally Netherlands)
|
||||
Installation and configuration are described [here](https://github.com/telemt/telemt/blob/main/docs/QUICK_START_GUIDE.ru.md) or [here](https://gitlab.com/An0nX/telemt-docker#-quick-start-docker-compose).\
|
||||
It is assumed that telemt listens for connections on port `443/tcp`.
|
||||
|
||||
In the telemt config, you must enable the `Proxy` protocol and restrict connections to it only through the tunnel.
|
||||
```toml
|
||||
[server]
|
||||
port = 443
|
||||
listen_addr_ipv4 = "10.10.10.1"
|
||||
proxy_protocol = true
|
||||
```
|
||||
|
||||
Also, for correct link generation, specify the FQDN or IP address and port of Server `A`
|
||||
```toml
|
||||
[general.links]
|
||||
show = "*"
|
||||
public_host = "<FQDN_OR_IP_SERVER_A>"
|
||||
public_port = 443
|
||||
```
|
||||
|
||||
Open the port on the firewall (if enabled):
|
||||
```bash
|
||||
sudo ufw allow from 10.10.10.2 to any port 443 proto tcp
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Step 3. Configuring HAProxy on Server A (Russian Federation)
|
||||
Since the version in the standard Ubuntu repository is relatively old, it makes sense to use the official Docker image.\
|
||||
[Instructions](https://docs.docker.com/engine/install/ubuntu/) for installing Docker on Ubuntu.
|
||||
|
||||
> [!WARNING]
|
||||
> By default, regular users do not have rights to use ports < 1024.
|
||||
> Attempts to run HAProxy on port 443 can lead to errors:
|
||||
> ```
|
||||
> [ALERT] (8) : Binding [/usr/local/etc/haproxy/haproxy.cfg:17] for frontend tcp_in_443:
|
||||
> protocol tcpv4: cannot bind socket (Permission denied) for [0.0.0.0:443].
|
||||
> ```
|
||||
> There are two simple ways to bypass this restriction, choose one:
|
||||
> 1. At the OS level, change the net.ipv4.ip_unprivileged_port_start setting to allow users to use all ports:
|
||||
> ```
|
||||
> echo "net.ipv4.ip_unprivileged_port_start = 0" | sudo tee -a /etc/sysctl.conf && sudo sysctl -p
|
||||
> ```
|
||||
> or
|
||||
>
|
||||
> 2. Run HAProxy as root:
|
||||
> Uncomment the `user: "root"` parameter in docker-compose.yaml.
|
||||
|
||||
#### Create a folder for HAProxy:
|
||||
```bash
|
||||
mkdir -p /opt/docker-compose/haproxy && cd $_
|
||||
```
|
||||
|
||||
#### Create the docker-compose.yaml file
|
||||
`nano docker-compose.yaml`
|
||||
|
||||
File content
|
||||
```yaml
|
||||
services:
|
||||
haproxy:
|
||||
image: haproxy:latest
|
||||
container_name: haproxy
|
||||
restart: unless-stopped
|
||||
# user: "root"
|
||||
network_mode: "host"
|
||||
volumes:
|
||||
- ./haproxy.cfg:/usr/local/etc/haproxy/haproxy.cfg:ro
|
||||
logging:
|
||||
driver: "json-file"
|
||||
options:
|
||||
max-size: "1m"
|
||||
max-file: "1"
|
||||
```
|
||||
|
||||
#### Create the haproxy.cfg config file
|
||||
Accept connections on port 443/tcp and send them through the tunnel to Server `B` at 10.10.10.1:443
|
||||
|
||||
`nano haproxy.cfg`
|
||||
|
||||
File content
|
||||
|
||||
```haproxy
|
||||
global
|
||||
log stdout format raw local0
|
||||
maxconn 10000
|
||||
|
||||
defaults
|
||||
log global
|
||||
mode tcp
|
||||
option tcplog
|
||||
option clitcpka
|
||||
option srvtcpka
|
||||
timeout connect 5s
|
||||
timeout client 2h
|
||||
timeout server 2h
|
||||
timeout check 5s
|
||||
|
||||
frontend tcp_in_443
|
||||
bind *:443
|
||||
maxconn 8000
|
||||
option tcp-smart-accept
|
||||
default_backend telemt_nodes
|
||||
|
||||
backend telemt_nodes
|
||||
option tcp-smart-connect
|
||||
server server_a 10.10.10.1:443 check inter 5s rise 2 fall 3 send-proxy-v2
|
||||
|
||||
|
||||
```
|
||||
> [!WARNING]
|
||||
> **The file must end with an empty line, otherwise HAProxy will not start!**
|
||||
|
||||
#### Allow port 443/tcp in the firewall (if enabled)
|
||||
```bash
|
||||
sudo ufw allow 443/tcp
|
||||
```
|
||||
|
||||
#### Start the HAProxy container
|
||||
```bash
|
||||
docker compose up -d
|
||||
```
|
||||
|
||||
If everything is configured correctly, you can now try connecting Telegram clients using links from the telemt log/API.
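
As a final outside check, the entry point should answer like a plain TLS server for your configured domain (placeholders here are hypothetical; substitute your Server **A** address and the `tls_domain` from your telemt config):

```bash
openssl s_client -connect <PUBLIC_IP_SERVER_A>:443 -servername <TLS_DOMAIN> </dev/null
```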
|
||||
|
|
@ -0,0 +1,291 @@
|
|||
<img src="https://gist.githubusercontent.com/avbor/1f8a128e628f47249aae6e058a57610b/raw/19013276c035e91058e0a9799ab145f8e70e3ff5/scheme.svg">
|
||||
|
||||
## Концепция
|
||||
- **Сервер A** (_РФ_):\
|
||||
Точка входа, принимает трафик пользователей Telegram-прокси через **HAProxy** (порт `443`)\
|
||||
и отправляет в туннель на Сервер **B**.\
|
||||
Внутренний IP в туннеле — `10.10.10.2`\
|
||||
Порт для клиентов HAProxy — `443/tcp`
- **Сервер B** (_условно Нидерланды_):\
Точка выхода, на нем работает **telemt** и принимает подключения клиентов через Сервер **A**.\
На сервере должен быть неограниченный доступ до серверов Telegram.\
Внутренний IP в туннеле — `10.10.10.1`\
Порт AmneziaWG — `8443/udp`\
Порт для клиентов telemt — `443/tcp`
|
||||
|
||||
---
|
||||
|
||||
## Шаг 1. Настройка туннеля AmneziaWG (A <-> B)
|
||||
|
||||
На всех серверах необходимо установить [amneziawg](https://github.com/amnezia-vpn/amneziawg-linux-kernel-module).\
|
||||
Далее все команды даны для **Ubuntu 24.04**.\
|
||||
Для RHEL-based дистрибутивов инструкция по установке есть по ссылке выше.
|
||||
|
||||
### Установка AmneziaWG (Сервера A и B)
|
||||
На каждом из серверов необходимо выполнить следующие шаги:
|
||||
|
||||
#### 1. Добавление репозитория AmneziaWG и установка необходимых пакетов:
|
||||
```bash
|
||||
sudo apt install -y software-properties-common python3-launchpadlib gnupg2 linux-headers-$(uname -r) && \
|
||||
sudo add-apt-repository ppa:amnezia/ppa && \
|
||||
sudo apt-get install -y amneziawg
|
||||
```
|
||||
|
||||
#### 2. Генерация уникальной пары ключей:
|
||||
```bash
|
||||
cd /etc/amnezia/amneziawg && \
|
||||
awg genkey | tee private.key | awg pubkey > public.key
|
||||
```
|
||||
В результате вы получите в папке `/etc/amnezia/amneziawg` два файла:\
|
||||
`private.key` - приватный и\
|
||||
`public.key` - публичный ключи сервера
|
||||
|
||||
#### 3. Настройка сетевых интерфейсов:
|
||||
|
||||
Параметры обфускации `S1`, `S2`, `H1`, `H2`, `H3`, `H4` должны быть строго идентичными на обоих серверах.\
|
||||
Параметры `Jc`, `Jmin` и `Jmax` могут отличаться.\
|
||||
Параметры `I1-I5` ([Custom Protocol Signature](https://docs.amnezia.org/documentation/amnezia-wg/)) нужно указывать на стороне _клиента_ (Сервер **А**).
|
||||
|
||||
Рекомендации по выбору значений:
|
||||
```text
|
||||
Jc — 1 ≤ Jc ≤ 128; от 4 до 12 включительно
|
||||
Jmin — Jmax > Jmin < 1280*; рекомендовано 8
|
||||
Jmax — Jmin < Jmax ≤ 1280*; рекомендовано 80
|
||||
S1 — S1 ≤ 1132* (1280* - 148 = 1132); S1 + 56 ≠ S2;
|
||||
рекомендованный диапазон от 15 до 150 включительно
|
||||
S2 — S2 ≤ 1188* (1280* - 92 = 1188);
|
||||
рекомендованный диапазон от 15 до 150 включительно
|
||||
H1/H2/H3/H4 — должны быть уникальны и отличаться друг от друга;
|
||||
рекомендованный диапазон от 5 до 2147483647 включительно
|
||||
|
||||
* Предполагается, что подключение к Интернету имеет MTU 1280.
|
||||
```
|
||||
> [!IMPORTANT]
|
||||
> Рекомендуется использовать собственные, уникальные значения.\
|
||||
> Для выбора параметров можете воспользоваться [генератором](https://htmlpreview.github.io/?https://gist.githubusercontent.com/avbor/955782b5c37b06240b243aa375baeac5/raw/13f5517ca473b47c412b9a99407066de973732bd/awg-gen.html).
|
||||
|
||||
#### Конфигурация Сервера B (_Нидерланды_):
|
||||
|
||||
Создаем файл конфигурации интерфейса (`awg0`)
|
||||
```bash
|
||||
nano /etc/amnezia/amneziawg/awg0.conf
|
||||
```
|
||||
|
||||
Содержимое файла
|
||||
```ini
|
||||
[Interface]
|
||||
Address = 10.10.10.1/24
|
||||
ListenPort = 8443
|
||||
PrivateKey = <PRIVATE_KEY_SERVER_B>
|
||||
SaveConfig = true
|
||||
Jc = 4
|
||||
Jmin = 8
|
||||
Jmax = 80
|
||||
S1 = 29
|
||||
S2 = 15
|
||||
S3 = 18
|
||||
S4 = 0
|
||||
H1 = 2087563914
|
||||
H2 = 188817757
|
||||
H3 = 101784570
|
||||
H4 = 432174303
|
||||
|
||||
[Peer]
|
||||
PublicKey = <PUBLIC_KEY_SERVER_A>
|
||||
AllowedIPs = 10.10.10.2/32
|
||||
```
|
||||
|
||||
`ListenPort` - порт, на котором сервер будет ждать подключения, можете выбрать любой свободный.\
|
||||
`<PRIVATE_KEY_SERVER_B>` - содержимое файла `private.key` с сервера **B**.\
|
||||
`<PUBLIC_KEY_SERVER_A>` - содержимое файла `public.key` с сервера **A**.
|
||||
|
||||
Открываем порт на фаерволе (если включен):
|
||||
```bash
|
||||
sudo ufw allow from <PUBLIC_IP_SERVER_A> to any port 8443 proto udp
|
||||
```
|
||||
|
||||
`<PUBLIC_IP_SERVER_A>` - внешний IP адрес Сервера **A**.
|
||||
|
||||
#### Конфигурация Сервера A (_РФ_):
|
||||
|
||||
Создаем файл конфигурации интерфейса (`awg0`)
|
||||
```bash
|
||||
nano /etc/amnezia/amneziawg/awg0.conf
|
||||
```
|
||||
|
||||
Содержимое файла
|
||||
```ini
|
||||
[Interface]
|
||||
Address = 10.10.10.2/24
|
||||
PrivateKey = <PRIVATE_KEY_SERVER_A>
|
||||
Jc = 4
|
||||
Jmin = 8
|
||||
Jmax = 80
|
||||
S1 = 29
|
||||
S2 = 15
|
||||
S3 = 18
|
||||
S4 = 0
|
||||
H1 = 2087563914
|
||||
H2 = 188817757
|
||||
H3 = 101784570
|
||||
H4 = 432174303
|
||||
I1 = <b 0xc10000000108981eba846e21f74e00>
|
||||
I2 = <b 0xc20000000108981eba846e21f74e00>
|
||||
I3 = <b 0xc30000000108981eba846e21f74e00>
|
||||
I4 = <b 0x43981eba846e21f74e>
|
||||
I5 = <b 0x43981eba846e21f74e>
|
||||
|
||||
[Peer]
|
||||
PublicKey = <PUBLIC_KEY_SERVER_B>
|
||||
Endpoint = <PUBLIC_IP_SERVER_B>:8443
|
||||
AllowedIPs = 10.10.10.1/32
|
||||
PersistentKeepalive = 25
|
||||
```
|
||||
|
||||
`<PRIVATE_KEY_SERVER_A>` - содержимое файла `private.key` с сервера **A**.\
|
||||
`<PUBLIC_KEY_SERVER_B>` - содержимое файла `public.key` с сервера **B**.\
|
||||
`<PUBLIC_IP_SERVER_B>` - публичный IP-адрес сервера **B**.
|
||||
|
||||
#### Включаем туннель на обоих серверах:
|
||||
```bash
|
||||
sudo systemctl enable --now awg-quick@awg0
|
||||
```
|
||||
|
||||
Убедитесь, что с Сервера `A` доступен Сервер `B` через туннель.
|
||||
```bash
|
||||
ping 10.10.10.1
|
||||
PING 10.10.10.1 (10.10.10.1) 56(84) bytes of data.
|
||||
64 bytes from 10.10.10.1: icmp_seq=1 ttl=64 time=35.1 ms
|
||||
64 bytes from 10.10.10.1: icmp_seq=2 ttl=64 time=35.0 ms
|
||||
64 bytes from 10.10.10.1: icmp_seq=3 ttl=64 time=35.1 ms
|
||||
^C
|
||||
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Шаг 2. Установка telemt на Сервере B (_условно Нидерланды_)
|
||||
|
||||
Установка и настройка описаны [здесь](https://github.com/telemt/telemt/blob/main/docs/QUICK_START_GUIDE.ru.md) или [здесь](https://gitlab.com/An0nX/telemt-docker#-quick-start-docker-compose).\
|
||||
Подразумевается, что telemt ожидает подключения на порту `443/tcp`.
|
||||
|
||||
В конфиге telemt необходимо включить протокол `Proxy` и ограничить подключения к нему только через туннель.
|
||||
|
||||
```toml
|
||||
[server]
|
||||
port = 443
|
||||
listen_addr_ipv4 = "10.10.10.1"
|
||||
proxy_protocol = true
|
||||
```
|
||||
|
||||
А также, для правильной генерации ссылок, указать FQDN или IP адрес и порт Сервера `A`
|
||||
|
||||
```toml
|
||||
[general.links]
|
||||
show = "*"
|
||||
public_host = "<FQDN_OR_IP_SERVER_A>"
|
||||
public_port = 443
|
||||
```
|
||||
|
||||
Открываем порт на фаерволе (если включен):
|
||||
```bash
|
||||
sudo ufw allow from 10.10.10.2 to any port 443 proto tcp
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Шаг 3. Настройка HAProxy на Сервере A (_РФ_)
|
||||
|
||||
Т.к. в стандартном репозитории Ubuntu версия относительно старая, имеет смысл воспользоваться официальным образом Docker.\
|
||||
[Инструкция](https://docs.docker.com/engine/install/ubuntu/) по установке Docker на Ubuntu.
|
||||
|
||||
> [!WARNING]
|
||||
> По умолчанию у обычных пользователей нет прав на использование портов < 1024.\
|
||||
> Попытки запустить HAProxy на 443 порту могут приводить к ошибкам:
|
||||
> ```
|
||||
> [ALERT] (8) : Binding [/usr/local/etc/haproxy/haproxy.cfg:17] for frontend tcp_in_443:
|
||||
> protocol tcpv4: cannot bind socket (Permission denied) for [0.0.0.0:443].
|
||||
> ```
|
||||
> Есть два простых способа обойти это ограничение, выберите что-то одно:
|
||||
> 1. На уровне ОС изменить настройку net.ipv4.ip_unprivileged_port_start, разрешив пользователям использовать все порты:
|
||||
> ```
|
||||
> echo "net.ipv4.ip_unprivileged_port_start = 0" | sudo tee -a /etc/sysctl.conf && sudo sysctl -p
|
||||
> ```
|
||||
> или
|
||||
>
|
||||
> 2. Запустить HAProxy под root:\
|
||||
> Раскомментируйте в docker-compose.yaml параметр `user: "root"`.
|
||||
|
||||
#### Создаем папку для HAProxy:
|
||||
```bash
|
||||
mkdir -p /opt/docker-compose/haproxy && cd $_
|
||||
```
|
||||
#### Создаем файл docker-compose.yaml
|
||||
|
||||
`nano docker-compose.yaml`
|
||||
|
||||
Содержимое файла
|
||||
```yaml
|
||||
services:
|
||||
haproxy:
|
||||
image: haproxy:latest
|
||||
container_name: haproxy
|
||||
restart: unless-stopped
|
||||
# user: "root"
|
||||
network_mode: "host"
|
||||
volumes:
|
||||
- ./haproxy.cfg:/usr/local/etc/haproxy/haproxy.cfg:ro
|
||||
logging:
|
||||
driver: "json-file"
|
||||
options:
|
||||
max-size: "1m"
|
||||
max-file: "1"
|
||||
```
|
||||
#### Создаем файл конфига haproxy.cfg
|
||||
Принимаем подключения на порту 443/tcp и отправляем их через туннель на Сервер `B` 10.10.10.1:443
|
||||
|
||||
`nano haproxy.cfg`
|
||||
|
||||
Содержимое файла
|
||||
```haproxy
|
||||
global
|
||||
log stdout format raw local0
|
||||
maxconn 10000
|
||||
|
||||
defaults
|
||||
log global
|
||||
mode tcp
|
||||
option tcplog
|
||||
option clitcpka
|
||||
option srvtcpka
|
||||
timeout connect 5s
|
||||
timeout client 2h
|
||||
timeout server 2h
|
||||
timeout check 5s
|
||||
|
||||
frontend tcp_in_443
|
||||
bind *:443
|
||||
maxconn 8000
|
||||
option tcp-smart-accept
|
||||
default_backend telemt_nodes
|
||||
|
||||
backend telemt_nodes
|
||||
option tcp-smart-connect
|
||||
server server_a 10.10.10.1:443 check inter 5s rise 2 fall 3 send-proxy-v2
|
||||
|
||||
|
||||
```
|
||||
>[!WARNING]
|
||||
>**Файл должен заканчиваться пустой строкой, иначе HAProxy не запустится!**
|
||||
|
||||
#### Разрешаем порт 443/tcp в фаерволе (если включен)
|
||||
```bash
|
||||
sudo ufw allow 443/tcp
|
||||
```
|
||||
|
||||
#### Запускаем контейнер HAProxy
|
||||
```bash
|
||||
docker compose up -d
|
||||
```
|
||||
|
||||
Если все настроено верно, то теперь можно пробовать подключить клиентов Telegram с использованием ссылок из лога/API telemt.
|
||||
|
|
@ -0,0 +1,278 @@
|
|||
# TLS-F and TCP-S in Telemt

## Overall architecture

**Telemt** is first and foremost an **MTProxy** implementation, through which the Telegram payload flows.

The **TLS-Fronting / TCP-Splitting** subsystem serves as a **masking transport layer** whose job is to make the MTProxy connection look, from the outside, like an ordinary TLS connection to a legitimate website.

Thus:

- **MTProxy** is Telemt's primary functional layer for handling Telegram traffic
- **TLS-Fronting / TCP-Splitting** is the transport masking subsystem

From the network's point of view Telemt behaves like a **TLS server**, but in fact:

- valid MTProxy clients stay inside the Telemt contour
- any other TLS clients are proxied to an ordinary stub HTTPS server
# Basic scenario / best practice

Suppose you have a domain:

```
umweltschutz.de
```

### 1 DNS

You create an A record:

```
umweltschutz.de -> A record 198.18.88.88
```

where `198.18.88.88` is the IP of your server running telemt.

### 2 TLS domain

In the Telemt configuration:

```toml
[censorship]
tls_domain = "umweltschutz.de"
```

The client uses this domain as the SNI in its ClientHello.
### 3 Stub server

You bring up an ordinary HTTPS server, for example **nginx**, with a certificate for this domain.

It can run:

- on the same server
- on a different server
- on a different port

In the Telemt configuration:

```toml
[censorship]
mask_host = "127.0.0.1"
mask_port = 8443
```

where `127.0.0.1` is the IP of the stub server and `8443` is the port it listens on.

This server exists **to handle any non-MTProxy requests**.
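
A minimal stub vhost might look like the following sketch (hypothetical; the certificate paths are assumptions, and any HTTPS server with a valid certificate for the domain works just as well):

```nginx
server {
    listen 127.0.0.1:8443 ssl;   # matches mask_host / mask_port above
    server_name umweltschutz.de;

    # assumed certificate paths (e.g. issued by Let's Encrypt)
    ssl_certificate     /etc/letsencrypt/live/umweltschutz.de/fullchain.pem;
    ssl_certificate_key /etc/letsencrypt/live/umweltschutz.de/privkey.pem;

    root /var/www/html;          # serve an ordinary static site
}
```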
### 4 How Telemt operates

Once started, Telemt acts as follows:

1) accepts an incoming TCP connection
2) inspects the TLS ClientHello
3) tries to determine whether the connection is valid **MTProxy FakeTLS**

From there, one of two logic paths applies:

---
# Scenario 1 - an MTProxy client with a valid key

If the client has presented a **valid MTProxy key**:

- the connection **stays inside Telemt**
- TLS is used only as **transport masking**
- the regular **MTProxy** logic then takes over

To an outside observer this looks like:

```
TLS connection -> umweltschutz.de
```

even though **Telegram MTProto traffic** is carried inside.
# Scenario 2 - an ordinary TLS client: crawler / scanner / browser

If Telemt does not detect a valid MTProxy key:

the connection **switches into TCP-Splitting / TCP-Splicing mode**.

In this mode Telemt:

1. opens a new TCP connection to

```
mask_host:mask_port
```

2. starts **proxying the TCP traffic**

Important:

* the client's TLS request is **NOT modified**
* **the ClientHello is forwarded "as is", without changes**
* **the SNI stays intact**
* Telemt **does not complete the TLS handshake**; it only redirects it at a lower layer of the network stack - L4

As a result, the upstream server receives **the client's original TLS connection**:

- if it is the nginx stub, it simply serves an ordinary website
- to an outside observer it looks like a regular HTTPS server
# TCP-S / TCP-Splitting / TCP-Splicing

Key properties of the mechanism:

**Telemt acts as a TCP switch:**

1) accepts a connection
2) determines the client type
3) then either:

- handles MTProxy internally
- or proxies the TCP stream

When proxying:

- Telemt **resolves `mask_host` to an IP**
- establishes a TCP connection
- starts a **bidirectional TCP relay**

In doing so:

- the TLS handshake happens **between the client and `mask_host`**
- Telemt operates purely **at L4, as a TCP relay**, just like HAProxy in TCP mode
# Using a third-party domain

An external website can be used as well.

For example:

```toml
[censorship]
tls_domain = "github.com"
mask_host = "github.com"
mask_port = 443
```

or

```toml
[censorship]
mask_host = "140.82.121.4"
```

In this case:

- the censor sees a **TLS connection to github.com**
- ordinary clients/crawlers really do get **the real GitHub**

Telemt simply **proxies the TCP connection to GitHub**.
# What does a traffic analyzer see?

To DPI this looks like:

```
client -> TLS -> github.com
```

or

```
client -> TLS -> umweltschutz.de
```

The TLS handshake looks valid, the SNI matches the domain, and the certificate is correct - it comes from the target `mask_host:mask_port`.
# What does a scanner / crawler see?

If a scanner tries to connect:

```
openssl s_client -connect 198.18.88.88:443 -servername umweltschutz.de
```

it will get **an ordinary stub HTTPS site**.

Because:

- it did not present an MTProxy key
- Telemt forwarded the connection to `mask_host:mask_port`, where nginx is running
# What problem does TLS-Fronting / TCP-Splitting solve?

This architecture solves several censorship-circumvention problems at once.

## 1 Hiding the MTProxy plane from active scanning

Many censors:

- scan IP addresses
- probe for known proxy signatures

Telemt answers such probes with **an ordinary HTTPS site**, so the proxy cannot be discovered by simple scanning.

---

## 2 Disguising traffic as legitimate TLS

To DPI systems the connection looks like:

```
ordinary TLS traffic to a popular domain
```

This makes blocking considerably harder and less predictable.

---

## 3 Resilience to protocol analysis

MTProxy traffic travels **inside a TLS-like stream**, so:

- the characteristic MTProto signatures are not visible
- the connection looks like regular HTTPS

---

## 4 Plausible server behavior

Even if a crawler:

- connects itself
- completes the TLS handshake
- tries to fetch an HTTP response

it will see **a real website**, not telemt.

This removes one of the main telltales relied on by mobile operators' anti-fraud crawlers.
# Diagram
```text
Client
  │
  │ TCP
  │
  V
Telemt
  │
  ├── valid MTProxy key
  │        │
  │        V
  │   MTProxy logic
  │
  └── ordinary TLS client
           │
           V
      TCP-Splitting
           │
           V
  mask_host:mask_port
```
|
||||
|
|
@ -0,0 +1,285 @@
|
|||
# Telemt Runtime Model
|
||||
|
||||
## Scope
|
||||
This document defines runtime concepts used by the Middle-End (ME) transport pipeline and the orchestration logic around it.
|
||||
|
||||
It focuses on:
|
||||
- `ME Pool / Reader / Writer / Refill / Registry`
|
||||
- `Adaptive Floor`
|
||||
- `Trio-State`
|
||||
- `Generation Lifecycle`
|
||||
|
||||
## Core Entities
|
||||
|
||||
### ME Pool
|
||||
`ME Pool` is the runtime orchestrator for all Middle-End writers.
|
||||
|
||||
Responsibilities:
|
||||
- Holds writer inventory by DC/family/endpoint.
|
||||
- Maintains routing primitives and writer selection policy.
|
||||
- Tracks generation state (`active`, `warm`, `draining` context).
|
||||
- Applies runtime policies (floor mode, refill, reconnect, reinit, fallback behavior).
|
||||
- Exposes readiness gates used by admission logic (for conditional accept/cast behavior).
|
||||
|
||||
Non-goals:
|
||||
- It does not own client protocol decoding.
|
||||
- It does not own per-client business policy (quotas/limits).
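
As a rough mental model of the inventory and selection policy (a hypothetical sketch, not the actual Telemt types):

```rust
use std::collections::HashMap;

// Hypothetical shapes, for orientation only.
struct PoolEntry {
    writer_id: u64,
    generation: u32, // pool epoch this writer belongs to
    active: bool,    // contour flag: eligible for new binds
}

struct MePool {
    // writer inventory keyed by (dc, family); endpoint detail omitted here
    inventory: HashMap<(i32, bool), Vec<PoolEntry>>,
}

impl MePool {
    /// Selection sketch: prefer active writers of the newest generation.
    fn select(&self, dc: i32, ipv6: bool) -> Option<u64> {
        self.inventory
            .get(&(dc, ipv6))?
            .iter()
            .filter(|e| e.active)
            .max_by_key(|e| e.generation)
            .map(|e| e.writer_id)
    }
}

fn main() {
    let mut pool = MePool { inventory: HashMap::new() };
    pool.inventory.insert(
        (2, false),
        vec![PoolEntry { writer_id: 7, generation: 3, active: true }],
    );
    assert_eq!(pool.select(2, false), Some(7));
}
```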
|
||||
|
||||
### ME Writer
|
||||
`ME Writer` is a long-lived ME RPC tunnel bound to one concrete ME endpoint (`ip:port`), with:
|
||||
- Outbound command channel (send path).
|
||||
- Associated reader loop (inbound path).
|
||||
- Health/degraded flags.
|
||||
- Contour/state and generation metadata.
|
||||
|
||||
A writer is the actual data plane carrier for client sessions once bound.
|
||||
|
||||
### ME Reader
|
||||
`ME Reader` is the inbound parser/dispatcher for one writer:
|
||||
- Reads/decrypts ME RPC frames.
|
||||
- Validates sequence/checksum.
|
||||
- Routes payloads to client-connection channels via `Registry`.
|
||||
- Emits close/ack/data events and updates telemetry.
|
||||
|
||||
Design intent:
|
||||
- Reader must stay non-blocking as much as possible.
|
||||
- Backpressure on a single client route must not stall the whole writer stream.
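
One way to honor that intent (a sketch under assumed channel types, not Telemt's actual dispatch code) is to route with a non-blocking send, so a full per-client queue becomes a backpressure signal instead of a blocked reader:

```rust
use std::sync::mpsc::{sync_channel, SyncSender, TrySendError};

// Non-blocking dispatch: a slow client must not stall the writer stream.
fn route_payload(tx: &SyncSender<Vec<u8>>, payload: Vec<u8>) -> bool {
    match tx.try_send(payload) {
        Ok(()) => true,
        // queue full: record backpressure / drop per policy, keep reading
        Err(TrySendError::Full(_)) => false,
        // client gone: caller should unbind the route
        Err(TrySendError::Disconnected(_)) => false,
    }
}

fn main() {
    let (tx, rx) = sync_channel::<Vec<u8>>(1);
    assert!(route_payload(&tx, vec![1]));  // fits in the queue
    assert!(!route_payload(&tx, vec![2])); // queue full -> backpressure, no block
    drop(rx);
}
```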
|
||||
|
||||
### Refill
`Refill` is the recovery mechanism that restores writer coverage when capacity drops:
- Per-endpoint restore (same endpoint first).
- Per-DC restore to satisfy required floor.
- Optional outage-mode/shadow behavior for fragile single-endpoint DCs.

Refill works asynchronously and should not block hot routing paths.

### Registry
`Registry` is the routing index between ME and client sessions:
- `conn_id -> client response channel`
- `conn_id <-> writer_id` binding map
- writer activity snapshots and idle tracking

Main invariants:
- A `conn_id` routes to at most one active response channel.
- Writer loss triggers safe unbind/cleanup and close propagation.
- Registry state is the source of truth for active ME-bound session mapping.

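A minimal sketch of the shape such a registry might take; the names and types here are illustrative assumptions, not telemt's actual API:

```rust
use std::collections::HashMap;
use tokio::sync::mpsc;

type ConnId = u64;
type WriterId = u64;

/// Illustrative registry: one response channel per connection, plus
/// the conn <-> writer binding needed for cleanup on writer loss.
struct Registry {
    routes: HashMap<ConnId, mpsc::Sender<Vec<u8>>>, // conn_id -> client response channel
    bindings: HashMap<ConnId, WriterId>,            // conn_id -> owning writer
    by_writer: HashMap<WriterId, Vec<ConnId>>,      // reverse index for writer loss
}

impl Registry {
    /// Invariant: at most one active route per conn_id (insert replaces).
    fn bind(&mut self, conn: ConnId, writer: WriterId, tx: mpsc::Sender<Vec<u8>>) {
        self.routes.insert(conn, tx);
        self.bindings.insert(conn, writer);
        self.by_writer.entry(writer).or_default().push(conn);
    }

    /// Writer loss: unbind every session it carried. Removing a route
    /// drops its sender; once the last sender is gone, the client side
    /// observes the channel closing (close propagation).
    fn on_writer_lost(&mut self, writer: WriterId) {
        for conn in self.by_writer.remove(&writer).unwrap_or_default() {
            self.routes.remove(&conn);
            self.bindings.remove(&conn);
        }
    }
}
```
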
## Adaptive Floor

### What it is
`Adaptive Floor` is a runtime policy that changes target writer count per DC based on observed activity, instead of always holding static peak floor.

### Why it exists
Goals:
- Reduce idle writer churn under low traffic.
- Keep enough warm capacity to avoid client-visible stalls on burst recovery.
- Limit needless reconnect storms on unstable endpoints.

### Behavioral model
- Under activity: floor converges toward configured static requirement.
- Under prolonged idle: floor can shrink to a safe minimum.
- Recovery/grace windows prevent aggressive oscillation.

### Safety constraints
- Never violate minimal survivability floor for a DC group.
- Refill must still restore quickly on demand.
- Floor adaptation must not force-drop already bound healthy sessions.

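A sketch of how such a target could be computed; the thresholds and names are invented for illustration, the real policy lives in the pool:

```rust
/// Illustrative adaptive-floor target: hold the static floor while the
/// DC is active, then shed capacity step by step once idle time exceeds
/// a grace window, never dropping below the survivability minimum.
fn adaptive_floor_target(
    static_floor: usize,
    min_floor: usize, // survivability minimum, never violated
    idle_secs: u64,   // time since last observed activity in this DC
    grace_secs: u64,  // window before any shrinking starts
) -> usize {
    if idle_secs <= grace_secs {
        return static_floor; // active or recently active: hold full floor
    }
    // Shed one writer per elapsed grace window, clamped to min_floor.
    let steps = ((idle_secs - grace_secs) / grace_secs.max(1)) as usize;
    static_floor.saturating_sub(steps).max(min_floor)
}
```
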
## Trio-State

`Trio-State` is writer contouring:
- `Warm`
- `Active`
- `Draining`

### State semantics
- `Warm`: connected and validated, not primary for new binds.
- `Active`: preferred for new binds and normal traffic.
- `Draining`: no new regular binds; existing sessions continue until graceful retirement rules apply.

### Transition intent
- `Warm -> Active`: when coverage/readiness conditions are satisfied.
- `Active -> Draining`: on generation swap, endpoint replacement, or controlled retirement.
- `Draining -> removed`: after drain TTL/force-close policy (or when naturally empty).

This separation reduces SPOF and keeps cutovers predictable.

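The contour reads naturally as a small state machine; a hedged sketch (the actual type names in telemt may differ):

```rust
/// Writer contour as described above; transitions are one-way:
/// Warm -> Active -> Draining -> removal.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
enum Contour {
    Warm,
    Active,
    Draining,
}

impl Contour {
    /// Only Active writers may receive new regular binds.
    fn accepts_new_binds(self) -> bool {
        matches!(self, Contour::Active)
    }

    /// Promote a validated warm writer once coverage gates pass.
    fn promote(self) -> Contour {
        match self {
            Contour::Warm => Contour::Active,
            other => other, // already Active/Draining: no-op
        }
    }

    /// Retire on generation swap, endpoint replacement, or policy.
    fn drain(self) -> Contour {
        Contour::Draining
    }
}
```
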
## Generation Lifecycle

Generation isolates pool epochs during reinit/reconfiguration.

### Lifecycle phases
1. `Bootstrap`: initial writers are established.
2. `Warmup`: next generation writers are created and validated.
3. `Activation`: generation promoted to active when coverage gate passes.
4. `Drain`: previous generation becomes draining, existing sessions are allowed to finish.
5. `Retire`: old generation writers are removed after graceful rules.

### Operational guarantees
- No partial generation activation without minimum coverage.
- Existing healthy client sessions should not be dropped just because a new generation appears.
- Draining generation exists to absorb in-flight traffic during swap.

### Readiness and admission
Pool readiness is not equivalent to "all endpoints fully saturated".
Typical gating strategy:
- Open admission when per-DC minimal alive coverage exists.
- Continue background saturation for multi-endpoint DCs.

This keeps startup latency low while preserving eventual full capacity.

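A hedged sketch of such an admission gate; the per-DC input shape is assumed for illustration:

```rust
/// Illustrative readiness gate: admission opens once every DC group has
/// at least `min_alive` healthy writers, even while background refill
/// is still saturating the remaining endpoints.
fn admission_open(alive_per_dc: &[(u32, usize)], min_alive: usize) -> bool {
    alive_per_dc.iter().all(|&(_dc, alive)| alive >= min_alive)
}
```
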
## Interactions Between Concepts

- `Generation` defines pool epochs.
- `Trio-State` defines per-writer role inside/around those epochs.
- `Adaptive Floor` defines how much capacity should be maintained right now.
- `Refill` is the actuator that closes the gap between desired and current capacity.
- `Registry` keeps per-session routing correctness while all of the above changes over time.

## Architectural Approach

### Layered Design
The runtime is intentionally split into two planes:
- `Control Plane`: decides desired topology and policy (`floor`, `generation swap`, `refill`, `fallback`).
- `Data Plane`: executes packet/session transport (`reader`, `writer`, routing, acks, close propagation).

Architectural rule:
- Control Plane may change writer inventory and policy.
- Data Plane must remain stable and low-latency while those changes happen.

### Ownership Model
Ownership is centered around explicit state domains:
- `MePool` owns writer lifecycle and policy state.
- `Registry` owns per-connection routing bindings.
- `Writer task` owns outbound ME socket send progression.
- `Reader task` owns inbound ME socket parsing and event dispatch.

This prevents accidental cross-layer mutation and keeps invariants local.

### Control Plane Responsibilities
Control Plane is event-driven and policy-driven:
- Startup initialization and readiness gates.
- Runtime reinit (periodic or config-triggered).
- Coverage checks per DC/family/endpoint group.
- Floor enforcement (static/adaptive).
- Refill scheduling and retry orchestration.
- Generation transition (`warm -> active`, previous `active -> draining`).

Control Plane must prioritize determinism over short-term aggressiveness.

### Data Plane Responsibilities
Data Plane is throughput-first and allocation-sensitive:
- Session bind to writer.
- Per-frame parsing/validation and dispatch.
- Ack and close signal propagation.
- Route drop behavior under missing connection or closed channel.
- Minimal critical logging in hot path.

Data Plane should avoid waiting on operations that are not strictly required for frame correctness.

## Concurrency and Synchronization

### Concurrency Principles
- Per-writer isolation: each writer has independent send/read task loops.
- Per-connection isolation: client channel state is scoped by `conn_id`.
- Asynchronous recovery: refill/reconnect runs outside the packet hot path.

### Synchronization Strategy
- Shared maps use fine-grained, short-lived locking.
- Read-mostly paths avoid broad write-lock windows.
- Backpressure decisions are localized at route/channel boundary.

Design target:
- A slow consumer should degrade only itself (or its route), not global writer progress.

### Cancellation and Shutdown
Writer and reader loops are cancellation-aware:
- explicit cancel token / close command support;
- safe unbind and cleanup via registry;
- deterministic order: stop admission -> drain/close -> release resources.

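A minimal sketch of a cancellation-aware reader loop, assuming `tokio` plus `tokio_util`'s `CancellationToken`; the real loop also decrypts and validates frames:

```rust
use tokio::io::{AsyncRead, AsyncReadExt};
use tokio_util::sync::CancellationToken;

/// Illustrative cancellation-aware read loop: the cancel token can win
/// the race at any await point, after which the caller runs the
/// deterministic teardown (stop admission -> drain/close -> release).
async fn reader_loop<R: AsyncRead + Unpin>(
    mut socket: R,
    cancel: CancellationToken,
) -> std::io::Result<()> {
    let mut buf = vec![0u8; 64 * 1024];
    loop {
        tokio::select! {
            _ = cancel.cancelled() => return Ok(()), // orderly shutdown
            n = socket.read(&mut buf) => {
                let n = n?;
                if n == 0 {
                    return Ok(()); // peer closed the connection
                }
                // parse/validate/dispatch buf[..n] here (elided)
            }
        }
    }
}
```
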
## Consistency Model

### Session Consistency
For one `conn_id`:
- exactly one active route target at a time;
- close and unbind must be idempotent;
- writer loss must not leave dangling bindings.

### Generation Consistency
Generational consistency guarantees:
- New generation is not promoted before minimum coverage gate.
- Previous generation remains available in `draining` state during handover.
- Forced retirement is policy-bound (`drain ttl`, optional force-close), not immediate.

### Policy Consistency
Policy changes (`adaptive/static floor`, fallback mode, retries) should apply without violating established active-session routing invariants.

## Backpressure and Flow Control

### Route-Level Backpressure
Route channels are bounded by design.
When pressure increases:
- short burst absorption is allowed;
- prolonged congestion triggers controlled drop semantics;
- drop accounting is explicit via metrics/counters.

### Reader Non-Blocking Priority
Inbound ME reader path should never be serialized behind one congested client route.
Practical implication:
- prefer non-blocking route attempt in the parser loop;
- move heavy recovery to async side paths.

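A hedged sketch of that non-blocking route attempt with a bounded `tokio` channel; the drop counter is an invented stand-in for real metrics:

```rust
use std::sync::atomic::{AtomicU64, Ordering};
use tokio::sync::mpsc;

static ROUTE_DROPS: AtomicU64 = AtomicU64::new(0); // illustrative counter

/// Non-blocking dispatch from the reader loop: a full or closed
/// per-client channel costs one counted drop, never a stall of the
/// shared writer stream.
fn route_frame(tx: &mpsc::Sender<Vec<u8>>, frame: Vec<u8>) {
    match tx.try_send(frame) {
        Ok(()) => {}
        Err(mpsc::error::TrySendError::Full(_))
        | Err(mpsc::error::TrySendError::Closed(_)) => {
            ROUTE_DROPS.fetch_add(1, Ordering::Relaxed);
        }
    }
}
```
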
## Failure Domain Strategy

### Endpoint-Level Failure
Failure of one endpoint should trigger endpoint-scoped recovery first:
- same endpoint reconnect;
- endpoint replacement within same DC group if applicable.

### DC-Level Degradation
If a DC group cannot satisfy floor:
- keep service via remaining coverage if policy allows;
- continue asynchronous refill saturation in background.

### Whole-Pool Readiness Loss
If no sufficient ME coverage exists:
- admission gate can hold new accepts (conditional policy);
- existing sessions should continue when their path remains healthy.

## Performance Architecture Notes

### Hotpath Discipline
Allowed in hotpath:
- fixed-size parsing and cheap validation;
- bounded channel operations;
- precomputed or low-allocation access patterns.

Avoid in hotpath:
- repeated expensive decoding;
- broad locks with awaits inside critical sections;
- verbose high-frequency logging.

### Throughput Stability Over Peak Spikes
Architecture prefers stable throughput and predictable latency over short peak gains that increase churn or long-tail reconnect times.

## Evolution and Extension Rules

To evolve this model safely:
- Add new policy knobs in Control Plane first.
- Keep Data Plane contracts stable (`conn_id`, route semantics, close semantics).
- Validate generation and registry invariants before enabling by default.
- Introduce new retry/recovery strategies behind explicit config.

## Failure and Recovery Notes

- Single-endpoint DC failure is a normal degraded-mode case; policy should prioritize fast reconnect and optional shadow/probing strategies.
- Idle close by the peer should be treated as expected when the upstream enforces an idle timeout.
- Reconnect backoff must protect against synchronized churn while still allowing fast first retries (see the sketch after this list).
- Fallback (`ME -> direct DC`) is a policy switch, not a transport bug by itself.

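One common way to satisfy both backoff constraints is capped exponential growth with jitter; a sketch assuming the `rand` 0.8 API, with illustrative constants:

```rust
use rand::Rng;
use std::time::Duration;

/// Illustrative reconnect backoff: the first retry stays fast, later
/// retries grow exponentially with jitter so that many writers losing
/// the same endpoint do not reconnect in lockstep.
fn backoff_delay(attempt: u32) -> Duration {
    let base_ms = 100u64; // keeps the first retry fast
    let cap_ms = 30_000u64; // upper bound on any single wait
    let exp = base_ms.saturating_mul(1u64 << attempt.min(8));
    let upper = exp.min(cap_ms).max(base_ms);
    let jittered = rand::thread_rng().gen_range(base_ms..=upper);
    Duration::from_millis(jittered)
}
```
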
## Terminology Summary
- `Coverage`: enough live writers to satisfy per-DC acceptance policy.
- `Floor`: target minimum writer count policy.
- `Churn`: frequent writer reconnect/remove cycles.
- `Hotpath`: per-packet/per-connection data path where extra waits/allocations are expensive.

@ -0,0 +1,285 @@
# Telemt Runtime Model

## Scope
This document captures the key runtime concepts of the Middle-End (ME) pipeline and the orchestration around it.

Focus:
- `ME Pool / Reader / Writer / Refill / Registry`
- `Adaptive Floor`
- `Trio-State`
- `Generation Lifecycle`

## Core Entities

### ME Pool
`ME Pool` is the central orchestrator of all Middle-End writers.

Responsibilities:
- holds the writer inventory by DC/family/endpoint;
- manages writer selection and routing;
- tracks generation state (`active`, `warm`, `draining` context);
- applies runtime policies (floor, refill, reconnect, reinit, fallback);
- exposes readiness signals for the admission logic (conditional accept/cast).

What it does not do:
- it does not decode the client protocol;
- it does not implement per-user business policy (quotas/limits).

### ME Writer
`ME Writer` is a long-lived ME RPC channel to a concrete endpoint (`ip:port`), which has:
- a command channel for sending;
- an associated reader loop for the inbound stream;
- health/degradation flags;
- contour/state and generation metadata.

The writer is the actual data-plane carrier of client sessions once bound.

### ME Reader
`ME Reader` is the inbound parser/dispatcher of a single writer:
- reads and decrypts ME RPC frames;
- validates sequence/checksum;
- routes payloads into client channels via `Registry`;
- handles close/ack/data and updates telemetry.

Engineering principle:
- The reader must remain non-blocking.
- Backpressure from a single client session must not stall the writer's entire stream.

### Refill
`Refill` is the mechanism that restores writer coverage after a drop:
- restore on the same endpoint first;
- restore per DC up to the required floor;
- optional outage/shadow modes for fragile single-endpoint DCs.

Refill runs asynchronously and must not block the hotpath.

### Registry
`Registry` is the routing index between ME and client sessions:
- `conn_id -> client response channel`;
- the `conn_id <-> writer_id` binding map;
- writer activity snapshots and idle tracking.

Key invariants:
- a single `conn_id` routes to at most one active response channel;
- losing a writer leads to a safe unbind/cleanup and a close being sent;
- `Registry` is the source of truth for active ME bindings.

## Adaptive Floor

### What it is
`Adaptive Floor` is a runtime policy that dynamically changes the target number of writers per DC depending on activity, rather than always holding a fixed static floor.

### Why
Goals:
- reduce churn under idle traffic;
- keep enough warmed-up capacity for sudden bursts;
- cut down needless reconnect storms on unstable endpoints.

### Behavioral model
- under activity the floor converges toward the static requirement;
- under prolonged idle the floor may drop to a safe minimum;
- grace/recovery windows keep the system from flapping too abruptly.

### Safety constraints
- the minimal survivability floor of a DC group must never be violated;
- refill must be able to grow coverage back quickly on demand;
- adaptation must not forcibly drop already bound healthy sessions.

## Trio-State

`Trio-State` is the contour role of a writer:
- `Warm`
- `Active`
- `Draining`

### State semantics
- `Warm`: the writer is connected and valid, but not primary for new binds.
- `Active`: preferred for new binds and regular traffic.
- `Draining`: no new regular binds are assigned; current sessions live on until the graceful retirement rules apply.

### Transition logic
- `Warm -> Active`: when the coverage/readiness conditions are met.
- `Active -> Draining`: on a generation swap, endpoint replacement, or controlled retirement.
- `Draining -> removed`: after the drain TTL/force-close policy (or natural emptying).

This separation reduces SPOF risks and makes cutovers predictable.

## Generation Lifecycle

A generation isolates pool epochs during reinit/reconfiguration.

### Lifecycle phases
1. `Bootstrap`: the initial set of writers is brought up.
2. `Warmup`: a new generation is created and validated.
3. `Activation`: the new generation becomes active after passing the coverage gate.
4. `Drain`: the previous generation is switched to draining; current sessions are allowed to finish.
5. `Retire`: the old generation is removed under the graceful rules.

### Operational guarantees
- a generation must not be activated partially, without minimum coverage;
- healthy clients must not be lost merely because a new generation appeared;
- the draining generation serves as a buffer for in-flight traffic during the swap.

### Readiness and client admission
Pool readiness is not the same as "all endpoints fully saturated".
Typical strategy:
- open admission once minimally sufficient alive coverage per DC exists;
- keep saturating multi-endpoint DCs in parallel.

This reduces startup latency while still reaching full capacity.

## How the Concepts Relate

- `Generation` defines pool epochs.
- `Trio-State` defines the role of each writer within/between epochs.
- `Adaptive Floor` defines how much capacity is needed right now.
- `Refill` is the actuator that closes the gap between desired and current capacity.
- `Registry` guarantees correct session routing while everything above changes.

## Architectural Approach

### Layered model
The runtime is deliberately split into two planes:
- `Control Plane`: makes decisions about the target topology and policies (`floor`, `generation swap`, `refill`, `fallback`).
- `Data Plane`: executes session and packet transport (`reader`, `writer`, routing, ack, close).

Key rule:
- the Control Plane may change the writer set and policy;
- the Data Plane must stay stable and low-latency while those changes happen.

### State ownership model
Ownership is split by domain:
- `MePool` owns the writer lifecycle and the policy state.
- `Registry` owns the routing bindings of client sessions.
- `Writer task` owns the outbound progression of the ME socket.
- `Reader task` owns inbound parsing and event dispatch.

This limits side-effect mutations and keeps invariants local.

### Control Plane duties
The Control Plane is event-driven and policy-oriented:
- startup initialization and readiness gates;
- runtime reinit (periodic and/or triggered by config changes);
- coverage checks per DC/family/endpoint group;
- floor policy enforcement (static/adaptive);
- refill scheduling and retry orchestration;
- generation transitions (`warm -> active`, the previous `active -> draining`).

Determinism matters more to it than aggressive short-term reaction.

### Data Plane duties
The Data Plane targets throughput and predictable latency:
- binding a client session to a writer;
- per-frame parsing/validation/dispatch;
- ack/close propagation;
- correct handling of a missing conn/closed channel;
- minimal log noise in the hotpath.

The Data Plane must not wait on operations that are not critical to the correctness of the current frame.

## Concurrency and Synchronization

### Concurrency principles
- Per-writer isolation: each writer has its own independent send/read loops.
- Per-session isolation: channel state is local to a `conn_id`.
- Asynchronous recovery: refill/reconnect run outside the packet hotpath.

### Synchronization strategy
- Shared maps use short, narrow lock sections.
- Read-heavy paths avoid long write-lock windows.
- Backpressure decisions are localized at the route/channel boundary.

Goal:
- a slow consumer should degrade locally, without stopping the writer's global progress.

### Cancellation and shutdown
Reader/writer loops must be cancellation-aware:
- explicit cancel tokens / close commands;
- safe unbind/cleanup via the registry;
- deterministic order: stop admission -> drain/close -> release resources.

## Consistency Model

### Session consistency
For a single `conn_id`:
- exactly one active route target at a time;
- close/unbind operations are idempotent;
- losing a writer leaves no dangling bindings.

### Generation consistency
Generation guarantees:
- a new generation is not activated before passing the minimum coverage gate;
- the previous generation stays in `draining` for the duration of the handover;
- forced writer retirement is bounded by policy (`drain ttl`, optional force-close) rather than immediate.

### Policy consistency
A policy change (`adaptive/static floor`, fallback mode, retries) must not break the routing invariants of already active sessions.

## Backpressure and Flow Control

### Route-level backpressure
Route channels are deliberately bounded.
As load grows:
- a short burst is absorbed;
- prolonged overload turns into controlled drop semantics;
- every drop scenario must be transparently visible in the metrics.

### Non-blocking reader priority
The inbound ME reader path must not get serialized because of a single overloaded client session.
In practice this means:
- use a non-blocking route attempt in the parser loop;
- move heavy recovery into asynchronous side paths.

## Failure Domain Strategy

### Failure of a single endpoint
Endpoint-local recovery is applied first:
- reconnect to the same endpoint;
- then endpoint replacement within the same DC group (when available).

### DC-level degradation
If a DC group cannot reach its floor:
- service is kept on the remaining coverage (if policy allows);
- saturation refill continues asynchronously in the background.

### Loss of whole-pool readiness
If there is no sufficient ME coverage:
- the admission gate may temporarily stop accepting new connections (conditional policy);
- already active sessions keep working as long as their route stays healthy.

## Performance Notes

### Hotpath discipline
Acceptable in the hotpath:
- fixed-size, cheap parsing/validation;
- bounded channel operations;
- precomputed/low-allocation data access.

Undesirable in the hotpath:
- repeated expensive decodes;
- wide lock sections with `await` inside;
- high-frequency verbose logging.

### Stability over peaks
The architecture deliberately favors stable throughput and predictable latency over short-term peaks paid for with churn and long-tail reconnects.

## Model Evolution Rules

To extend the model safely:
- introduce new policy knobs in the Control Plane first;
- keep the Data Plane contracts (`conn_id`, route/close semantics) stable;
- verify the generation/registry invariants before enabling anything by default;
- introduce new recovery/retry strategies behind an explicit config flag.

## Failure and Recovery Nuances

- a single-endpoint DC going down is a normal degraded scenario; the priorities are fast reconnect and, when needed, shadow/probing;
- an idle close from the peer should be treated as a normal event under an upstream idle timeout;
- reconnect backoff must limit synchronized churn while keeping the first retries fast;
- fallback (`ME -> direct DC`) is a switchable policy branch, not automatically a sign of a transport bug.

## Short Glossary
- `Coverage`: a sufficient number of live writers for the per-DC admission policy.
- `Floor`: the target minimum writer capacity.
- `Churn`: frequent writer reconnect/remove cycles.
- `Hotpath`: the per-packet/per-connection path where any extra waits and allocations are especially costly.

Binary file not shown.
|
After Width: | Height: | Size: 838 KiB |
install.sh
|
|
@ -1,73 +1,719 @@
|
|||
sudo bash -c '
|
||||
set -e
|
||||
#!/bin/sh
|
||||
set -eu
|
||||
|
||||
# --- Проверка на существующую установку ---
|
||||
if systemctl list-unit-files | grep -q telemt.service; then
|
||||
# --- РЕЖИМ ОБНОВЛЕНИЯ ---
|
||||
echo "--- Обнаружена существующая установка Telemt. Запускаю обновление... ---"
|
||||
REPO="${REPO:-telemt/telemt}"
|
||||
BIN_NAME="${BIN_NAME:-telemt}"
|
||||
INSTALL_DIR="${INSTALL_DIR:-/bin}"
|
||||
CONFIG_DIR="${CONFIG_DIR:-/etc/telemt}"
|
||||
CONFIG_FILE="${CONFIG_FILE:-${CONFIG_DIR}/telemt.toml}"
|
||||
WORK_DIR="${WORK_DIR:-/opt/telemt}"
|
||||
TLS_DOMAIN="${TLS_DOMAIN:-petrovich.ru}"
|
||||
SERVER_PORT="${SERVER_PORT:-443}"
|
||||
USER_SECRET=""
|
||||
AD_TAG=""
|
||||
SERVICE_NAME="telemt"
|
||||
TEMP_DIR=""
|
||||
SUDO=""
|
||||
CONFIG_PARENT_DIR=""
|
||||
SERVICE_START_FAILED=0
|
||||
|
||||
echo "[*] Остановка службы telemt..."
|
||||
systemctl stop telemt || true # Игнорируем ошибку, если служба уже остановлена
|
||||
PORT_PROVIDED=0
|
||||
SECRET_PROVIDED=0
|
||||
AD_TAG_PROVIDED=0
|
||||
DOMAIN_PROVIDED=0
|
||||
|
||||
echo "[1/2] Скачивание последней версии Telemt..."
|
||||
wget -qO- "https://github.com/telemt/telemt/releases/latest/download/telemt-$(uname -m)-linux-$(ldd --version 2>&1 | grep -iq musl && echo musl || echo gnu).tar.gz" | tar -xz
|
||||
ACTION="install"
|
||||
TARGET_VERSION="${VERSION:-latest}"
|
||||
|
||||
echo "[1/2] Замена исполняемого файла в /usr/local/bin..."
|
||||
mv telemt /usr/local/bin/telemt
|
||||
chmod +x /usr/local/bin/telemt
|
||||
while [ $# -gt 0 ]; do
|
||||
case "$1" in
|
||||
-h|--help) ACTION="help"; shift ;;
|
||||
-d|--domain)
|
||||
if [ "$#" -lt 2 ] || [ -z "$2" ]; then
|
||||
printf '[ERROR] %s requires a domain argument.\n' "$1" >&2
|
||||
exit 1
|
||||
fi
|
||||
TLS_DOMAIN="$2"; DOMAIN_PROVIDED=1; shift 2 ;;
|
||||
-p|--port)
|
||||
if [ "$#" -lt 2 ] || [ -z "$2" ]; then
|
||||
printf '[ERROR] %s requires a port argument.\n' "$1" >&2; exit 1
|
||||
fi
|
||||
case "$2" in
|
||||
*[!0-9]*) printf '[ERROR] Port must be a valid number.\n' >&2; exit 1 ;;
|
||||
esac
|
||||
port_num="$(printf '%s\n' "$2" | sed 's/^0*//')"
|
||||
[ -z "$port_num" ] && port_num="0"
|
||||
if [ "${#port_num}" -gt 5 ] || [ "$port_num" -lt 1 ] || [ "$port_num" -gt 65535 ]; then
|
||||
printf '[ERROR] Port must be between 1 and 65535.\n' >&2; exit 1
|
||||
fi
|
||||
SERVER_PORT="$port_num"; PORT_PROVIDED=1; shift 2 ;;
|
||||
-s|--secret)
|
||||
if [ "$#" -lt 2 ] || [ -z "$2" ]; then
|
||||
printf '[ERROR] %s requires a secret argument.\n' "$1" >&2; exit 1
|
||||
fi
|
||||
case "$2" in
|
||||
*[!0-9a-fA-F]*)
|
||||
printf '[ERROR] Secret must contain only hex characters.\n' >&2; exit 1 ;;
|
||||
esac
|
||||
if [ "${#2}" -ne 32 ]; then
|
||||
printf '[ERROR] Secret must be exactly 32 chars.\n' >&2; exit 1
|
||||
fi
|
||||
USER_SECRET="$2"; SECRET_PROVIDED=1; shift 2 ;;
|
||||
-a|--ad-tag|--ad_tag)
|
||||
if [ "$#" -lt 2 ] || [ -z "$2" ]; then
|
||||
printf '[ERROR] %s requires an ad_tag argument.\n' "$1" >&2; exit 1
|
||||
fi
|
||||
AD_TAG="$2"; AD_TAG_PROVIDED=1; shift 2 ;;
|
||||
uninstall|--uninstall)
|
||||
if [ "$ACTION" != "purge" ]; then ACTION="uninstall"; fi
|
||||
shift ;;
|
||||
purge|--purge) ACTION="purge"; shift ;;
|
||||
install|--install) ACTION="install"; shift ;;
|
||||
-*) printf '[ERROR] Unknown option: %s\n' "$1" >&2; exit 1 ;;
|
||||
*)
|
||||
if [ "$ACTION" = "install" ]; then TARGET_VERSION="$1"
|
||||
else printf '[WARNING] Ignoring extra argument: %s\n' "$1" >&2; fi
|
||||
shift ;;
|
||||
esac
|
||||
done
|
||||
|
||||
echo "[2/2] Запуск службы..."
|
||||
systemctl start telemt
|
||||
say() {
|
||||
if [ "$#" -eq 0 ] || [ -z "${1:-}" ]; then
|
||||
printf '\n'
|
||||
else
|
||||
printf '[INFO] %s\n' "$*"
|
||||
fi
|
||||
}
|
||||
die() { printf '[ERROR] %s\n' "$*" >&2; exit 1; }
|
||||
|
||||
echo "--- Обновление Telemt успешно завершено! ---"
|
||||
echo
|
||||
echo "Для проверки статуса службы выполните:"
|
||||
echo " systemctl status telemt"
|
||||
write_root() { $SUDO sh -c 'cat > "$1"' _ "$1"; }
|
||||
|
||||
else
|
||||
# --- РЕЖИМ НОВОЙ УСТАНОВКИ ---
|
||||
echo "--- Начало автоматической установки Telemt ---"
|
||||
cleanup() {
|
||||
if [ -n "${TEMP_DIR:-}" ] && [ -d "$TEMP_DIR" ]; then
|
||||
rm -rf -- "$TEMP_DIR"
|
||||
fi
|
||||
}
|
||||
trap cleanup EXIT INT TERM
|
||||
|
||||
# Шаг 1: Скачивание и установка бинарного файла
|
||||
echo "[1/5] Скачивание последней версии Telemt..."
|
||||
wget -qO- "https://github.com/telemt/telemt/releases/latest/download/telemt-$(uname -m)-linux-$(ldd --version 2>&1 | grep -iq musl && echo musl || echo gnu).tar.gz" | tar -xz
|
||||
show_help() {
|
||||
say "Usage: $0 [ <version> | install | uninstall | purge ] [ options ]"
|
||||
say " <version> Install specific version (e.g. 3.3.15, default: latest)"
|
||||
say " install Install the latest version"
|
||||
say " uninstall Remove the binary and service"
|
||||
say " purge Remove everything including configuration, data, and user"
|
||||
say ""
|
||||
say "Options:"
|
||||
say " -d, --domain Set TLS domain (default: petrovich.ru)"
|
||||
say " -p, --port Set server port (default: 443)"
|
||||
say " -s, --secret Set specific user secret (32 hex characters)"
|
||||
say " -a, --ad-tag Set ad_tag"
|
||||
exit 0
|
||||
}
|
||||
|
||||
echo "[1/5] Перемещение исполняемого файла в /usr/local/bin и установка прав..."
|
||||
mv telemt /usr/local/bin/telemt
|
||||
chmod +x /usr/local/bin/telemt
|
||||
check_os_entity() {
|
||||
if command -v getent >/dev/null 2>&1; then getent "$1" "$2" >/dev/null 2>&1
|
||||
else grep -q "^${2}:" "/etc/$1" 2>/dev/null; fi
|
||||
}
|
||||
|
||||
# Шаг 2: Генерация секрета
|
||||
echo "[2/5] Генерация секретного ключа..."
|
||||
SECRET=$(openssl rand -hex 16)
|
||||
normalize_path() {
|
||||
printf '%s\n' "$1" | tr -s '/' | sed 's|/$||; s|^$|/|'
|
||||
}
|
||||
|
||||
# Шаг 3: Создание файла конфигурации
|
||||
echo "[3/5] Создание файла конфигурации /etc/telemt.toml..."
|
||||
printf "# === General Settings ===\n[general]\n[general.modes]\nclassic = false\nsecure = false\ntls = true\n\n# === Anti-Censorship & Masking ===\n[censorship]\n# !!! ВАЖНО: Замените на ваш домен или домен, который вы хотите использовать для маскировки !!!\ntls_domain = \"petrovich.ru\"\n\n[access.users]\nhello = \"%s\"\n" "$SECRET" > /etc/telemt.toml
|
||||
get_realpath() {
|
||||
path_in="$1"
|
||||
case "$path_in" in /*) ;; *) path_in="$(pwd)/$path_in" ;; esac
|
||||
|
||||
# Шаг 4: Создание службы Systemd
|
||||
echo "[4/5] Создание службы systemd..."
|
||||
printf "[Unit]\nDescription=Telemt Proxy\nAfter=network.target\n\n[Service]\nType=simple\nExecStart=/usr/local/bin/telemt /etc/telemt.toml\nRestart=on-failure\nRestartSec=5\nLimitNOFILE=65536\n\n[Install]\nWantedBy=multi-user.target\n" > /etc/systemd/system/telemt.service
|
||||
if command -v realpath >/dev/null 2>&1; then
|
||||
if realpath_out="$(realpath -m "$path_in" 2>/dev/null)"; then
|
||||
printf '%s\n' "$realpath_out"
|
||||
return
|
||||
fi
|
||||
fi
|
||||
|
||||
# Шаг 5: Запуск службы
|
||||
echo "[5/5] Перезагрузка systemd, запуск и включение службы telemt..."
|
||||
systemctl daemon-reload
|
||||
systemctl start telemt
|
||||
systemctl enable telemt
|
||||
if command -v readlink >/dev/null 2>&1; then
|
||||
resolved_path="$(readlink -f "$path_in" 2>/dev/null || true)"
|
||||
if [ -n "$resolved_path" ]; then
|
||||
printf '%s\n' "$resolved_path"
|
||||
return
|
||||
fi
|
||||
fi
|
||||
|
||||
echo "--- Установка и запуск Telemt успешно завершены! ---"
|
||||
echo
|
||||
echo "ВАЖНАЯ ИНФОРМАЦИЯ:"
|
||||
echo "==================="
|
||||
echo "1. Вам НЕОБХОДИМО отредактировать файл /etc/telemt.toml и заменить '\''petrovich.ru'\'' на другой домен"
|
||||
echo " с помощью команды:"
|
||||
echo " nano /etc/telemt.toml"
|
||||
echo " После редактирования файла перезапустите службу командой:"
|
||||
echo " sudo systemctl restart telemt"
|
||||
echo
|
||||
echo "2. Для проверки статуса службы выполните команду:"
|
||||
echo " systemctl status telemt"
|
||||
echo
|
||||
echo "3. Для получения ссылок на подключение выполните команду:"
|
||||
echo " journalctl -u telemt -n -g '\''links'\'' --no-pager -o cat | tac"
|
||||
fi
|
||||
'
|
||||
d="${path_in%/*}"; b="${path_in##*/}"
|
||||
if [ -z "$d" ]; then d="/"; fi
|
||||
if [ "$d" = "$path_in" ]; then d="/"; b="$path_in"; fi
|
||||
|
||||
if [ -d "$d" ]; then
|
||||
abs_d="$(cd "$d" >/dev/null 2>&1 && pwd || true)"
|
||||
if [ -n "$abs_d" ]; then
|
||||
if [ "$b" = "." ] || [ -z "$b" ]; then printf '%s\n' "$abs_d"
|
||||
elif [ "$abs_d" = "/" ]; then printf '/%s\n' "$b"
|
||||
else printf '%s/%s\n' "$abs_d" "$b"; fi
|
||||
else
|
||||
normalize_path "$path_in"
|
||||
fi
|
||||
else
|
||||
normalize_path "$path_in"
|
||||
fi
|
||||
}
|
||||
|
||||
get_svc_mgr() {
|
||||
if command -v systemctl >/dev/null 2>&1 && [ -d /run/systemd/system ]; then echo "systemd"
|
||||
elif command -v rc-service >/dev/null 2>&1; then echo "openrc"
|
||||
else echo "none"; fi
|
||||
}
|
||||
|
||||
is_config_exists() {
|
||||
if [ -n "$SUDO" ]; then
|
||||
$SUDO sh -c '[ -f "$1" ]' _ "$CONFIG_FILE"
|
||||
else
|
||||
[ -f "$CONFIG_FILE" ]
|
||||
fi
|
||||
}
|
||||
|
||||
verify_common() {
|
||||
[ -n "$BIN_NAME" ] || die "BIN_NAME cannot be empty."
|
||||
[ -n "$INSTALL_DIR" ] || die "INSTALL_DIR cannot be empty."
|
||||
[ -n "$CONFIG_DIR" ] || die "CONFIG_DIR cannot be empty."
|
||||
[ -n "$CONFIG_FILE" ] || die "CONFIG_FILE cannot be empty."
|
||||
|
||||
case "${INSTALL_DIR}${CONFIG_DIR}${WORK_DIR}${CONFIG_FILE}" in
|
||||
*[!a-zA-Z0-9_./-]*) die "Invalid characters in paths." ;;
|
||||
esac
|
||||
|
||||
case "$TARGET_VERSION" in *[!a-zA-Z0-9_.-]*) die "Invalid characters in version." ;; esac
|
||||
case "$BIN_NAME" in *[!a-zA-Z0-9_-]*) die "Invalid characters in BIN_NAME." ;; esac
|
||||
|
||||
INSTALL_DIR="$(get_realpath "$INSTALL_DIR")"
|
||||
CONFIG_DIR="$(get_realpath "$CONFIG_DIR")"
|
||||
WORK_DIR="$(get_realpath "$WORK_DIR")"
|
||||
CONFIG_FILE="$(get_realpath "$CONFIG_FILE")"
|
||||
|
||||
CONFIG_PARENT_DIR="${CONFIG_FILE%/*}"
|
||||
if [ -z "$CONFIG_PARENT_DIR" ]; then CONFIG_PARENT_DIR="/"; fi
|
||||
if [ "$CONFIG_PARENT_DIR" = "$CONFIG_FILE" ]; then CONFIG_PARENT_DIR="."; fi
|
||||
|
||||
if [ "$(id -u)" -eq 0 ]; then
|
||||
SUDO=""
|
||||
else
|
||||
command -v sudo >/dev/null 2>&1 || die "This script requires root or sudo."
|
||||
SUDO="sudo"
|
||||
if ! sudo -n true 2>/dev/null; then
|
||||
if ! [ -t 0 ]; then
|
||||
die "sudo requires a password, but no TTY detected."
|
||||
fi
|
||||
fi
|
||||
fi
|
||||
|
||||
if [ -n "$SUDO" ]; then
|
||||
if $SUDO sh -c '[ -d "$1" ]' _ "$CONFIG_FILE"; then
|
||||
die "Safety check failed: CONFIG_FILE '$CONFIG_FILE' is a directory."
|
||||
fi
|
||||
elif [ -d "$CONFIG_FILE" ]; then
|
||||
die "Safety check failed: CONFIG_FILE '$CONFIG_FILE' is a directory."
|
||||
fi
|
||||
|
||||
for cmd in id uname awk grep find rm chown chmod mv mktemp mkdir tr dd sed ps head sleep cat tar gzip; do
|
||||
command -v "$cmd" >/dev/null 2>&1 || die "Required command not found: $cmd"
|
||||
done
|
||||
}
|
||||
|
||||
verify_install_deps() {
|
||||
command -v curl >/dev/null 2>&1 || command -v wget >/dev/null 2>&1 || die "Neither curl nor wget is installed."
|
||||
command -v cp >/dev/null 2>&1 || command -v install >/dev/null 2>&1 || die "Need cp or install"
|
||||
|
||||
if ! command -v setcap >/dev/null 2>&1 || ! command -v conntrack >/dev/null 2>&1; then
|
||||
if command -v apk >/dev/null 2>&1; then
|
||||
$SUDO apk add --no-cache libcap-utils libcap conntrack-tools >/dev/null 2>&1 || true
|
||||
elif command -v apt-get >/dev/null 2>&1; then
|
||||
$SUDO env DEBIAN_FRONTEND=noninteractive apt-get install -y -q libcap2-bin conntrack >/dev/null 2>&1 || {
|
||||
$SUDO env DEBIAN_FRONTEND=noninteractive apt-get update -q >/dev/null 2>&1 || true
|
||||
$SUDO env DEBIAN_FRONTEND=noninteractive apt-get install -y -q libcap2-bin conntrack >/dev/null 2>&1 || true
|
||||
}
|
||||
elif command -v dnf >/dev/null 2>&1; then $SUDO dnf install -y -q libcap conntrack-tools >/dev/null 2>&1 || true
|
||||
elif command -v yum >/dev/null 2>&1; then $SUDO yum install -y -q libcap conntrack-tools >/dev/null 2>&1 || true
|
||||
fi
|
||||
fi
|
||||
}
|
||||
|
||||
check_port_availability() {
|
||||
port_info=""
|
||||
|
||||
if command -v ss >/dev/null 2>&1; then
|
||||
port_info=$($SUDO ss -tulnp 2>/dev/null | grep -E ":${SERVER_PORT}([[:space:]]|$)" || true)
|
||||
elif command -v netstat >/dev/null 2>&1; then
|
||||
port_info=$($SUDO netstat -tulnp 2>/dev/null | grep -E ":${SERVER_PORT}([[:space:]]|$)" || true)
|
||||
elif command -v lsof >/dev/null 2>&1; then
|
||||
port_info=$($SUDO lsof -i :${SERVER_PORT} 2>/dev/null | grep LISTEN || true)
|
||||
else
|
||||
say "[WARNING] Network diagnostic tools (ss, netstat, lsof) not found. Skipping port check."
|
||||
return 0
|
||||
fi
|
||||
|
||||
if [ -n "$port_info" ]; then
|
||||
if printf '%s\n' "$port_info" | grep -q "${BIN_NAME}"; then
|
||||
say " -> Port ${SERVER_PORT} is in use by ${BIN_NAME}. Ignoring as it will be restarted."
|
||||
else
|
||||
say "[ERROR] Port ${SERVER_PORT} is already in use by another process:"
|
||||
printf ' %s\n' "$port_info"
|
||||
die "Please free the port ${SERVER_PORT} or change it and try again."
|
||||
fi
|
||||
fi
|
||||
}
|
||||
|
||||
detect_arch() {
|
||||
sys_arch="$(uname -m)"
|
||||
case "$sys_arch" in
|
||||
x86_64|amd64)
|
||||
if [ -r /proc/cpuinfo ] && grep -q "avx2" /proc/cpuinfo 2>/dev/null && grep -q "bmi2" /proc/cpuinfo 2>/dev/null; then
|
||||
echo "x86_64-v3"
|
||||
else
|
||||
echo "x86_64"
|
||||
fi
|
||||
;;
|
||||
aarch64|arm64) echo "aarch64" ;;
|
||||
*) die "Unsupported architecture: $sys_arch" ;;
|
||||
esac
|
||||
}
|
||||
|
||||
detect_libc() {
|
||||
for f in /lib/ld-musl-*.so.* /lib64/ld-musl-*.so.*; do
|
||||
if [ -e "$f" ]; then echo "musl"; return 0; fi
|
||||
done
|
||||
if grep -qE '^ID="?alpine"?' /etc/os-release 2>/dev/null; then echo "musl"; return 0; fi
|
||||
if command -v ldd >/dev/null 2>&1 && (ldd --version 2>&1 || true) | grep -qi musl; then echo "musl"; return 0; fi
|
||||
echo "gnu"
|
||||
}
|
||||
|
||||
fetch_file() {
|
||||
if command -v curl >/dev/null 2>&1; then curl -fsSL "$1" -o "$2"
|
||||
else wget -q -O "$2" "$1"; fi
|
||||
}
|
||||
|
||||
ensure_user_group() {
|
||||
nologin_bin="$(command -v nologin 2>/dev/null || command -v false 2>/dev/null || echo /bin/false)"
|
||||
|
||||
if ! check_os_entity group telemt; then
|
||||
if command -v groupadd >/dev/null 2>&1; then $SUDO groupadd -r telemt
|
||||
elif command -v addgroup >/dev/null 2>&1; then $SUDO addgroup -S telemt
|
||||
else die "Cannot create group"; fi
|
||||
fi
|
||||
|
||||
if ! check_os_entity passwd telemt; then
|
||||
if command -v useradd >/dev/null 2>&1; then
|
||||
$SUDO useradd -r -g telemt -d "$WORK_DIR" -s "$nologin_bin" -c "Telemt Proxy" telemt
|
||||
elif command -v adduser >/dev/null 2>&1; then
|
||||
if adduser --help 2>&1 | grep -q -- '-S'; then
|
||||
$SUDO adduser -S -D -H -h "$WORK_DIR" -s "$nologin_bin" -G telemt telemt
|
||||
else
|
||||
$SUDO adduser --system --home "$WORK_DIR" --shell "$nologin_bin" --no-create-home --ingroup telemt --disabled-password telemt
|
||||
fi
|
||||
else die "Cannot create user"; fi
|
||||
fi
|
||||
}
|
||||
|
||||
setup_dirs() {
|
||||
$SUDO mkdir -p "$WORK_DIR" "$CONFIG_DIR" "$CONFIG_PARENT_DIR" || die "Failed to create directories"
|
||||
|
||||
$SUDO chown telemt:telemt "$WORK_DIR" && $SUDO chmod 750 "$WORK_DIR"
|
||||
$SUDO chown root:telemt "$CONFIG_DIR" && $SUDO chmod 750 "$CONFIG_DIR"
|
||||
|
||||
if [ "$CONFIG_PARENT_DIR" != "$CONFIG_DIR" ] && [ "$CONFIG_PARENT_DIR" != "." ] && [ "$CONFIG_PARENT_DIR" != "/" ]; then
|
||||
$SUDO chown root:telemt "$CONFIG_PARENT_DIR" && $SUDO chmod 750 "$CONFIG_PARENT_DIR"
|
||||
fi
|
||||
}
|
||||
|
||||
stop_service() {
|
||||
svc="$(get_svc_mgr)"
|
||||
if [ "$svc" = "systemd" ] && systemctl is-active --quiet "$SERVICE_NAME" 2>/dev/null; then
|
||||
$SUDO systemctl stop "$SERVICE_NAME" 2>/dev/null || true
|
||||
elif [ "$svc" = "openrc" ] && rc-service "$SERVICE_NAME" status >/dev/null 2>&1; then
|
||||
$SUDO rc-service "$SERVICE_NAME" stop 2>/dev/null || true
|
||||
fi
|
||||
}
|
||||
|
||||
install_binary() {
|
||||
bin_src="$1"; bin_dst="$2"
|
||||
if [ -e "$INSTALL_DIR" ] && [ ! -d "$INSTALL_DIR" ]; then
|
||||
die "'$INSTALL_DIR' is not a directory."
|
||||
fi
|
||||
|
||||
$SUDO mkdir -p "$INSTALL_DIR" || die "Failed to create install directory"
|
||||
|
||||
$SUDO rm -f "$bin_dst" 2>/dev/null || true
|
||||
|
||||
if command -v install >/dev/null 2>&1; then
|
||||
$SUDO install -m 0755 "$bin_src" "$bin_dst" || die "Failed to install binary"
|
||||
else
|
||||
$SUDO cp "$bin_src" "$bin_dst" && $SUDO chmod 0755 "$bin_dst" || die "Failed to copy binary"
|
||||
fi
|
||||
|
||||
$SUDO sh -c '[ -x "$1" ]' _ "$bin_dst" || die "Binary not executable: $bin_dst"
|
||||
|
||||
if command -v setcap >/dev/null 2>&1; then
|
||||
$SUDO setcap cap_net_bind_service,cap_net_admin=+ep "$bin_dst" 2>/dev/null || true
|
||||
fi
|
||||
}
|
||||
|
||||
generate_secret() {
|
||||
secret="$(command -v openssl >/dev/null 2>&1 && openssl rand -hex 16 2>/dev/null || true)"
|
||||
if [ -z "$secret" ] || [ "${#secret}" -ne 32 ]; then
|
||||
if command -v od >/dev/null 2>&1; then secret="$(dd if=/dev/urandom bs=16 count=1 2>/dev/null | od -An -tx1 | tr -d ' \n')"
|
||||
elif command -v hexdump >/dev/null 2>&1; then secret="$(dd if=/dev/urandom bs=16 count=1 2>/dev/null | hexdump -e '1/1 "%02x"')"
|
||||
elif command -v xxd >/dev/null 2>&1; then secret="$(dd if=/dev/urandom bs=16 count=1 2>/dev/null | xxd -p | tr -d '\n')"
|
||||
fi
|
||||
fi
|
||||
if [ "${#secret}" -eq 32 ]; then echo "$secret"; else return 1; fi
|
||||
}
|
||||
|
||||
generate_config_content() {
|
||||
conf_secret="$1"
|
||||
conf_tag="$2"
|
||||
escaped_tls_domain="$(printf '%s\n' "$TLS_DOMAIN" | tr -d '[:cntrl:]' | sed 's/\\/\\\\/g; s/"/\\"/g')"
|
||||
|
||||
cat <<EOF
|
||||
[general]
|
||||
use_middle_proxy = true
|
||||
EOF
|
||||
|
||||
if [ -n "$conf_tag" ]; then
|
||||
echo "ad_tag = \"${conf_tag}\""
|
||||
fi
|
||||
|
||||
cat <<EOF
|
||||
|
||||
[general.modes]
|
||||
classic = false
|
||||
secure = false
|
||||
tls = true
|
||||
|
||||
[server]
|
||||
port = ${SERVER_PORT}
|
||||
|
||||
[server.api]
|
||||
enabled = true
|
||||
listen = "127.0.0.1:9091"
|
||||
whitelist = ["127.0.0.1/32"]
|
||||
|
||||
[censorship]
|
||||
tls_domain = "${escaped_tls_domain}"
|
||||
|
||||
[access.users]
|
||||
hello = "${conf_secret}"
|
||||
EOF
|
||||
}
|
||||
|
||||
install_config() {
|
||||
if is_config_exists; then
|
||||
say " -> Config already exists at $CONFIG_FILE. Updating parameters..."
|
||||
|
||||
tmp_conf="${TEMP_DIR}/config.tmp"
|
||||
$SUDO cat "$CONFIG_FILE" > "$tmp_conf"
|
||||
|
||||
escaped_domain="$(printf '%s\n' "$TLS_DOMAIN" | tr -d '[:cntrl:]' | sed 's/\\/\\\\/g; s/"/\\"/g')"
|
||||
|
||||
export AWK_PORT="$SERVER_PORT"
|
||||
export AWK_SECRET="$USER_SECRET"
|
||||
export AWK_DOMAIN="$escaped_domain"
|
||||
export AWK_AD_TAG="$AD_TAG"
|
||||
export AWK_FLAG_P="$PORT_PROVIDED"
|
||||
export AWK_FLAG_S="$SECRET_PROVIDED"
|
||||
export AWK_FLAG_D="$DOMAIN_PROVIDED"
|
||||
export AWK_FLAG_A="$AD_TAG_PROVIDED"
|
||||
|
||||
awk '
|
||||
BEGIN { ad_tag_handled = 0 }
|
||||
|
||||
ENVIRON["AWK_FLAG_P"] == "1" && /^[ \t]*port[ \t]*=/ { print "port = " ENVIRON["AWK_PORT"]; next }
|
||||
ENVIRON["AWK_FLAG_S"] == "1" && /^[ \t]*hello[ \t]*=/ { print "hello = \"" ENVIRON["AWK_SECRET"] "\""; next }
|
||||
ENVIRON["AWK_FLAG_D"] == "1" && /^[ \t]*tls_domain[ \t]*=/ { print "tls_domain = \"" ENVIRON["AWK_DOMAIN"] "\""; next }
|
||||
|
||||
ENVIRON["AWK_FLAG_A"] == "1" && /^[ \t]*ad_tag[ \t]*=/ {
|
||||
if (!ad_tag_handled) {
|
||||
print "ad_tag = \"" ENVIRON["AWK_AD_TAG"] "\"";
|
||||
ad_tag_handled = 1;
|
||||
}
|
||||
next
|
||||
}
|
||||
ENVIRON["AWK_FLAG_A"] == "1" && /^\[general\]/ {
|
||||
print;
|
||||
if (!ad_tag_handled) {
|
||||
print "ad_tag = \"" ENVIRON["AWK_AD_TAG"] "\"";
|
||||
ad_tag_handled = 1;
|
||||
}
|
||||
next
|
||||
}
|
||||
|
||||
{ print }
|
||||
' "$tmp_conf" > "${tmp_conf}.new" && mv "${tmp_conf}.new" "$tmp_conf"
|
||||
|
||||
[ "$PORT_PROVIDED" -eq 1 ] && say " -> Updated port: $SERVER_PORT"
|
||||
[ "$SECRET_PROVIDED" -eq 1 ] && say " -> Updated secret for user 'hello'"
|
||||
[ "$DOMAIN_PROVIDED" -eq 1 ] && say " -> Updated tls_domain: $TLS_DOMAIN"
|
||||
[ "$AD_TAG_PROVIDED" -eq 1 ] && say " -> Updated ad_tag"
|
||||
|
||||
write_root "$CONFIG_FILE" < "$tmp_conf"
|
||||
rm -f "$tmp_conf"
|
||||
return 0
|
||||
fi
|
||||
|
||||
if [ -z "$USER_SECRET" ]; then
|
||||
USER_SECRET="$(generate_secret)" || die "Failed to generate secret."
|
||||
fi
|
||||
|
||||
generate_config_content "$USER_SECRET" "$AD_TAG" | write_root "$CONFIG_FILE" || die "Failed to install config"
|
||||
$SUDO chown root:telemt "$CONFIG_FILE" && $SUDO chmod 640 "$CONFIG_FILE"
|
||||
|
||||
say " -> Config created successfully."
|
||||
say " -> Configured secret for user 'hello': $USER_SECRET"
|
||||
}
|
||||
|
||||
generate_systemd_content() {
|
||||
cat <<EOF
|
||||
[Unit]
|
||||
Description=Telemt
|
||||
After=network-online.target
|
||||
Wants=network-online.target
|
||||
|
||||
[Service]
|
||||
Type=simple
|
||||
User=telemt
|
||||
Group=telemt
|
||||
WorkingDirectory=$WORK_DIR
|
||||
ExecStart="${INSTALL_DIR}/${BIN_NAME}" "${CONFIG_FILE}"
|
||||
Restart=on-failure
|
||||
RestartSec=5
|
||||
LimitNOFILE=65536
|
||||
AmbientCapabilities=CAP_NET_BIND_SERVICE CAP_NET_ADMIN
|
||||
CapabilityBoundingSet=CAP_NET_BIND_SERVICE CAP_NET_ADMIN
|
||||
|
||||
[Install]
|
||||
WantedBy=multi-user.target
|
||||
EOF
|
||||
}
|
||||
|
||||
generate_openrc_content() {
|
||||
cat <<EOF
|
||||
#!/sbin/openrc-run
|
||||
name="$SERVICE_NAME"
|
||||
description="Telemt Proxy Service"
|
||||
command="${INSTALL_DIR}/${BIN_NAME}"
|
||||
command_args="${CONFIG_FILE}"
|
||||
command_background=true
|
||||
command_user="telemt:telemt"
|
||||
pidfile="/run/\${RC_SVCNAME}.pid"
|
||||
directory="${WORK_DIR}"
|
||||
rc_ulimit="-n 65536"
|
||||
depend() { need net; use logger; }
|
||||
EOF
|
||||
}
|
||||
|
||||
install_service() {
|
||||
svc="$(get_svc_mgr)"
|
||||
if [ "$svc" = "systemd" ]; then
|
||||
generate_systemd_content | write_root "/etc/systemd/system/${SERVICE_NAME}.service"
|
||||
$SUDO chown root:root "/etc/systemd/system/${SERVICE_NAME}.service" && $SUDO chmod 644 "/etc/systemd/system/${SERVICE_NAME}.service"
|
||||
|
||||
$SUDO systemctl daemon-reload || true
|
||||
$SUDO systemctl enable "$SERVICE_NAME" || true
|
||||
|
||||
if ! $SUDO systemctl start "$SERVICE_NAME"; then
|
||||
say "[WARNING] Failed to start service"
|
||||
SERVICE_START_FAILED=1
|
||||
fi
|
||||
elif [ "$svc" = "openrc" ]; then
|
||||
generate_openrc_content | write_root "/etc/init.d/${SERVICE_NAME}"
|
||||
$SUDO chown root:root "/etc/init.d/${SERVICE_NAME}" && $SUDO chmod 0755 "/etc/init.d/${SERVICE_NAME}"
|
||||
|
||||
$SUDO rc-update add "$SERVICE_NAME" default 2>/dev/null || true
|
||||
|
||||
if ! $SUDO rc-service "$SERVICE_NAME" start 2>/dev/null; then
|
||||
say "[WARNING] Failed to start service"
|
||||
SERVICE_START_FAILED=1
|
||||
fi
|
||||
else
|
||||
cmd="\"${INSTALL_DIR}/${BIN_NAME}\" \"${CONFIG_FILE}\""
|
||||
if [ -n "$SUDO" ]; then
|
||||
say " -> Service manager not found. Start manually: sudo -u telemt $cmd"
|
||||
else
|
||||
say " -> Service manager not found. Start manually: su -s /bin/sh telemt -c '$cmd'"
|
||||
fi
|
||||
fi
|
||||
}
|
||||
|
||||
kill_user_procs() {
|
||||
if command -v pkill >/dev/null 2>&1; then
|
||||
$SUDO pkill -u telemt "$BIN_NAME" 2>/dev/null || true
|
||||
sleep 1
|
||||
$SUDO pkill -9 -u telemt "$BIN_NAME" 2>/dev/null || true
|
||||
else
|
||||
if command -v pgrep >/dev/null 2>&1; then
|
||||
pids="$(pgrep -u telemt 2>/dev/null || true)"
|
||||
else
|
||||
pids="$(ps -ef 2>/dev/null | awk '$1=="telemt"{print $2}' || true)"
|
||||
[ -z "$pids" ] && pids="$(ps 2>/dev/null | awk '$2=="telemt"{print $1}' || true)"
|
||||
fi
|
||||
|
||||
if [ -n "$pids" ]; then
|
||||
for pid in $pids; do
|
||||
case "$pid" in ''|*[!0-9]*) continue ;; *) $SUDO kill "$pid" 2>/dev/null || true ;; esac
|
||||
done
|
||||
sleep 1
|
||||
for pid in $pids; do
|
||||
case "$pid" in ''|*[!0-9]*) continue ;; *) $SUDO kill -9 "$pid" 2>/dev/null || true ;; esac
|
||||
done
|
||||
fi
|
||||
fi
|
||||
}
|
||||
|
||||
uninstall() {
|
||||
say "Starting uninstallation of $BIN_NAME..."
|
||||
|
||||
say ">>> Stage 1: Stopping services"
|
||||
stop_service
|
||||
|
||||
say ">>> Stage 2: Removing service configuration"
|
||||
svc="$(get_svc_mgr)"
|
||||
if [ "$svc" = "systemd" ]; then
|
||||
$SUDO systemctl disable "$SERVICE_NAME" 2>/dev/null || true
|
||||
$SUDO rm -f "/etc/systemd/system/${SERVICE_NAME}.service"
|
||||
$SUDO systemctl daemon-reload 2>/dev/null || true
|
||||
elif [ "$svc" = "openrc" ]; then
|
||||
$SUDO rc-update del "$SERVICE_NAME" 2>/dev/null || true
|
||||
$SUDO rm -f "/etc/init.d/${SERVICE_NAME}"
|
||||
fi
|
||||
|
||||
say ">>> Stage 3: Terminating user processes"
|
||||
kill_user_procs
|
||||
|
||||
say ">>> Stage 4: Removing binary"
|
||||
$SUDO rm -f "${INSTALL_DIR}/${BIN_NAME}"
|
||||
|
||||
if [ "$ACTION" = "purge" ]; then
|
||||
say ">>> Stage 5: Purging configuration, data, and user"
|
||||
$SUDO rm -rf "$CONFIG_DIR" "$WORK_DIR"
|
||||
$SUDO rm -f "$CONFIG_FILE"
|
||||
sleep 1
|
||||
$SUDO userdel telemt 2>/dev/null || $SUDO deluser telemt 2>/dev/null || true
|
||||
|
||||
if check_os_entity group telemt; then
|
||||
$SUDO groupdel telemt 2>/dev/null || $SUDO delgroup telemt 2>/dev/null || true
|
||||
fi
|
||||
else
|
||||
say "Note: Configuration and user kept. Run with 'purge' to remove completely."
|
||||
fi
|
||||
|
||||
printf '\n====================================================================\n'
|
||||
printf ' UNINSTALLATION COMPLETE\n'
|
||||
printf '====================================================================\n\n'
|
||||
exit 0
|
||||
}
|
||||
|
||||
case "$ACTION" in
|
||||
help) show_help ;;
|
||||
uninstall|purge) verify_common; uninstall ;;
|
||||
install)
|
||||
say "Starting installation of $BIN_NAME (Version: $TARGET_VERSION)"
|
||||
|
||||
say ">>> Stage 1: Verifying environment and dependencies"
|
||||
verify_common
|
||||
verify_install_deps
|
||||
|
||||
if is_config_exists && [ "$PORT_PROVIDED" -eq 0 ]; then
|
||||
ext_port="$($SUDO awk -F'=' '/^[ \t]*port[ \t]*=/ {gsub(/[^0-9]/, "", $2); print $2; exit}' "$CONFIG_FILE" 2>/dev/null || true)"
|
||||
if [ -n "$ext_port" ]; then
|
||||
SERVER_PORT="$ext_port"
|
||||
fi
|
||||
fi
|
||||
|
||||
check_port_availability
|
||||
|
||||
if [ "$TARGET_VERSION" != "latest" ]; then
|
||||
TARGET_VERSION="${TARGET_VERSION#v}"
|
||||
fi
|
||||
|
||||
ARCH="$(detect_arch)"; LIBC="$(detect_libc)"
|
||||
FILE_NAME="${BIN_NAME}-${ARCH}-linux-${LIBC}.tar.gz"
|
||||
|
||||
if [ "$TARGET_VERSION" = "latest" ]; then
|
||||
DL_URL="https://github.com/${REPO}/releases/latest/download/${FILE_NAME}"
|
||||
else
|
||||
DL_URL="https://github.com/${REPO}/releases/download/${TARGET_VERSION}/${FILE_NAME}"
|
||||
fi
|
||||
|
||||
say ">>> Stage 2: Downloading archive"
|
||||
TEMP_DIR="$(mktemp -d)" || die "Temp directory creation failed"
|
||||
if [ -z "$TEMP_DIR" ] || [ ! -d "$TEMP_DIR" ]; then
|
||||
die "Temp directory is invalid or was not created"
|
||||
fi
|
||||
|
||||
if ! fetch_file "$DL_URL" "${TEMP_DIR}/${FILE_NAME}"; then
|
||||
if [ "$ARCH" = "x86_64-v3" ]; then
|
||||
say " -> x86_64-v3 build not found, falling back to standard x86_64..."
|
||||
ARCH="x86_64"
|
||||
FILE_NAME="${BIN_NAME}-${ARCH}-linux-${LIBC}.tar.gz"
|
||||
if [ "$TARGET_VERSION" = "latest" ]; then
|
||||
DL_URL="https://github.com/${REPO}/releases/latest/download/${FILE_NAME}"
|
||||
else
|
||||
DL_URL="https://github.com/${REPO}/releases/download/${TARGET_VERSION}/${FILE_NAME}"
|
||||
fi
|
||||
fetch_file "$DL_URL" "${TEMP_DIR}/${FILE_NAME}" || die "Download failed"
|
||||
else
|
||||
die "Download failed"
|
||||
fi
|
||||
fi
|
||||
|
||||
say ">>> Stage 3: Extracting archive"
|
||||
if ! gzip -dc "${TEMP_DIR}/${FILE_NAME}" | tar -xf - -C "$TEMP_DIR" 2>/dev/null; then
|
||||
die "Extraction failed (downloaded archive might be invalid or 404)."
|
||||
fi
|
||||
|
||||
EXTRACTED_BIN="$(find "$TEMP_DIR" -type f -name "$BIN_NAME" -print 2>/dev/null | head -n 1 || true)"
|
||||
[ -n "$EXTRACTED_BIN" ] || die "Binary '$BIN_NAME' not found in archive"
|
||||
|
||||
say ">>> Stage 4: Setting up environment (User, Group, Directories)"
|
||||
ensure_user_group; setup_dirs; stop_service
|
||||
|
||||
say ">>> Stage 5: Installing binary"
|
||||
install_binary "$EXTRACTED_BIN" "${INSTALL_DIR}/${BIN_NAME}"
|
||||
|
||||
say ">>> Stage 6: Generating/Updating configuration"
|
||||
install_config
|
||||
|
||||
say ">>> Stage 7: Installing and starting service"
|
||||
install_service
|
||||
|
||||
if [ "${SERVICE_START_FAILED:-0}" -eq 1 ]; then
|
||||
printf '\n====================================================================\n'
|
||||
printf ' INSTALLATION COMPLETED WITH WARNINGS\n'
|
||||
printf '====================================================================\n\n'
|
||||
printf 'The service was installed but failed to start automatically.\n'
|
||||
printf 'Please check the logs to determine the issue.\n\n'
|
||||
else
|
||||
printf '\n====================================================================\n'
|
||||
printf ' INSTALLATION SUCCESS\n'
|
||||
printf '====================================================================\n\n'
|
||||
fi
|
||||
|
||||
svc="$(get_svc_mgr)"
|
||||
if [ "$svc" = "systemd" ]; then
|
||||
printf 'To check the status of your proxy service, run:\n'
|
||||
printf ' systemctl status %s\n\n' "$SERVICE_NAME"
|
||||
elif [ "$svc" = "openrc" ]; then
|
||||
printf 'To check the status of your proxy service, run:\n'
|
||||
printf ' rc-service %s status\n\n' "$SERVICE_NAME"
|
||||
fi
|
||||
|
||||
API_LISTEN="$($SUDO awk -F'"' '/^[ \t]*listen[ \t]*=/ {print $2; exit}' "$CONFIG_FILE" 2>/dev/null || true)"
|
||||
API_LISTEN="${API_LISTEN:-127.0.0.1:9091}"
|
||||
|
||||
printf 'To get your user connection links (for Telegram), run:\n'
|
||||
if command -v jq >/dev/null 2>&1; then
|
||||
printf ' curl -s http://%s/v1/users | jq -r '\''.data[]? | "User: \\(.username)\\n\\(.links.tls[0] // empty)\\n"'\''\n' "$API_LISTEN"
|
||||
else
|
||||
printf ' curl -s http://%s/v1/users\n' "$API_LISTEN"
|
||||
printf ' (Tip: Install '\''jq'\'' for a much cleaner output)\n'
|
||||
fi
|
||||
|
||||
printf '\n====================================================================\n'
|
||||
;;
|
||||
esac
|
||||
|
|
|
|||
|
|
@ -0,0 +1,269 @@
use std::collections::BTreeMap;
use std::io::Write;
use std::path::{Path, PathBuf};

use chrono::{DateTime, Utc};
use hyper::header::IF_MATCH;
use serde::Serialize;
use sha2::{Digest, Sha256};

use crate::config::ProxyConfig;

use super::model::ApiFailure;

#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub(super) enum AccessSection {
    Users,
    UserAdTags,
    UserMaxTcpConns,
    UserExpirations,
    UserDataQuota,
    UserMaxUniqueIps,
}

impl AccessSection {
    fn table_name(self) -> &'static str {
        match self {
            Self::Users => "access.users",
            Self::UserAdTags => "access.user_ad_tags",
            Self::UserMaxTcpConns => "access.user_max_tcp_conns",
            Self::UserExpirations => "access.user_expirations",
            Self::UserDataQuota => "access.user_data_quota",
            Self::UserMaxUniqueIps => "access.user_max_unique_ips",
        }
    }
}

pub(super) fn parse_if_match(headers: &hyper::HeaderMap) -> Option<String> {
    headers
        .get(IF_MATCH)
        .and_then(|value| value.to_str().ok())
        .map(str::trim)
        .filter(|value| !value.is_empty())
        .map(|value| value.trim_matches('"').to_string())
}

pub(super) async fn ensure_expected_revision(
    config_path: &Path,
    expected_revision: Option<&str>,
) -> Result<(), ApiFailure> {
    let Some(expected) = expected_revision else {
        return Ok(());
    };
    let current = current_revision(config_path).await?;
    if current != expected {
        return Err(ApiFailure::new(
            hyper::StatusCode::CONFLICT,
            "revision_conflict",
            "Config revision mismatch",
        ));
    }
    Ok(())
}

pub(super) async fn current_revision(config_path: &Path) -> Result<String, ApiFailure> {
    let content = tokio::fs::read_to_string(config_path)
        .await
        .map_err(|e| ApiFailure::internal(format!("failed to read config: {}", e)))?;
    Ok(compute_revision(&content))
}

pub(super) fn compute_revision(content: &str) -> String {
    let mut hasher = Sha256::new();
    hasher.update(content.as_bytes());
    hex::encode(hasher.finalize())
}

pub(super) async fn load_config_from_disk(config_path: &Path) -> Result<ProxyConfig, ApiFailure> {
    let config_path = config_path.to_path_buf();
    tokio::task::spawn_blocking(move || ProxyConfig::load(config_path))
        .await
        .map_err(|e| ApiFailure::internal(format!("failed to join config loader: {}", e)))?
        .map_err(|e| ApiFailure::internal(format!("failed to load config: {}", e)))
}

pub(super) async fn save_config_to_disk(
    config_path: &Path,
    cfg: &ProxyConfig,
) -> Result<String, ApiFailure> {
    let serialized = toml::to_string_pretty(cfg)
        .map_err(|e| ApiFailure::internal(format!("failed to serialize config: {}", e)))?;
    write_atomic(config_path.to_path_buf(), serialized.clone()).await?;
    Ok(compute_revision(&serialized))
}

pub(super) async fn save_access_sections_to_disk(
    config_path: &Path,
    cfg: &ProxyConfig,
    sections: &[AccessSection],
) -> Result<String, ApiFailure> {
    let mut content = tokio::fs::read_to_string(config_path)
        .await
        .map_err(|e| ApiFailure::internal(format!("failed to read config: {}", e)))?;

    let mut applied = Vec::new();
    for section in sections {
        if applied.contains(section) {
            continue;
        }
        let rendered = render_access_section(cfg, *section)?;
        content = upsert_toml_table(&content, section.table_name(), &rendered);
        applied.push(*section);
    }

    write_atomic(config_path.to_path_buf(), content.clone()).await?;
    Ok(compute_revision(&content))
}

fn render_access_section(cfg: &ProxyConfig, section: AccessSection) -> Result<String, ApiFailure> {
    let body = match section {
        AccessSection::Users => {
            let rows: BTreeMap<String, String> = cfg
                .access
                .users
                .iter()
                .map(|(key, value)| (key.clone(), value.clone()))
                .collect();
            serialize_table_body(&rows)?
        }
        AccessSection::UserAdTags => {
            let rows: BTreeMap<String, String> = cfg
                .access
                .user_ad_tags
                .iter()
                .map(|(key, value)| (key.clone(), value.clone()))
                .collect();
            serialize_table_body(&rows)?
        }
        AccessSection::UserMaxTcpConns => {
            let rows: BTreeMap<String, usize> = cfg
                .access
                .user_max_tcp_conns
                .iter()
                .map(|(key, value)| (key.clone(), *value))
                .collect();
            serialize_table_body(&rows)?
        }
        AccessSection::UserExpirations => {
            let rows: BTreeMap<String, DateTime<Utc>> = cfg
                .access
                .user_expirations
                .iter()
                .map(|(key, value)| (key.clone(), *value))
                .collect();
            serialize_table_body(&rows)?
        }
        AccessSection::UserDataQuota => {
            let rows: BTreeMap<String, u64> = cfg
                .access
                .user_data_quota
                .iter()
                .map(|(key, value)| (key.clone(), *value))
                .collect();
            serialize_table_body(&rows)?
        }
        AccessSection::UserMaxUniqueIps => {
            let rows: BTreeMap<String, usize> = cfg
                .access
                .user_max_unique_ips
                .iter()
                .map(|(key, value)| (key.clone(), *value))
                .collect();
            serialize_table_body(&rows)?
        }
    };

    let mut out = format!("[{}]\n", section.table_name());
    if !body.is_empty() {
        out.push_str(&body);
    }
    if !out.ends_with('\n') {
        out.push('\n');
    }
    Ok(out)
}

fn serialize_table_body<T: Serialize>(value: &T) -> Result<String, ApiFailure> {
    toml::to_string(value)
        .map_err(|e| ApiFailure::internal(format!("failed to serialize access section: {}", e)))
}

fn upsert_toml_table(source: &str, table_name: &str, replacement: &str) -> String {
    if let Some((start, end)) = find_toml_table_bounds(source, table_name) {
        let mut out = String::with_capacity(source.len() + replacement.len());
        out.push_str(&source[..start]);
        out.push_str(replacement);
        out.push_str(&source[end..]);
        return out;
    }

    let mut out = source.to_string();
    if !out.is_empty() && !out.ends_with('\n') {
        out.push('\n');
    }
    if !out.is_empty() {
        out.push('\n');
    }
    out.push_str(replacement);
    out
}

fn find_toml_table_bounds(source: &str, table_name: &str) -> Option<(usize, usize)> {
    let target = format!("[{}]", table_name);
    let mut offset = 0usize;
    let mut start = None;

    for line in source.split_inclusive('\n') {
        let trimmed = line.trim();
        if let Some(start_offset) = start {
            if trimmed.starts_with('[') {
                return Some((start_offset, offset));
            }
        } else if trimmed == target {
            start = Some(offset);
        }
        offset = offset.saturating_add(line.len());
    }

    start.map(|start_offset| (start_offset, source.len()))
}

async fn write_atomic(path: PathBuf, contents: String) -> Result<(), ApiFailure> {
    tokio::task::spawn_blocking(move || write_atomic_sync(&path, &contents))
        .await
        .map_err(|e| ApiFailure::internal(format!("failed to join writer: {}", e)))?
        .map_err(|e| ApiFailure::internal(format!("failed to write config: {}", e)))
}

fn write_atomic_sync(path: &Path, contents: &str) -> std::io::Result<()> {
    let parent = path.parent().unwrap_or_else(|| Path::new("."));
    std::fs::create_dir_all(parent)?;

    let tmp_name = format!(
        ".{}.tmp-{}",
        path.file_name()
            .and_then(|s| s.to_str())
            .unwrap_or("config.toml"),
        rand::random::<u64>()
    );
    let tmp_path = parent.join(tmp_name);

    let write_result = (|| {
        let mut file = std::fs::OpenOptions::new()
            .create_new(true)
            .write(true)
            .open(&tmp_path)?;
        file.write_all(contents.as_bytes())?;
        file.sync_all()?;
        std::fs::rename(&tmp_path, path)?;
        if let Ok(dir) = std::fs::File::open(parent) {
            let _ = dir.sync_all();
        }
        Ok(())
    })();

    if write_result.is_err() {
        let _ = std::fs::remove_file(&tmp_path);
    }
    write_result
}
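The revision returned by `compute_revision` above is simply the SHA-256 hex digest of the raw config file, which is what `ensure_expected_revision` compares against the client-supplied `If-Match` value. A minimal standalone sketch of that contract follows; the sample TOML content is illustrative, and it assumes the same `sha2` and `hex` crates the file itself uses:

```rust
// Sketch only: demonstrates the revision/If-Match contract implemented above.
// Any byte-level change to the config file yields a new revision string, so a
// writer holding a stale revision is rejected with 409 revision_conflict.
use sha2::{Digest, Sha256};

fn compute_revision(content: &str) -> String {
    let mut hasher = Sha256::new();
    hasher.update(content.as_bytes());
    hex::encode(hasher.finalize())
}

fn main() {
    let before = "[access.users]\nalice = \"0123456789abcdef0123456789abcdef\"\n";
    let after = format!("{before}bob = \"fedcba9876543210fedcba9876543210\"\n");
    let rev_before = compute_revision(before);
    let rev_after = compute_revision(&after);
    // A mutation sent with `If-Match: "<rev_before>"` after the file changed
    // would fail ensure_expected_revision and surface as 409 Conflict.
    assert_ne!(rev_before, rev_after);
    println!("before={rev_before}\nafter={rev_after}");
}
```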
@ -0,0 +1,90 @@
use std::collections::VecDeque;
use std::sync::Mutex;
use std::time::{SystemTime, UNIX_EPOCH};

use serde::Serialize;

#[derive(Clone, Serialize)]
pub(super) struct ApiEventRecord {
    pub(super) seq: u64,
    pub(super) ts_epoch_secs: u64,
    pub(super) event_type: String,
    pub(super) context: String,
}

#[derive(Clone, Serialize)]
pub(super) struct ApiEventSnapshot {
    pub(super) capacity: usize,
    pub(super) dropped_total: u64,
    pub(super) events: Vec<ApiEventRecord>,
}

struct ApiEventsInner {
    capacity: usize,
    dropped_total: u64,
    next_seq: u64,
    events: VecDeque<ApiEventRecord>,
}

/// Bounded ring-buffer for control-plane API/runtime events.
pub(crate) struct ApiEventStore {
    inner: Mutex<ApiEventsInner>,
}

impl ApiEventStore {
    pub(super) fn new(capacity: usize) -> Self {
        let bounded = capacity.max(16);
        Self {
            inner: Mutex::new(ApiEventsInner {
                capacity: bounded,
                dropped_total: 0,
                next_seq: 1,
                events: VecDeque::with_capacity(bounded),
            }),
        }
    }

    pub(super) fn record(&self, event_type: &str, context: impl Into<String>) {
        let now_epoch_secs = SystemTime::now()
            .duration_since(UNIX_EPOCH)
            .unwrap_or_default()
            .as_secs();
        let mut context = context.into();
        if context.len() > 256 {
            context.truncate(256);
        }

        let mut guard = self.inner.lock().expect("api event store mutex poisoned");
        if guard.events.len() == guard.capacity {
            guard.events.pop_front();
            guard.dropped_total = guard.dropped_total.saturating_add(1);
        }
        let seq = guard.next_seq;
        guard.next_seq = guard.next_seq.saturating_add(1);
        guard.events.push_back(ApiEventRecord {
            seq,
            ts_epoch_secs: now_epoch_secs,
            event_type: event_type.to_string(),
            context,
        });
    }

    pub(super) fn snapshot(&self, limit: usize) -> ApiEventSnapshot {
        let guard = self.inner.lock().expect("api event store mutex poisoned");
        let bounded_limit = limit.clamp(1, guard.capacity.max(1));
        let mut items: Vec<ApiEventRecord> = guard
            .events
            .iter()
            .rev()
            .take(bounded_limit)
            .cloned()
            .collect();
        items.reverse();

        ApiEventSnapshot {
            capacity: guard.capacity,
            dropped_total: guard.dropped_total,
            events: items,
        }
    }
}
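For clarity, the eviction and sequencing behavior of `ApiEventStore` can be summarized with a small sketch. Since `new`, `record`, and `snapshot` are `pub(super)`, this would have to live inside the same module; the event type string and loop bounds are illustrative:

```rust
// Sketch only (assumes module-internal visibility to the pub(super) methods).
// Capacity is clamped to at least 16; once full, each record() evicts the
// oldest entry and increments dropped_total, while seq grows monotonically.
fn ring_buffer_demo() {
    let store = ApiEventStore::new(1); // clamped up to 16 internally
    for i in 0..20 {
        store.record("api.demo", format!("i={}", i));
    }
    let snap = store.snapshot(100); // limit is clamped down to the capacity
    assert_eq!(snap.capacity, 16);
    assert_eq!(snap.dropped_total, 4); // 20 records, 16 kept
    assert_eq!(snap.events.first().map(|e| e.seq), Some(5)); // oldest surviving
    assert_eq!(snap.events.last().map(|e| e.seq), Some(20)); // newest, chronological order
}
```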
@ -0,0 +1,88 @@
use http_body_util::{BodyExt, Full};
use hyper::StatusCode;
use hyper::body::{Bytes, Incoming};
use serde::Serialize;
use serde::de::DeserializeOwned;

use super::model::{ApiFailure, ErrorBody, ErrorResponse, SuccessResponse};

pub(super) fn success_response<T: Serialize>(
    status: StatusCode,
    data: T,
    revision: String,
) -> hyper::Response<Full<Bytes>> {
    let payload = SuccessResponse {
        ok: true,
        data,
        revision,
    };
    let body = serde_json::to_vec(&payload).unwrap_or_else(|_| b"{\"ok\":false}".to_vec());
    hyper::Response::builder()
        .status(status)
        .header("content-type", "application/json; charset=utf-8")
        .body(Full::new(Bytes::from(body)))
        .unwrap()
}

pub(super) fn error_response(request_id: u64, failure: ApiFailure) -> hyper::Response<Full<Bytes>> {
    let payload = ErrorResponse {
        ok: false,
        error: ErrorBody {
            code: failure.code,
            message: failure.message,
        },
        request_id,
    };
    let body = serde_json::to_vec(&payload).unwrap_or_else(|_| {
        format!(
            "{{\"ok\":false,\"error\":{{\"code\":\"internal_error\",\"message\":\"serialization failed\"}},\"request_id\":{}}}",
            request_id
        )
        .into_bytes()
    });
    hyper::Response::builder()
        .status(failure.status)
        .header("content-type", "application/json; charset=utf-8")
        .body(Full::new(Bytes::from(body)))
        .unwrap()
}

pub(super) async fn read_json<T: DeserializeOwned>(
    body: Incoming,
    limit: usize,
) -> Result<T, ApiFailure> {
    let bytes = read_body_with_limit(body, limit).await?;
    serde_json::from_slice(&bytes).map_err(|_| ApiFailure::bad_request("Invalid JSON body"))
}

pub(super) async fn read_optional_json<T: DeserializeOwned>(
    body: Incoming,
    limit: usize,
) -> Result<Option<T>, ApiFailure> {
    let bytes = read_body_with_limit(body, limit).await?;
    if bytes.is_empty() {
        return Ok(None);
    }
    serde_json::from_slice(&bytes)
        .map(Some)
        .map_err(|_| ApiFailure::bad_request("Invalid JSON body"))
}

async fn read_body_with_limit(body: Incoming, limit: usize) -> Result<Vec<u8>, ApiFailure> {
    let mut collected = Vec::new();
    let mut body = body;
    while let Some(frame_result) = body.frame().await {
        let frame = frame_result.map_err(|_| ApiFailure::bad_request("Invalid request body"))?;
        if let Some(chunk) = frame.data_ref() {
            if collected.len().saturating_add(chunk.len()) > limit {
                return Err(ApiFailure::new(
                    StatusCode::PAYLOAD_TOO_LARGE,
                    "payload_too_large",
                    format!("Body exceeds {} bytes", limit),
                ));
            }
            collected.extend_from_slice(chunk);
        }
    }
    Ok(collected)
}
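Every handler response goes through these two helpers, so the wire format is a fixed envelope. The following reconstruction is illustrative only: the field names come from `SuccessResponse` and `ErrorResponse` in `model`, while the concrete values (revision string, limit, request id) are made up:

```rust
// Illustrative only: the JSON envelopes emitted by success_response and
// error_response above, rebuilt with serde_json for readability.
use serde_json::json;

fn main() {
    let success = json!({
        "ok": true,
        "data": { "status": "ok", "read_only": false },
        "revision": "<sha256-hex-of-config-file>"
    });
    let error = json!({
        "ok": false,
        "error": { "code": "payload_too_large", "message": "Body exceeds 65536 bytes" },
        "request_id": 7
    });
    println!("{success}");
    println!("{error}");
}
```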
@ -0,0 +1,616 @@
#![allow(clippy::too_many_arguments)]

use std::convert::Infallible;
use std::net::{IpAddr, SocketAddr};
use std::path::PathBuf;
use std::sync::Arc;
use std::sync::atomic::{AtomicBool, AtomicU64, Ordering};

use http_body_util::Full;
use hyper::body::{Bytes, Incoming};
use hyper::header::AUTHORIZATION;
use hyper::server::conn::http1;
use hyper::service::service_fn;
use hyper::{Method, Request, Response, StatusCode};
use tokio::net::TcpListener;
use tokio::sync::{Mutex, RwLock, watch};
use tracing::{debug, info, warn};

use crate::config::ProxyConfig;
use crate::ip_tracker::UserIpTracker;
use crate::proxy::route_mode::RouteRuntimeController;
use crate::startup::StartupTracker;
use crate::stats::Stats;
use crate::transport::UpstreamManager;
use crate::transport::middle_proxy::MePool;

mod config_store;
mod events;
mod http_utils;
mod model;
mod runtime_edge;
mod runtime_init;
mod runtime_min;
mod runtime_selftest;
mod runtime_stats;
mod runtime_watch;
mod runtime_zero;
mod users;

use config_store::{current_revision, load_config_from_disk, parse_if_match};
use events::ApiEventStore;
use http_utils::{error_response, read_json, read_optional_json, success_response};
use model::{
    ApiFailure, CreateUserRequest, DeleteUserResponse, HealthData, PatchUserRequest,
    RotateSecretRequest, SummaryData, UserActiveIps,
};
use runtime_edge::{
    EdgeConnectionsCacheEntry, build_runtime_connections_summary_data,
    build_runtime_events_recent_data,
};
use runtime_init::build_runtime_initialization_data;
use runtime_min::{
    build_runtime_me_pool_state_data, build_runtime_me_quality_data, build_runtime_nat_stun_data,
    build_runtime_upstream_quality_data, build_security_whitelist_data,
};
use runtime_selftest::build_runtime_me_selftest_data;
use runtime_stats::{
    MinimalCacheEntry, build_dcs_data, build_me_writers_data, build_minimal_all_data,
    build_upstreams_data, build_zero_all_data,
};
use runtime_watch::spawn_runtime_watchers;
use runtime_zero::{
    build_limits_effective_data, build_runtime_gates_data, build_security_posture_data,
    build_system_info_data,
};
use users::{create_user, delete_user, patch_user, rotate_secret, users_from_config};

pub(super) struct ApiRuntimeState {
    pub(super) process_started_at_epoch_secs: u64,
    pub(super) config_reload_count: AtomicU64,
    pub(super) last_config_reload_epoch_secs: AtomicU64,
    pub(super) admission_open: AtomicBool,
}

#[derive(Clone)]
pub(super) struct ApiShared {
    pub(super) stats: Arc<Stats>,
    pub(super) ip_tracker: Arc<UserIpTracker>,
    pub(super) me_pool: Arc<RwLock<Option<Arc<MePool>>>>,
    pub(super) upstream_manager: Arc<UpstreamManager>,
    pub(super) config_path: PathBuf,
    pub(super) detected_ips_rx: watch::Receiver<(Option<IpAddr>, Option<IpAddr>)>,
    pub(super) mutation_lock: Arc<Mutex<()>>,
    pub(super) minimal_cache: Arc<Mutex<Option<MinimalCacheEntry>>>,
    pub(super) runtime_edge_connections_cache: Arc<Mutex<Option<EdgeConnectionsCacheEntry>>>,
    pub(super) runtime_edge_recompute_lock: Arc<Mutex<()>>,
    pub(super) runtime_events: Arc<ApiEventStore>,
    pub(super) request_id: Arc<AtomicU64>,
    pub(super) runtime_state: Arc<ApiRuntimeState>,
    pub(super) startup_tracker: Arc<StartupTracker>,
    pub(super) route_runtime: Arc<RouteRuntimeController>,
}

impl ApiShared {
    fn next_request_id(&self) -> u64 {
        self.request_id.fetch_add(1, Ordering::Relaxed)
    }

    fn detected_link_ips(&self) -> (Option<IpAddr>, Option<IpAddr>) {
        *self.detected_ips_rx.borrow()
    }
}

pub async fn serve(
    listen: SocketAddr,
    stats: Arc<Stats>,
    ip_tracker: Arc<UserIpTracker>,
    me_pool: Arc<RwLock<Option<Arc<MePool>>>>,
    route_runtime: Arc<RouteRuntimeController>,
    upstream_manager: Arc<UpstreamManager>,
    config_rx: watch::Receiver<Arc<ProxyConfig>>,
    admission_rx: watch::Receiver<bool>,
    config_path: PathBuf,
    detected_ips_rx: watch::Receiver<(Option<IpAddr>, Option<IpAddr>)>,
    process_started_at_epoch_secs: u64,
    startup_tracker: Arc<StartupTracker>,
) {
    let listener = match TcpListener::bind(listen).await {
        Ok(listener) => listener,
        Err(error) => {
            warn!(
                error = %error,
                listen = %listen,
                "Failed to bind API listener"
            );
            return;
        }
    };

    info!("API endpoint: http://{}/v1/*", listen);

    let runtime_state = Arc::new(ApiRuntimeState {
        process_started_at_epoch_secs,
        config_reload_count: AtomicU64::new(0),
        last_config_reload_epoch_secs: AtomicU64::new(0),
        admission_open: AtomicBool::new(*admission_rx.borrow()),
    });

    let shared = Arc::new(ApiShared {
        stats,
        ip_tracker,
        me_pool,
        upstream_manager,
        config_path,
        detected_ips_rx,
        mutation_lock: Arc::new(Mutex::new(())),
        minimal_cache: Arc::new(Mutex::new(None)),
        runtime_edge_connections_cache: Arc::new(Mutex::new(None)),
        runtime_edge_recompute_lock: Arc::new(Mutex::new(())),
        runtime_events: Arc::new(ApiEventStore::new(
            config_rx.borrow().server.api.runtime_edge_events_capacity,
        )),
        request_id: Arc::new(AtomicU64::new(1)),
        runtime_state: runtime_state.clone(),
        startup_tracker,
        route_runtime,
    });

    spawn_runtime_watchers(
        config_rx.clone(),
        admission_rx.clone(),
        runtime_state.clone(),
        shared.runtime_events.clone(),
    );

    loop {
        let (stream, peer) = match listener.accept().await {
            Ok(v) => v,
            Err(error) => {
                warn!(error = %error, "API accept error");
                continue;
            }
        };

        let shared_conn = shared.clone();
        let config_rx_conn = config_rx.clone();
        tokio::spawn(async move {
            let svc = service_fn(move |req: Request<Incoming>| {
                let shared_req = shared_conn.clone();
                let config_rx_req = config_rx_conn.clone();
                async move { handle(req, peer, shared_req, config_rx_req).await }
            });
            if let Err(error) = http1::Builder::new()
                .serve_connection(hyper_util::rt::TokioIo::new(stream), svc)
                .await
            {
                debug!(error = %error, "API connection error");
            }
        });
    }
}

async fn handle(
    req: Request<Incoming>,
    peer: SocketAddr,
    shared: Arc<ApiShared>,
    config_rx: watch::Receiver<Arc<ProxyConfig>>,
) -> Result<Response<Full<Bytes>>, Infallible> {
    let request_id = shared.next_request_id();
    let cfg = config_rx.borrow().clone();
    let api_cfg = &cfg.server.api;

    if !api_cfg.enabled {
        return Ok(error_response(
            request_id,
            ApiFailure::new(
                StatusCode::SERVICE_UNAVAILABLE,
                "api_disabled",
                "API is disabled",
            ),
        ));
    }

    if !api_cfg.whitelist.is_empty() && !api_cfg.whitelist.iter().any(|net| net.contains(peer.ip()))
    {
        return Ok(error_response(
            request_id,
            ApiFailure::new(
                StatusCode::FORBIDDEN,
                "forbidden",
                "Source IP is not allowed",
            ),
        ));
    }

    if !api_cfg.auth_header.is_empty() {
        let auth_ok = req
            .headers()
            .get(AUTHORIZATION)
            .and_then(|v| v.to_str().ok())
            .map(|v| v == api_cfg.auth_header)
            .unwrap_or(false);
        if !auth_ok {
            return Ok(error_response(
                request_id,
                ApiFailure::new(
                    StatusCode::UNAUTHORIZED,
                    "unauthorized",
                    "Missing or invalid Authorization header",
                ),
            ));
        }
    }

    let method = req.method().clone();
    let path = req.uri().path().to_string();
    let query = req.uri().query().map(str::to_string);
    let body_limit = api_cfg.request_body_limit_bytes;

    let result: Result<Response<Full<Bytes>>, ApiFailure> = async {
        match (method.as_str(), path.as_str()) {
            ("GET", "/v1/health") => {
                let revision = current_revision(&shared.config_path).await?;
                let data = HealthData {
                    status: "ok",
                    read_only: api_cfg.read_only,
                };
                Ok(success_response(StatusCode::OK, data, revision))
            }
            ("GET", "/v1/system/info") => {
                let revision = current_revision(&shared.config_path).await?;
                let data = build_system_info_data(shared.as_ref(), cfg.as_ref(), &revision);
                Ok(success_response(StatusCode::OK, data, revision))
            }
            ("GET", "/v1/runtime/gates") => {
                let revision = current_revision(&shared.config_path).await?;
                let data = build_runtime_gates_data(shared.as_ref(), cfg.as_ref()).await;
                Ok(success_response(StatusCode::OK, data, revision))
            }
            ("GET", "/v1/runtime/initialization") => {
                let revision = current_revision(&shared.config_path).await?;
                let data = build_runtime_initialization_data(shared.as_ref()).await;
                Ok(success_response(StatusCode::OK, data, revision))
            }
            ("GET", "/v1/limits/effective") => {
                let revision = current_revision(&shared.config_path).await?;
                let data = build_limits_effective_data(cfg.as_ref());
                Ok(success_response(StatusCode::OK, data, revision))
            }
            ("GET", "/v1/security/posture") => {
                let revision = current_revision(&shared.config_path).await?;
                let data = build_security_posture_data(cfg.as_ref());
                Ok(success_response(StatusCode::OK, data, revision))
            }
            ("GET", "/v1/security/whitelist") => {
                let revision = current_revision(&shared.config_path).await?;
                let data = build_security_whitelist_data(cfg.as_ref());
                Ok(success_response(StatusCode::OK, data, revision))
            }
            ("GET", "/v1/stats/summary") => {
                let revision = current_revision(&shared.config_path).await?;
                let data = SummaryData {
                    uptime_seconds: shared.stats.uptime_secs(),
                    connections_total: shared.stats.get_connects_all(),
                    connections_bad_total: shared.stats.get_connects_bad(),
                    handshake_timeouts_total: shared.stats.get_handshake_timeouts(),
                    configured_users: cfg.access.users.len(),
                };
                Ok(success_response(StatusCode::OK, data, revision))
            }
            ("GET", "/v1/stats/zero/all") => {
                let revision = current_revision(&shared.config_path).await?;
                let data = build_zero_all_data(&shared.stats, cfg.access.users.len());
                Ok(success_response(StatusCode::OK, data, revision))
            }
            ("GET", "/v1/stats/upstreams") => {
                let revision = current_revision(&shared.config_path).await?;
                let data = build_upstreams_data(shared.as_ref(), api_cfg);
                Ok(success_response(StatusCode::OK, data, revision))
            }
            ("GET", "/v1/stats/minimal/all") => {
                let revision = current_revision(&shared.config_path).await?;
                let data = build_minimal_all_data(shared.as_ref(), api_cfg).await;
                Ok(success_response(StatusCode::OK, data, revision))
            }
            ("GET", "/v1/stats/me-writers") => {
                let revision = current_revision(&shared.config_path).await?;
                let data = build_me_writers_data(shared.as_ref(), api_cfg).await;
                Ok(success_response(StatusCode::OK, data, revision))
            }
            ("GET", "/v1/stats/dcs") => {
                let revision = current_revision(&shared.config_path).await?;
                let data = build_dcs_data(shared.as_ref(), api_cfg).await;
                Ok(success_response(StatusCode::OK, data, revision))
            }
            ("GET", "/v1/runtime/me_pool_state") => {
                let revision = current_revision(&shared.config_path).await?;
                let data = build_runtime_me_pool_state_data(shared.as_ref()).await;
                Ok(success_response(StatusCode::OK, data, revision))
            }
            ("GET", "/v1/runtime/me_quality") => {
                let revision = current_revision(&shared.config_path).await?;
                let data = build_runtime_me_quality_data(shared.as_ref()).await;
                Ok(success_response(StatusCode::OK, data, revision))
            }
            ("GET", "/v1/runtime/upstream_quality") => {
                let revision = current_revision(&shared.config_path).await?;
                let data = build_runtime_upstream_quality_data(shared.as_ref()).await;
                Ok(success_response(StatusCode::OK, data, revision))
            }
            ("GET", "/v1/runtime/nat_stun") => {
                let revision = current_revision(&shared.config_path).await?;
                let data = build_runtime_nat_stun_data(shared.as_ref()).await;
                Ok(success_response(StatusCode::OK, data, revision))
            }
            ("GET", "/v1/runtime/me-selftest") => {
                let revision = current_revision(&shared.config_path).await?;
                let data = build_runtime_me_selftest_data(shared.as_ref(), cfg.as_ref()).await;
                Ok(success_response(StatusCode::OK, data, revision))
            }
            ("GET", "/v1/runtime/connections/summary") => {
                let revision = current_revision(&shared.config_path).await?;
                let data =
                    build_runtime_connections_summary_data(shared.as_ref(), cfg.as_ref()).await;
                Ok(success_response(StatusCode::OK, data, revision))
            }
            ("GET", "/v1/runtime/events/recent") => {
                let revision = current_revision(&shared.config_path).await?;
                let data = build_runtime_events_recent_data(
                    shared.as_ref(),
                    cfg.as_ref(),
                    query.as_deref(),
                );
                Ok(success_response(StatusCode::OK, data, revision))
            }
            ("GET", "/v1/stats/users/active-ips") => {
                let revision = current_revision(&shared.config_path).await?;
                let usernames: Vec<_> = cfg.access.users.keys().cloned().collect();
                let active_ips_map = shared.ip_tracker.get_active_ips_for_users(&usernames).await;
                let mut data: Vec<UserActiveIps> = active_ips_map
                    .into_iter()
                    .filter(|(_, ips)| !ips.is_empty())
                    .map(|(username, active_ips)| UserActiveIps {
                        username,
                        active_ips,
                    })
                    .collect();
                data.sort_by(|a, b| a.username.cmp(&b.username));
                Ok(success_response(StatusCode::OK, data, revision))
            }
            ("GET", "/v1/stats/users") | ("GET", "/v1/users") => {
                let revision = current_revision(&shared.config_path).await?;
                let disk_cfg = load_config_from_disk(&shared.config_path).await?;
                let runtime_cfg = config_rx.borrow().clone();
                let (detected_ip_v4, detected_ip_v6) = shared.detected_link_ips();
                let users = users_from_config(
                    &disk_cfg,
                    &shared.stats,
                    &shared.ip_tracker,
                    detected_ip_v4,
                    detected_ip_v6,
                    Some(runtime_cfg.as_ref()),
                )
                .await;
                Ok(success_response(StatusCode::OK, users, revision))
            }
            ("POST", "/v1/users") => {
                if api_cfg.read_only {
                    return Ok(error_response(
                        request_id,
                        ApiFailure::new(
                            StatusCode::FORBIDDEN,
                            "read_only",
                            "API runs in read-only mode",
                        ),
                    ));
                }
                let expected_revision = parse_if_match(req.headers());
                let body = read_json::<CreateUserRequest>(req.into_body(), body_limit).await?;
                let result = create_user(body, expected_revision, &shared).await;
                let (mut data, revision) = match result {
                    Ok(ok) => ok,
                    Err(error) => {
                        shared
                            .runtime_events
                            .record("api.user.create.failed", error.code);
                        return Err(error);
                    }
                };
                let runtime_cfg = config_rx.borrow().clone();
                data.user.in_runtime = runtime_cfg.access.users.contains_key(&data.user.username);
                shared.runtime_events.record(
                    "api.user.create.ok",
                    format!("username={}", data.user.username),
                );
                let status = if data.user.in_runtime {
                    StatusCode::CREATED
                } else {
                    StatusCode::ACCEPTED
                };
                Ok(success_response(status, data, revision))
            }
            _ => {
                if let Some(user) = path.strip_prefix("/v1/users/")
                    && !user.is_empty()
                {
                    // Rotate-secret paths contain '/', so the single-segment
                    // requirement is applied per method instead of in the
                    // guard above (otherwise the rotate-secret branch below
                    // would be unreachable).
                    let is_single_segment = !user.contains('/');
                    if method == Method::GET && is_single_segment {
                        let revision = current_revision(&shared.config_path).await?;
                        let disk_cfg = load_config_from_disk(&shared.config_path).await?;
                        let runtime_cfg = config_rx.borrow().clone();
                        let (detected_ip_v4, detected_ip_v6) = shared.detected_link_ips();
                        let users = users_from_config(
                            &disk_cfg,
                            &shared.stats,
                            &shared.ip_tracker,
                            detected_ip_v4,
                            detected_ip_v6,
                            Some(runtime_cfg.as_ref()),
                        )
                        .await;
                        if let Some(user_info) =
                            users.into_iter().find(|entry| entry.username == user)
                        {
                            return Ok(success_response(StatusCode::OK, user_info, revision));
                        }
                        return Ok(error_response(
                            request_id,
                            ApiFailure::new(StatusCode::NOT_FOUND, "not_found", "User not found"),
                        ));
                    }
                    if method == Method::PATCH && is_single_segment {
                        if api_cfg.read_only {
                            return Ok(error_response(
                                request_id,
                                ApiFailure::new(
                                    StatusCode::FORBIDDEN,
                                    "read_only",
                                    "API runs in read-only mode",
                                ),
                            ));
                        }
                        let expected_revision = parse_if_match(req.headers());
                        let body =
                            read_json::<PatchUserRequest>(req.into_body(), body_limit).await?;
                        let result = patch_user(user, body, expected_revision, &shared).await;
                        let (mut data, revision) = match result {
                            Ok(ok) => ok,
                            Err(error) => {
                                shared.runtime_events.record(
                                    "api.user.patch.failed",
                                    format!("username={} code={}", user, error.code),
                                );
                                return Err(error);
                            }
                        };
                        let runtime_cfg = config_rx.borrow().clone();
                        data.in_runtime = runtime_cfg.access.users.contains_key(&data.username);
                        shared
                            .runtime_events
                            .record("api.user.patch.ok", format!("username={}", data.username));
                        let status = if data.in_runtime {
                            StatusCode::OK
                        } else {
                            StatusCode::ACCEPTED
                        };
                        return Ok(success_response(status, data, revision));
                    }
                    if method == Method::DELETE && is_single_segment {
                        if api_cfg.read_only {
                            return Ok(error_response(
                                request_id,
                                ApiFailure::new(
                                    StatusCode::FORBIDDEN,
                                    "read_only",
                                    "API runs in read-only mode",
                                ),
                            ));
                        }
                        let expected_revision = parse_if_match(req.headers());
                        let result = delete_user(user, expected_revision, &shared).await;
                        let (deleted_user, revision) = match result {
                            Ok(ok) => ok,
                            Err(error) => {
                                shared.runtime_events.record(
                                    "api.user.delete.failed",
                                    format!("username={} code={}", user, error.code),
                                );
                                return Err(error);
                            }
                        };
                        shared
                            .runtime_events
                            .record("api.user.delete.ok", format!("username={}", deleted_user));
                        let runtime_cfg = config_rx.borrow().clone();
                        let in_runtime = runtime_cfg.access.users.contains_key(&deleted_user);
                        let response = DeleteUserResponse {
                            username: deleted_user,
                            in_runtime,
                        };
                        let status = if response.in_runtime {
                            StatusCode::ACCEPTED
                        } else {
                            StatusCode::OK
                        };
                        return Ok(success_response(status, response, revision));
                    }
                    if method == Method::POST
                        && let Some(base_user) = user.strip_suffix("/rotate-secret")
                        && !base_user.is_empty()
                        && !base_user.contains('/')
                    {
                        if api_cfg.read_only {
                            return Ok(error_response(
                                request_id,
                                ApiFailure::new(
                                    StatusCode::FORBIDDEN,
                                    "read_only",
                                    "API runs in read-only mode",
                                ),
                            ));
                        }
                        let expected_revision = parse_if_match(req.headers());
                        let body =
                            read_optional_json::<RotateSecretRequest>(req.into_body(), body_limit)
                                .await?;
                        let result = rotate_secret(
                            base_user,
                            body.unwrap_or_default(),
                            expected_revision,
                            &shared,
                        )
                        .await;
                        let (mut data, revision) = match result {
                            Ok(ok) => ok,
                            Err(error) => {
                                shared.runtime_events.record(
                                    "api.user.rotate_secret.failed",
                                    format!("username={} code={}", base_user, error.code),
                                );
                                return Err(error);
                            }
                        };
                        let runtime_cfg = config_rx.borrow().clone();
                        data.user.in_runtime =
                            runtime_cfg.access.users.contains_key(&data.user.username);
                        shared.runtime_events.record(
                            "api.user.rotate_secret.ok",
                            format!("username={}", base_user),
                        );
                        let status = if data.user.in_runtime {
                            StatusCode::OK
                        } else {
                            StatusCode::ACCEPTED
                        };
                        return Ok(success_response(status, data, revision));
                    }
                    if method == Method::POST {
                        return Ok(error_response(
                            request_id,
                            ApiFailure::new(StatusCode::NOT_FOUND, "not_found", "Route not found"),
                        ));
                    }
                    if is_single_segment {
                        return Ok(error_response(
                            request_id,
                            ApiFailure::new(
                                StatusCode::METHOD_NOT_ALLOWED,
                                "method_not_allowed",
                                "Unsupported HTTP method for this route",
                            ),
                        ));
                    }
                    return Ok(error_response(
                        request_id,
                        ApiFailure::new(StatusCode::NOT_FOUND, "not_found", "Route not found"),
                    ));
                }
                Ok(error_response(
                    request_id,
                    ApiFailure::new(StatusCode::NOT_FOUND, "not_found", "Route not found"),
                ))
            }
        }
    }
    .await;

    match result {
        Ok(resp) => Ok(resp),
        Err(error) => Ok(error_response(request_id, error)),
    }
}
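Taken together, the routing above gives clients a simple optimistic-concurrency workflow: take `revision` from any GET response, then echo it back via `If-Match` on a mutation and treat 409 `revision_conflict` as "re-read and retry". A hypothetical client sketch follows; it assumes the `reqwest` crate (not a dependency of this changeset), the default `127.0.0.1:9091` listen address from the installer output, and an illustrative username:

```rust
// Hypothetical client sketch, not part of this changeset.
use serde_json::Value;

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    let base = "http://127.0.0.1:9091"; // default listen from the installer
    let client = reqwest::Client::new();

    // 1. Read current state; every success envelope carries `revision`.
    let users: Value = client
        .get(format!("{base}/v1/users"))
        .send()
        .await?
        .json()
        .await?;
    let revision = users["revision"].as_str().unwrap_or_default().to_string();

    // 2. Mutate with If-Match; a concurrent edit yields 409 revision_conflict.
    let resp = client
        .post(format!("{base}/v1/users"))
        .header("If-Match", format!("\"{revision}\""))
        .json(&serde_json::json!({ "username": "alice" }))
        .send()
        .await?;
    println!("status={} body={}", resp.status(), resp.text().await?);
    Ok(())
}
```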
@ -0,0 +1,534 @@
use std::net::IpAddr;
use std::sync::OnceLock;

use chrono::{DateTime, Utc};
use hyper::StatusCode;
use serde::{Deserialize, Serialize};

use crate::crypto::SecureRandom;

const MAX_USERNAME_LEN: usize = 64;

#[derive(Debug)]
pub(super) struct ApiFailure {
    pub(super) status: StatusCode,
    pub(super) code: &'static str,
    pub(super) message: String,
}

impl ApiFailure {
    pub(super) fn new(status: StatusCode, code: &'static str, message: impl Into<String>) -> Self {
        Self {
            status,
            code,
            message: message.into(),
        }
    }

    pub(super) fn internal(message: impl Into<String>) -> Self {
        Self::new(StatusCode::INTERNAL_SERVER_ERROR, "internal_error", message)
    }

    pub(super) fn bad_request(message: impl Into<String>) -> Self {
        Self::new(StatusCode::BAD_REQUEST, "bad_request", message)
    }
}

#[derive(Serialize)]
pub(super) struct ErrorBody {
    pub(super) code: &'static str,
    pub(super) message: String,
}

#[derive(Serialize)]
pub(super) struct ErrorResponse {
    pub(super) ok: bool,
    pub(super) error: ErrorBody,
    pub(super) request_id: u64,
}

#[derive(Serialize)]
pub(super) struct SuccessResponse<T> {
    pub(super) ok: bool,
    pub(super) data: T,
    pub(super) revision: String,
}

#[derive(Serialize)]
pub(super) struct HealthData {
    pub(super) status: &'static str,
    pub(super) read_only: bool,
}

#[derive(Serialize)]
pub(super) struct SummaryData {
    pub(super) uptime_seconds: f64,
    pub(super) connections_total: u64,
    pub(super) connections_bad_total: u64,
    pub(super) handshake_timeouts_total: u64,
    pub(super) configured_users: usize,
}

#[derive(Serialize, Clone)]
pub(super) struct ZeroCodeCount {
    pub(super) code: i32,
    pub(super) total: u64,
}

#[derive(Serialize, Clone)]
pub(super) struct ZeroCoreData {
    pub(super) uptime_seconds: f64,
    pub(super) connections_total: u64,
    pub(super) connections_bad_total: u64,
    pub(super) handshake_timeouts_total: u64,
    pub(super) accept_permit_timeout_total: u64,
    pub(super) configured_users: usize,
    pub(super) telemetry_core_enabled: bool,
    pub(super) telemetry_user_enabled: bool,
    pub(super) telemetry_me_level: String,
    pub(super) conntrack_control_enabled: bool,
    pub(super) conntrack_control_available: bool,
    pub(super) conntrack_pressure_active: bool,
    pub(super) conntrack_event_queue_depth: u64,
    pub(super) conntrack_rule_apply_ok: bool,
    pub(super) conntrack_delete_attempt_total: u64,
    pub(super) conntrack_delete_success_total: u64,
    pub(super) conntrack_delete_not_found_total: u64,
    pub(super) conntrack_delete_error_total: u64,
    pub(super) conntrack_close_event_drop_total: u64,
}

#[derive(Serialize, Clone)]
pub(super) struct ZeroUpstreamData {
    pub(super) connect_attempt_total: u64,
    pub(super) connect_success_total: u64,
    pub(super) connect_fail_total: u64,
    pub(super) connect_failfast_hard_error_total: u64,
    pub(super) connect_attempts_bucket_1: u64,
    pub(super) connect_attempts_bucket_2: u64,
    pub(super) connect_attempts_bucket_3_4: u64,
    pub(super) connect_attempts_bucket_gt_4: u64,
    pub(super) connect_duration_success_bucket_le_100ms: u64,
    pub(super) connect_duration_success_bucket_101_500ms: u64,
    pub(super) connect_duration_success_bucket_501_1000ms: u64,
    pub(super) connect_duration_success_bucket_gt_1000ms: u64,
    pub(super) connect_duration_fail_bucket_le_100ms: u64,
    pub(super) connect_duration_fail_bucket_101_500ms: u64,
    pub(super) connect_duration_fail_bucket_501_1000ms: u64,
    pub(super) connect_duration_fail_bucket_gt_1000ms: u64,
}

#[derive(Serialize, Clone)]
pub(super) struct UpstreamDcStatus {
    pub(super) dc: i16,
    pub(super) latency_ema_ms: Option<f64>,
    pub(super) ip_preference: &'static str,
}

#[derive(Serialize, Clone)]
pub(super) struct UpstreamStatus {
    pub(super) upstream_id: usize,
    pub(super) route_kind: &'static str,
    pub(super) address: String,
    pub(super) weight: u16,
    pub(super) scopes: String,
    pub(super) healthy: bool,
    pub(super) fails: u32,
    pub(super) last_check_age_secs: u64,
    pub(super) effective_latency_ms: Option<f64>,
    pub(super) dc: Vec<UpstreamDcStatus>,
}

#[derive(Serialize, Clone)]
pub(super) struct UpstreamSummaryData {
    pub(super) configured_total: usize,
    pub(super) healthy_total: usize,
    pub(super) unhealthy_total: usize,
    pub(super) direct_total: usize,
    pub(super) socks4_total: usize,
    pub(super) socks5_total: usize,
    pub(super) shadowsocks_total: usize,
}

#[derive(Serialize, Clone)]
pub(super) struct UpstreamsData {
    pub(super) enabled: bool,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub(super) reason: Option<&'static str>,
    pub(super) generated_at_epoch_secs: u64,
    pub(super) zero: ZeroUpstreamData,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub(super) summary: Option<UpstreamSummaryData>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub(super) upstreams: Option<Vec<UpstreamStatus>>,
}

#[derive(Serialize, Clone)]
pub(super) struct ZeroMiddleProxyData {
    pub(super) keepalive_sent_total: u64,
    pub(super) keepalive_failed_total: u64,
    pub(super) keepalive_pong_total: u64,
    pub(super) keepalive_timeout_total: u64,
    pub(super) rpc_proxy_req_signal_sent_total: u64,
    pub(super) rpc_proxy_req_signal_failed_total: u64,
    pub(super) rpc_proxy_req_signal_skipped_no_meta_total: u64,
    pub(super) rpc_proxy_req_signal_response_total: u64,
    pub(super) rpc_proxy_req_signal_close_sent_total: u64,
    pub(super) reconnect_attempt_total: u64,
    pub(super) reconnect_success_total: u64,
    pub(super) handshake_reject_total: u64,
    pub(super) handshake_error_codes: Vec<ZeroCodeCount>,
    pub(super) reader_eof_total: u64,
    pub(super) idle_close_by_peer_total: u64,
    pub(super) route_drop_no_conn_total: u64,
    pub(super) route_drop_channel_closed_total: u64,
    pub(super) route_drop_queue_full_total: u64,
    pub(super) route_drop_queue_full_base_total: u64,
    pub(super) route_drop_queue_full_high_total: u64,
    pub(super) d2c_batches_total: u64,
    pub(super) d2c_batch_frames_total: u64,
    pub(super) d2c_batch_bytes_total: u64,
    pub(super) d2c_flush_reason_queue_drain_total: u64,
    pub(super) d2c_flush_reason_batch_frames_total: u64,
    pub(super) d2c_flush_reason_batch_bytes_total: u64,
    pub(super) d2c_flush_reason_max_delay_total: u64,
    pub(super) d2c_flush_reason_ack_immediate_total: u64,
    pub(super) d2c_flush_reason_close_total: u64,
    pub(super) d2c_data_frames_total: u64,
    pub(super) d2c_ack_frames_total: u64,
    pub(super) d2c_payload_bytes_total: u64,
    pub(super) d2c_write_mode_coalesced_total: u64,
    pub(super) d2c_write_mode_split_total: u64,
    pub(super) d2c_quota_reject_pre_write_total: u64,
    pub(super) d2c_quota_reject_post_write_total: u64,
    pub(super) d2c_frame_buf_shrink_total: u64,
    pub(super) d2c_frame_buf_shrink_bytes_total: u64,
    pub(super) socks_kdf_strict_reject_total: u64,
    pub(super) socks_kdf_compat_fallback_total: u64,
    pub(super) endpoint_quarantine_total: u64,
    pub(super) kdf_drift_total: u64,
    pub(super) kdf_port_only_drift_total: u64,
    pub(super) hardswap_pending_reuse_total: u64,
    pub(super) hardswap_pending_ttl_expired_total: u64,
    pub(super) single_endpoint_outage_enter_total: u64,
    pub(super) single_endpoint_outage_exit_total: u64,
    pub(super) single_endpoint_outage_reconnect_attempt_total: u64,
    pub(super) single_endpoint_outage_reconnect_success_total: u64,
    pub(super) single_endpoint_quarantine_bypass_total: u64,
    pub(super) single_endpoint_shadow_rotate_total: u64,
    pub(super) single_endpoint_shadow_rotate_skipped_quarantine_total: u64,
    pub(super) floor_mode_switch_total: u64,
    pub(super) floor_mode_switch_static_to_adaptive_total: u64,
    pub(super) floor_mode_switch_adaptive_to_static_total: u64,
}

#[derive(Serialize, Clone)]
pub(super) struct ZeroPoolData {
    pub(super) pool_swap_total: u64,
    pub(super) pool_drain_active: u64,
    pub(super) pool_force_close_total: u64,
    pub(super) pool_stale_pick_total: u64,
    pub(super) writer_removed_total: u64,
    pub(super) writer_removed_unexpected_total: u64,
    pub(super) refill_triggered_total: u64,
    pub(super) refill_skipped_inflight_total: u64,
    pub(super) refill_failed_total: u64,
    pub(super) writer_restored_same_endpoint_total: u64,
    pub(super) writer_restored_fallback_total: u64,
}

#[derive(Serialize, Clone)]
pub(super) struct ZeroDesyncData {
    pub(super) secure_padding_invalid_total: u64,
    pub(super) desync_total: u64,
    pub(super) desync_full_logged_total: u64,
    pub(super) desync_suppressed_total: u64,
    pub(super) desync_frames_bucket_0: u64,
    pub(super) desync_frames_bucket_1_2: u64,
    pub(super) desync_frames_bucket_3_10: u64,
    pub(super) desync_frames_bucket_gt_10: u64,
}

#[derive(Serialize, Clone)]
pub(super) struct ZeroAllData {
    pub(super) generated_at_epoch_secs: u64,
    pub(super) core: ZeroCoreData,
    pub(super) upstream: ZeroUpstreamData,
    pub(super) middle_proxy: ZeroMiddleProxyData,
    pub(super) pool: ZeroPoolData,
    pub(super) desync: ZeroDesyncData,
}

#[derive(Serialize, Clone)]
pub(super) struct MeWritersSummary {
    pub(super) configured_dc_groups: usize,
    pub(super) configured_endpoints: usize,
    pub(super) available_endpoints: usize,
    pub(super) available_pct: f64,
    pub(super) required_writers: usize,
    pub(super) alive_writers: usize,
    pub(super) coverage_pct: f64,
    pub(super) fresh_alive_writers: usize,
    pub(super) fresh_coverage_pct: f64,
}

#[derive(Serialize, Clone)]
pub(super) struct MeWriterStatus {
    pub(super) writer_id: u64,
    pub(super) dc: Option<i16>,
    pub(super) endpoint: String,
    pub(super) generation: u64,
    pub(super) state: &'static str,
    pub(super) draining: bool,
    pub(super) degraded: bool,
    pub(super) bound_clients: usize,
    pub(super) idle_for_secs: Option<u64>,
    pub(super) rtt_ema_ms: Option<f64>,
    pub(super) matches_active_generation: bool,
    pub(super) in_desired_map: bool,
    pub(super) allow_drain_fallback: bool,
    pub(super) drain_started_at_epoch_secs: Option<u64>,
    pub(super) drain_deadline_epoch_secs: Option<u64>,
    pub(super) drain_over_ttl: bool,
}

#[derive(Serialize, Clone)]
pub(super) struct MeWritersData {
    pub(super) middle_proxy_enabled: bool,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub(super) reason: Option<&'static str>,
    pub(super) generated_at_epoch_secs: u64,
    pub(super) summary: MeWritersSummary,
    pub(super) writers: Vec<MeWriterStatus>,
}

#[derive(Serialize, Clone)]
pub(super) struct DcStatus {
    pub(super) dc: i16,
    pub(super) endpoints: Vec<String>,
    pub(super) endpoint_writers: Vec<DcEndpointWriters>,
    pub(super) available_endpoints: usize,
    pub(super) available_pct: f64,
    pub(super) required_writers: usize,
    pub(super) floor_min: usize,
    pub(super) floor_target: usize,
    pub(super) floor_max: usize,
    pub(super) floor_capped: bool,
    pub(super) alive_writers: usize,
    pub(super) coverage_pct: f64,
    pub(super) fresh_alive_writers: usize,
    pub(super) fresh_coverage_pct: f64,
    pub(super) rtt_ms: Option<f64>,
    pub(super) load: usize,
}

#[derive(Serialize, Clone)]
pub(super) struct DcEndpointWriters {
    pub(super) endpoint: String,
    pub(super) active_writers: usize,
}

#[derive(Serialize, Clone)]
pub(super) struct DcStatusData {
    pub(super) middle_proxy_enabled: bool,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub(super) reason: Option<&'static str>,
    pub(super) generated_at_epoch_secs: u64,
    pub(super) dcs: Vec<DcStatus>,
}

#[derive(Serialize, Clone)]
pub(super) struct MinimalQuarantineData {
    pub(super) endpoint: String,
    pub(super) remaining_ms: u64,
}

#[derive(Serialize, Clone)]
pub(super) struct MinimalDcPathData {
    pub(super) dc: i16,
    pub(super) ip_preference: Option<&'static str>,
    pub(super) selected_addr_v4: Option<String>,
    pub(super) selected_addr_v6: Option<String>,
}

#[derive(Serialize, Clone)]
pub(super) struct MinimalMeRuntimeData {
    pub(super) active_generation: u64,
    pub(super) warm_generation: u64,
    pub(super) pending_hardswap_generation: u64,
    pub(super) pending_hardswap_age_secs: Option<u64>,
    pub(super) hardswap_enabled: bool,
    pub(super) floor_mode: &'static str,
    pub(super) adaptive_floor_idle_secs: u64,
    pub(super) adaptive_floor_min_writers_single_endpoint: u8,
    pub(super) adaptive_floor_min_writers_multi_endpoint: u8,
    pub(super) adaptive_floor_recover_grace_secs: u64,
    pub(super) adaptive_floor_writers_per_core_total: u16,
    pub(super) adaptive_floor_cpu_cores_override: u16,
    pub(super) adaptive_floor_max_extra_writers_single_per_core: u16,
    pub(super) adaptive_floor_max_extra_writers_multi_per_core: u16,
    pub(super) adaptive_floor_max_active_writers_per_core: u16,
    pub(super) adaptive_floor_max_warm_writers_per_core: u16,
    pub(super) adaptive_floor_max_active_writers_global: u32,
    pub(super) adaptive_floor_max_warm_writers_global: u32,
    pub(super) adaptive_floor_cpu_cores_detected: u32,
    pub(super) adaptive_floor_cpu_cores_effective: u32,
    pub(super) adaptive_floor_global_cap_raw: u64,
    pub(super) adaptive_floor_global_cap_effective: u64,
    pub(super) adaptive_floor_target_writers_total: u64,
    pub(super) adaptive_floor_active_cap_configured: u64,
    pub(super) adaptive_floor_active_cap_effective: u64,
    pub(super) adaptive_floor_warm_cap_configured: u64,
    pub(super) adaptive_floor_warm_cap_effective: u64,
    pub(super) adaptive_floor_active_writers_current: u64,
    pub(super) adaptive_floor_warm_writers_current: u64,
    pub(super) me_keepalive_enabled: bool,
    pub(super) me_keepalive_interval_secs: u64,
    pub(super) me_keepalive_jitter_secs: u64,
    pub(super) me_keepalive_payload_random: bool,
    pub(super) rpc_proxy_req_every_secs: u64,
    pub(super) me_reconnect_max_concurrent_per_dc: u32,
    pub(super) me_reconnect_backoff_base_ms: u64,
    pub(super) me_reconnect_backoff_cap_ms: u64,
    pub(super) me_reconnect_fast_retry_count: u32,
    pub(super) me_pool_drain_ttl_secs: u64,
    pub(super) me_pool_force_close_secs: u64,
    pub(super) me_pool_min_fresh_ratio: f32,
    pub(super) me_bind_stale_mode: &'static str,
    pub(super) me_bind_stale_ttl_secs: u64,
    pub(super) me_single_endpoint_shadow_writers: u8,
    pub(super) me_single_endpoint_outage_mode_enabled: bool,
    pub(super) me_single_endpoint_outage_disable_quarantine: bool,
    pub(super) me_single_endpoint_outage_backoff_min_ms: u64,
    pub(super) me_single_endpoint_outage_backoff_max_ms: u64,
    pub(super) me_single_endpoint_shadow_rotate_every_secs: u64,
    pub(super) me_deterministic_writer_sort: bool,
    pub(super) me_writer_pick_mode: &'static str,
    pub(super) me_writer_pick_sample_size: u8,
    pub(super) me_socks_kdf_policy: &'static str,
    pub(super) quarantined_endpoints_total: usize,
    pub(super) quarantined_endpoints: Vec<MinimalQuarantineData>,
}

#[derive(Serialize, Clone)]
pub(super) struct MinimalAllPayload {
    pub(super) me_writers: MeWritersData,
    pub(super) dcs: DcStatusData,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub(super) me_runtime: Option<MinimalMeRuntimeData>,
    pub(super) network_path: Vec<MinimalDcPathData>,
}

#[derive(Serialize, Clone)]
pub(super) struct MinimalAllData {
    pub(super) enabled: bool,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub(super) reason: Option<&'static str>,
    pub(super) generated_at_epoch_secs: u64,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub(super) data: Option<MinimalAllPayload>,
}

#[derive(Serialize)]
pub(super) struct UserLinks {
    pub(super) classic: Vec<String>,
    pub(super) secure: Vec<String>,
    pub(super) tls: Vec<String>,
}

#[derive(Serialize)]
pub(super) struct UserInfo {
    pub(super) username: String,
    pub(super) in_runtime: bool,
    pub(super) user_ad_tag: Option<String>,
    pub(super) max_tcp_conns: Option<usize>,
    pub(super) expiration_rfc3339: Option<String>,
    pub(super) data_quota_bytes: Option<u64>,
    pub(super) max_unique_ips: Option<usize>,
    pub(super) current_connections: u64,
    pub(super) active_unique_ips: usize,
    pub(super) active_unique_ips_list: Vec<IpAddr>,
    pub(super) recent_unique_ips: usize,
    pub(super) recent_unique_ips_list: Vec<IpAddr>,
    pub(super) total_octets: u64,
    pub(super) links: UserLinks,
}

#[derive(Serialize)]
pub(super) struct UserActiveIps {
    pub(super) username: String,
    pub(super) active_ips: Vec<IpAddr>,
}

#[derive(Serialize)]
pub(super) struct CreateUserResponse {
    pub(super) user: UserInfo,
    pub(super) secret: String,
}

#[derive(Serialize)]
pub(super) struct DeleteUserResponse {
    pub(super) username: String,
    pub(super) in_runtime: bool,
}

#[derive(Deserialize)]
pub(super) struct CreateUserRequest {
    pub(super) username: String,
    pub(super) secret: Option<String>,
    pub(super) user_ad_tag: Option<String>,
    pub(super) max_tcp_conns: Option<usize>,
    pub(super) expiration_rfc3339: Option<String>,
    pub(super) data_quota_bytes: Option<u64>,
    pub(super) max_unique_ips: Option<usize>,
}

#[derive(Deserialize)]
pub(super) struct PatchUserRequest {
    pub(super) secret: Option<String>,
    pub(super) user_ad_tag: Option<String>,
    pub(super) max_tcp_conns: Option<usize>,
    pub(super) expiration_rfc3339: Option<String>,
    pub(super) data_quota_bytes: Option<u64>,
    pub(super) max_unique_ips: Option<usize>,
}

#[derive(Default, Deserialize)]
pub(super) struct RotateSecretRequest {
    pub(super) secret: Option<String>,
}

pub(super) fn parse_optional_expiration(
    value: Option<&str>,
) -> Result<Option<DateTime<Utc>>, ApiFailure> {
    let Some(raw) = value else {
        return Ok(None);
    };
    let parsed = DateTime::parse_from_rfc3339(raw)
        .map_err(|_| ApiFailure::bad_request("expiration_rfc3339 must be valid RFC3339"))?;
    Ok(Some(parsed.with_timezone(&Utc)))
}

pub(super) fn is_valid_user_secret(secret: &str) -> bool {
    secret.len() == 32 && secret.chars().all(|c| c.is_ascii_hexdigit())
}

pub(super) fn is_valid_ad_tag(tag: &str) -> bool {
    tag.len() == 32 && tag.chars().all(|c| c.is_ascii_hexdigit())
}

pub(super) fn is_valid_username(user: &str) -> bool {
    !user.is_empty()
        && user.len() <= MAX_USERNAME_LEN
        && user
            .chars()
            .all(|ch| ch.is_ascii_alphanumeric() || matches!(ch, '_' | '-' | '.'))
}

pub(super) fn random_user_secret() -> String {
    static API_SECRET_RNG: OnceLock<SecureRandom> = OnceLock::new();
    let rng = API_SECRET_RNG.get_or_init(SecureRandom::new);
    let mut bytes = [0u8; 16];
    rng.fill(&mut bytes);
    hex::encode(bytes)
}
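The validators at the end of the file pin down the input grammar: secrets and ad tags are exactly 32 hex characters (16 random bytes), and usernames are 1 to 64 characters of ASCII alphanumerics plus `_`, `-`, and `.`. A quick standalone sketch of those invariants follows; it restates the module's helpers locally so it can run in isolation, and the sample inputs are made up:

```rust
// Sketch only: restates the validation invariants defined above so they can
// be checked in isolation. MAX_USERNAME_LEN mirrors the module constant.
const MAX_USERNAME_LEN: usize = 64;

fn is_valid_user_secret(secret: &str) -> bool {
    secret.len() == 32 && secret.chars().all(|c| c.is_ascii_hexdigit())
}

fn is_valid_username(user: &str) -> bool {
    !user.is_empty()
        && user.len() <= MAX_USERNAME_LEN
        && user
            .chars()
            .all(|ch| ch.is_ascii_alphanumeric() || matches!(ch, '_' | '-' | '.'))
}

fn main() {
    assert!(is_valid_user_secret("0123456789abcdef0123456789ABCDEF")); // 32 hex chars
    assert!(!is_valid_user_secret("0123")); // too short
    assert!(is_valid_username("tg-user.01_bot"));
    assert!(!is_valid_username("tg/user")); // '/' is rejected
}
```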
@ -0,0 +1,294 @@
use std::cmp::Reverse;
use std::time::{Duration, Instant, SystemTime, UNIX_EPOCH};

use serde::Serialize;

use crate::config::ProxyConfig;

use super::ApiShared;
use super::events::ApiEventRecord;

const FEATURE_DISABLED_REASON: &str = "feature_disabled";
const SOURCE_UNAVAILABLE_REASON: &str = "source_unavailable";
const EVENTS_DEFAULT_LIMIT: usize = 50;
const EVENTS_MAX_LIMIT: usize = 1000;

#[derive(Clone, Serialize)]
pub(super) struct RuntimeEdgeConnectionUserData {
    pub(super) username: String,
    pub(super) current_connections: u64,
    pub(super) total_octets: u64,
}

#[derive(Clone, Serialize)]
pub(super) struct RuntimeEdgeConnectionTotalsData {
    pub(super) current_connections: u64,
    pub(super) current_connections_me: u64,
    pub(super) current_connections_direct: u64,
    pub(super) active_users: usize,
}

#[derive(Clone, Serialize)]
pub(super) struct RuntimeEdgeConnectionTopData {
    pub(super) limit: usize,
    pub(super) by_connections: Vec<RuntimeEdgeConnectionUserData>,
    pub(super) by_throughput: Vec<RuntimeEdgeConnectionUserData>,
}

#[derive(Clone, Serialize)]
pub(super) struct RuntimeEdgeConnectionCacheData {
    pub(super) ttl_ms: u64,
    pub(super) served_from_cache: bool,
    pub(super) stale_cache_used: bool,
}

#[derive(Clone, Serialize)]
pub(super) struct RuntimeEdgeConnectionTelemetryData {
    pub(super) user_enabled: bool,
    pub(super) throughput_is_cumulative: bool,
}

#[derive(Clone, Serialize)]
pub(super) struct RuntimeEdgeConnectionsSummaryPayload {
    pub(super) cache: RuntimeEdgeConnectionCacheData,
    pub(super) totals: RuntimeEdgeConnectionTotalsData,
    pub(super) top: RuntimeEdgeConnectionTopData,
    pub(super) telemetry: RuntimeEdgeConnectionTelemetryData,
}

#[derive(Serialize)]
pub(super) struct RuntimeEdgeConnectionsSummaryData {
    pub(super) enabled: bool,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub(super) reason: Option<&'static str>,
    pub(super) generated_at_epoch_secs: u64,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub(super) data: Option<RuntimeEdgeConnectionsSummaryPayload>,
}

#[derive(Clone)]
pub(crate) struct EdgeConnectionsCacheEntry {
    pub(super) expires_at: Instant,
    pub(super) payload: RuntimeEdgeConnectionsSummaryPayload,
    pub(super) generated_at_epoch_secs: u64,
}

#[derive(Serialize)]
pub(super) struct RuntimeEdgeEventsPayload {
    pub(super) capacity: usize,
    pub(super) dropped_total: u64,
    pub(super) events: Vec<ApiEventRecord>,
}

#[derive(Serialize)]
pub(super) struct RuntimeEdgeEventsData {
    pub(super) enabled: bool,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub(super) reason: Option<&'static str>,
    pub(super) generated_at_epoch_secs: u64,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub(super) data: Option<RuntimeEdgeEventsPayload>,
}

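// Response envelope (descriptive comment, added for clarity): the
// runtime-edge endpoints in this module return `enabled`, an optional
// machine-readable `reason` ("feature_disabled" or "source_unavailable"),
// a generation timestamp, and `data` only when a payload could actually
// be produced.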
pub(super) async fn build_runtime_connections_summary_data(
    shared: &ApiShared,
    cfg: &ProxyConfig,
) -> RuntimeEdgeConnectionsSummaryData {
    let now_epoch_secs = now_epoch_secs();
    let api_cfg = &cfg.server.api;
    if !api_cfg.runtime_edge_enabled {
        return RuntimeEdgeConnectionsSummaryData {
            enabled: false,
            reason: Some(FEATURE_DISABLED_REASON),
            generated_at_epoch_secs: now_epoch_secs,
            data: None,
        };
    }

    let (generated_at_epoch_secs, payload) = match get_connections_payload_cached(
        shared,
        api_cfg.runtime_edge_cache_ttl_ms,
        api_cfg.runtime_edge_top_n,
    )
    .await
    {
        Some(v) => v,
        None => {
            return RuntimeEdgeConnectionsSummaryData {
                enabled: true,
                reason: Some(SOURCE_UNAVAILABLE_REASON),
                generated_at_epoch_secs: now_epoch_secs,
                data: None,
            };
        }
    };

    RuntimeEdgeConnectionsSummaryData {
        enabled: true,
        reason: None,
        generated_at_epoch_secs,
        data: Some(payload),
    }
}

pub(super) fn build_runtime_events_recent_data(
    shared: &ApiShared,
    cfg: &ProxyConfig,
    query: Option<&str>,
) -> RuntimeEdgeEventsData {
    let now_epoch_secs = now_epoch_secs();
    let api_cfg = &cfg.server.api;
    if !api_cfg.runtime_edge_enabled {
        return RuntimeEdgeEventsData {
            enabled: false,
            reason: Some(FEATURE_DISABLED_REASON),
            generated_at_epoch_secs: now_epoch_secs,
            data: None,
        };
    }

    let limit = parse_recent_events_limit(query, EVENTS_DEFAULT_LIMIT, EVENTS_MAX_LIMIT);
    let snapshot = shared.runtime_events.snapshot(limit);

    RuntimeEdgeEventsData {
        enabled: true,
        reason: None,
        generated_at_epoch_secs: now_epoch_secs,
        data: Some(RuntimeEdgeEventsPayload {
            capacity: snapshot.capacity,
            dropped_total: snapshot.dropped_total,
            events: snapshot.events,
        }),
    }
}

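// Cache strategy (descriptive comment, added for clarity): with a non-zero
// TTL, a fresh cached payload is served immediately and marked
// `served_from_cache`. If another task already holds the recompute lock,
// the previous payload is served as stale (`stale_cache_used`) instead of
// blocking; when nothing is cached at all the function returns None and
// the caller reports "source_unavailable".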
async fn get_connections_payload_cached(
    shared: &ApiShared,
    cache_ttl_ms: u64,
    top_n: usize,
) -> Option<(u64, RuntimeEdgeConnectionsSummaryPayload)> {
    if cache_ttl_ms > 0 {
        let now = Instant::now();
        let cached = shared.runtime_edge_connections_cache.lock().await.clone();
        if let Some(entry) = cached
            && now < entry.expires_at
        {
            let mut payload = entry.payload;
            payload.cache.served_from_cache = true;
            payload.cache.stale_cache_used = false;
            return Some((entry.generated_at_epoch_secs, payload));
        }
    }

    let Ok(_guard) = shared.runtime_edge_recompute_lock.try_lock() else {
        let cached = shared.runtime_edge_connections_cache.lock().await.clone();
        if let Some(entry) = cached {
            let mut payload = entry.payload;
            payload.cache.served_from_cache = true;
            payload.cache.stale_cache_used = true;
            return Some((entry.generated_at_epoch_secs, payload));
        }
        return None;
    };

    let generated_at_epoch_secs = now_epoch_secs();
    let payload = recompute_connections_payload(shared, cache_ttl_ms, top_n).await;

    if cache_ttl_ms > 0 {
        let entry = EdgeConnectionsCacheEntry {
            expires_at: Instant::now() + Duration::from_millis(cache_ttl_ms),
            payload: payload.clone(),
            generated_at_epoch_secs,
        };
        *shared.runtime_edge_connections_cache.lock().await = Some(entry);
    }

    Some((generated_at_epoch_secs, payload))
}

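// Sorting note (descriptive comment, added for clarity): both top-N lists
// sort on (Reverse(metric), username), so ties on the metric break
// deterministically by username before truncating to `limit`.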
async fn recompute_connections_payload(
    shared: &ApiShared,
    cache_ttl_ms: u64,
    top_n: usize,
) -> RuntimeEdgeConnectionsSummaryPayload {
    let mut rows = Vec::<RuntimeEdgeConnectionUserData>::new();
    let mut active_users = 0usize;
    for entry in shared.stats.iter_user_stats() {
        let user_stats = entry.value();
        let current_connections = user_stats
            .curr_connects
            .load(std::sync::atomic::Ordering::Relaxed);
        let total_octets = user_stats
            .octets_from_client
            .load(std::sync::atomic::Ordering::Relaxed)
            .saturating_add(
                user_stats
                    .octets_to_client
                    .load(std::sync::atomic::Ordering::Relaxed),
            );
        if current_connections > 0 {
            active_users = active_users.saturating_add(1);
        }
        rows.push(RuntimeEdgeConnectionUserData {
            username: entry.key().clone(),
            current_connections,
            total_octets,
        });
    }

    let limit = top_n.max(1);
    let mut by_connections = rows.clone();
    by_connections.sort_by_key(|row| (Reverse(row.current_connections), row.username.clone()));
    by_connections.truncate(limit);

    let mut by_throughput = rows;
    by_throughput.sort_by_key(|row| (Reverse(row.total_octets), row.username.clone()));
    by_throughput.truncate(limit);

    let telemetry = shared.stats.telemetry_policy();
    RuntimeEdgeConnectionsSummaryPayload {
        cache: RuntimeEdgeConnectionCacheData {
            ttl_ms: cache_ttl_ms,
            served_from_cache: false,
            stale_cache_used: false,
        },
        totals: RuntimeEdgeConnectionTotalsData {
            current_connections: shared.stats.get_current_connections_total(),
            current_connections_me: shared.stats.get_current_connections_me(),
            current_connections_direct: shared.stats.get_current_connections_direct(),
            active_users,
        },
        top: RuntimeEdgeConnectionTopData {
            limit,
            by_connections,
            by_throughput,
        },
        telemetry: RuntimeEdgeConnectionTelemetryData {
            user_enabled: telemetry.user_enabled,
            throughput_is_cumulative: true,
        },
    }
}

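// Examples (descriptive comment, added for clarity): "limit=5000" clamps
// down to the 1000 maximum, "limit=0" clamps up to 1, and a malformed or
// missing value falls back to the default of 50.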
fn parse_recent_events_limit(query: Option<&str>, default_limit: usize, max_limit: usize) -> usize {
    let Some(query) = query else {
        return default_limit;
    };
    for pair in query.split('&') {
        let mut split = pair.splitn(2, '=');
        if split.next() == Some("limit")
            && let Some(raw) = split.next()
            && let Ok(parsed) = raw.parse::<usize>()
        {
            return parsed.clamp(1, max_limit);
        }
    }
    default_limit
}

fn now_epoch_secs() -> u64 {
    SystemTime::now()
        .duration_since(UNIX_EPOCH)
        .unwrap_or_default()
        .as_secs()
}

@ -0,0 +1,182 @@
use serde::Serialize;

use crate::startup::{
    COMPONENT_ME_CONNECTIVITY_PING, COMPONENT_ME_POOL_CONSTRUCT, COMPONENT_ME_POOL_INIT_STAGE1,
    COMPONENT_ME_PROXY_CONFIG_V4, COMPONENT_ME_PROXY_CONFIG_V6, COMPONENT_ME_SECRET_FETCH,
    StartupComponentStatus, StartupMeStatus, compute_progress_pct,
};

use super::ApiShared;

#[derive(Serialize)]
pub(super) struct RuntimeInitializationComponentData {
    pub(super) id: &'static str,
    pub(super) title: &'static str,
    pub(super) status: &'static str,
    pub(super) started_at_epoch_ms: Option<u64>,
    pub(super) finished_at_epoch_ms: Option<u64>,
    pub(super) duration_ms: Option<u64>,
    pub(super) attempts: u32,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub(super) details: Option<String>,
}

#[derive(Serialize)]
pub(super) struct RuntimeInitializationMeData {
    pub(super) status: &'static str,
    pub(super) current_stage: String,
    pub(super) progress_pct: f64,
    pub(super) init_attempt: u32,
    pub(super) retry_limit: String,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub(super) last_error: Option<String>,
}

#[derive(Serialize)]
pub(super) struct RuntimeInitializationData {
    pub(super) status: &'static str,
    pub(super) degraded: bool,
    pub(super) current_stage: String,
    pub(super) progress_pct: f64,
    pub(super) started_at_epoch_secs: u64,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub(super) ready_at_epoch_secs: Option<u64>,
    pub(super) total_elapsed_ms: u64,
    pub(super) transport_mode: String,
    pub(super) me: RuntimeInitializationMeData,
    pub(super) components: Vec<RuntimeInitializationComponentData>,
}

#[derive(Clone)]
pub(super) struct RuntimeStartupSummaryData {
    pub(super) status: &'static str,
    pub(super) stage: String,
    pub(super) progress_pct: f64,
}

pub(super) async fn build_runtime_startup_summary(shared: &ApiShared) -> RuntimeStartupSummaryData {
    let snapshot = shared.startup_tracker.snapshot().await;
    let me_pool_progress = current_me_pool_stage_progress(shared).await;
    let progress_pct = compute_progress_pct(&snapshot, me_pool_progress);
    RuntimeStartupSummaryData {
        status: snapshot.status.as_str(),
        stage: snapshot.current_stage,
        progress_pct,
    }
}

pub(super) async fn build_runtime_initialization_data(
    shared: &ApiShared,
) -> RuntimeInitializationData {
    let snapshot = shared.startup_tracker.snapshot().await;
    let me_pool_progress = current_me_pool_stage_progress(shared).await;
    let progress_pct = compute_progress_pct(&snapshot, me_pool_progress);
    let me_progress_pct = compute_me_progress_pct(&snapshot, me_pool_progress);

    RuntimeInitializationData {
        status: snapshot.status.as_str(),
        degraded: snapshot.degraded,
        current_stage: snapshot.current_stage,
        progress_pct,
        started_at_epoch_secs: snapshot.started_at_epoch_secs,
        ready_at_epoch_secs: snapshot.ready_at_epoch_secs,
        total_elapsed_ms: snapshot.total_elapsed_ms,
        transport_mode: snapshot.transport_mode,
        me: RuntimeInitializationMeData {
            status: snapshot.me.status.as_str(),
            current_stage: snapshot.me.current_stage,
            progress_pct: me_progress_pct,
            init_attempt: snapshot.me.init_attempt,
            retry_limit: snapshot.me.retry_limit,
            last_error: snapshot.me.last_error,
        },
        components: snapshot
            .components
            .into_iter()
            .map(|component| RuntimeInitializationComponentData {
                id: component.id,
                title: component.title,
                status: component.status.as_str(),
                started_at_epoch_ms: component.started_at_epoch_ms,
                finished_at_epoch_ms: component.finished_at_epoch_ms,
                duration_ms: component.duration_ms,
                attempts: component.attempts,
                details: component.details,
            })
            .collect(),
    }
}

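// Progress model (descriptive comment, added for clarity): the ME progress
// percentage is a weighted average over the ME components only. Terminal
// component states (Ready/Failed/Skipped) count as fully complete; while
// Running, only the pool-init stage-1 component contributes partial
// progress (taken from the live pool snapshot), every other Running
// component counts as 0.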
fn compute_me_progress_pct(
    snapshot: &crate::startup::StartupSnapshot,
    me_pool_progress: Option<f64>,
) -> f64 {
    match snapshot.me.status {
        StartupMeStatus::Pending => 0.0,
        StartupMeStatus::Ready | StartupMeStatus::Failed | StartupMeStatus::Skipped => 100.0,
        StartupMeStatus::Initializing => {
            let mut total_weight = 0.0f64;
            let mut completed_weight = 0.0f64;
            for component in &snapshot.components {
                if !is_me_component(component.id) {
                    continue;
                }
                total_weight += component.weight;
                let unit_progress = match component.status {
                    StartupComponentStatus::Pending => 0.0,
                    StartupComponentStatus::Running => {
                        if component.id == COMPONENT_ME_POOL_INIT_STAGE1 {
                            me_pool_progress.unwrap_or(0.0).clamp(0.0, 1.0)
                        } else {
                            0.0
                        }
                    }
                    StartupComponentStatus::Ready
                    | StartupComponentStatus::Failed
                    | StartupComponentStatus::Skipped => 1.0,
                };
                completed_weight += component.weight * unit_progress;
            }
            if total_weight <= f64::EPSILON {
                0.0
            } else {
                ((completed_weight / total_weight) * 100.0).clamp(0.0, 100.0)
            }
        }
    }
}

fn is_me_component(component_id: &str) -> bool {
    matches!(
        component_id,
        COMPONENT_ME_SECRET_FETCH
            | COMPONENT_ME_PROXY_CONFIG_V4
            | COMPONENT_ME_PROXY_CONFIG_V6
            | COMPONENT_ME_POOL_CONSTRUCT
            | COMPONENT_ME_POOL_INIT_STAGE1
            | COMPONENT_ME_CONNECTIVITY_PING
    )
}

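// Blend weights (descriptive comment, added for clarity): stage-1 pool
// progress is 0.7 * DC-group coverage + 0.3 * writer coverage, where a DC
// group counts as covered once it has at least one alive writer; the
// result is clamped to [0, 1].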
async fn current_me_pool_stage_progress(shared: &ApiShared) -> Option<f64> {
    let snapshot = shared.startup_tracker.snapshot().await;
    if snapshot.me.status != StartupMeStatus::Initializing {
        return None;
    }

    let pool = shared.me_pool.read().await.clone()?;
    let status = pool.api_status_snapshot().await;
    let configured_dc_groups = status.configured_dc_groups;
    let covered_dc_groups = status.dcs.iter().filter(|dc| dc.alive_writers > 0).count();

    let dc_coverage = ratio_01(covered_dc_groups, configured_dc_groups);
    let writer_coverage = ratio_01(status.alive_writers, status.required_writers);
    Some((0.7 * dc_coverage + 0.3 * writer_coverage).clamp(0.0, 1.0))
}

fn ratio_01(part: usize, total: usize) -> f64 {
    if total == 0 {
        return 0.0;
    }
    ((part as f64) / (total as f64)).clamp(0.0, 1.0)
}

@ -0,0 +1,586 @@
use std::collections::BTreeSet;
use std::time::{SystemTime, UNIX_EPOCH};

use serde::Serialize;

use crate::config::ProxyConfig;

use super::ApiShared;

const SOURCE_UNAVAILABLE_REASON: &str = "source_unavailable";

#[derive(Serialize)]
pub(super) struct SecurityWhitelistData {
    pub(super) generated_at_epoch_secs: u64,
    pub(super) enabled: bool,
    pub(super) entries_total: usize,
    pub(super) entries: Vec<String>,
}

#[derive(Serialize)]
pub(super) struct RuntimeMePoolStateGenerationData {
    pub(super) active_generation: u64,
    pub(super) warm_generation: u64,
    pub(super) pending_hardswap_generation: u64,
    pub(super) pending_hardswap_age_secs: Option<u64>,
    pub(super) draining_generations: Vec<u64>,
}

#[derive(Serialize)]
pub(super) struct RuntimeMePoolStateHardswapData {
    pub(super) enabled: bool,
    pub(super) pending: bool,
}

#[derive(Serialize)]
pub(super) struct RuntimeMePoolStateWriterContourData {
    pub(super) warm: usize,
    pub(super) active: usize,
    pub(super) draining: usize,
}

#[derive(Serialize)]
pub(super) struct RuntimeMePoolStateWriterHealthData {
    pub(super) healthy: usize,
    pub(super) degraded: usize,
    pub(super) draining: usize,
}

#[derive(Serialize)]
pub(super) struct RuntimeMePoolStateWriterData {
    pub(super) total: usize,
    pub(super) alive_non_draining: usize,
    pub(super) draining: usize,
    pub(super) degraded: usize,
    pub(super) contour: RuntimeMePoolStateWriterContourData,
    pub(super) health: RuntimeMePoolStateWriterHealthData,
}

#[derive(Serialize)]
pub(super) struct RuntimeMePoolStateRefillDcData {
    pub(super) dc: i16,
    pub(super) family: &'static str,
    pub(super) inflight: usize,
}

#[derive(Serialize)]
pub(super) struct RuntimeMePoolStateRefillData {
    pub(super) inflight_endpoints_total: usize,
    pub(super) inflight_dc_total: usize,
    pub(super) by_dc: Vec<RuntimeMePoolStateRefillDcData>,
}

#[derive(Serialize)]
pub(super) struct RuntimeMePoolStatePayload {
    pub(super) generations: RuntimeMePoolStateGenerationData,
    pub(super) hardswap: RuntimeMePoolStateHardswapData,
    pub(super) writers: RuntimeMePoolStateWriterData,
    pub(super) refill: RuntimeMePoolStateRefillData,
}

#[derive(Serialize)]
pub(super) struct RuntimeMePoolStateData {
    pub(super) enabled: bool,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub(super) reason: Option<&'static str>,
    pub(super) generated_at_epoch_secs: u64,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub(super) data: Option<RuntimeMePoolStatePayload>,
}

#[derive(Serialize)]
pub(super) struct RuntimeMeQualityCountersData {
    pub(super) idle_close_by_peer_total: u64,
    pub(super) reader_eof_total: u64,
    pub(super) kdf_drift_total: u64,
    pub(super) kdf_port_only_drift_total: u64,
    pub(super) reconnect_attempt_total: u64,
    pub(super) reconnect_success_total: u64,
}

#[derive(Serialize)]
pub(super) struct RuntimeMeQualityRouteDropData {
    pub(super) no_conn_total: u64,
    pub(super) channel_closed_total: u64,
    pub(super) queue_full_total: u64,
    pub(super) queue_full_base_total: u64,
    pub(super) queue_full_high_total: u64,
}

#[derive(Serialize)]
pub(super) struct RuntimeMeQualityFamilyStateData {
    pub(super) family: &'static str,
    pub(super) state: &'static str,
    pub(super) state_since_epoch_secs: u64,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub(super) suppressed_until_epoch_secs: Option<u64>,
    pub(super) fail_streak: u32,
    pub(super) recover_success_streak: u32,
}

#[derive(Serialize)]
pub(super) struct RuntimeMeQualityDrainGateData {
    pub(super) route_quorum_ok: bool,
    pub(super) redundancy_ok: bool,
    pub(super) block_reason: &'static str,
    pub(super) updated_at_epoch_secs: u64,
}

#[derive(Serialize)]
pub(super) struct RuntimeMeQualityDcRttData {
    pub(super) dc: i16,
    pub(super) rtt_ema_ms: Option<f64>,
    pub(super) alive_writers: usize,
    pub(super) required_writers: usize,
    pub(super) coverage_pct: f64,
}

#[derive(Serialize)]
pub(super) struct RuntimeMeQualityPayload {
    pub(super) counters: RuntimeMeQualityCountersData,
    pub(super) route_drops: RuntimeMeQualityRouteDropData,
    pub(super) family_states: Vec<RuntimeMeQualityFamilyStateData>,
    pub(super) drain_gate: RuntimeMeQualityDrainGateData,
    pub(super) dc_rtt: Vec<RuntimeMeQualityDcRttData>,
}

#[derive(Serialize)]
pub(super) struct RuntimeMeQualityData {
    pub(super) enabled: bool,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub(super) reason: Option<&'static str>,
    pub(super) generated_at_epoch_secs: u64,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub(super) data: Option<RuntimeMeQualityPayload>,
}

#[derive(Serialize)]
pub(super) struct RuntimeUpstreamQualityPolicyData {
    pub(super) connect_retry_attempts: u32,
    pub(super) connect_retry_backoff_ms: u64,
    pub(super) connect_budget_ms: u64,
    pub(super) unhealthy_fail_threshold: u32,
    pub(super) connect_failfast_hard_errors: bool,
}

#[derive(Serialize)]
pub(super) struct RuntimeUpstreamQualityCountersData {
    pub(super) connect_attempt_total: u64,
    pub(super) connect_success_total: u64,
    pub(super) connect_fail_total: u64,
    pub(super) connect_failfast_hard_error_total: u64,
}

#[derive(Serialize)]
pub(super) struct RuntimeUpstreamQualitySummaryData {
    pub(super) configured_total: usize,
    pub(super) healthy_total: usize,
    pub(super) unhealthy_total: usize,
    pub(super) direct_total: usize,
    pub(super) socks4_total: usize,
    pub(super) socks5_total: usize,
    pub(super) shadowsocks_total: usize,
}

#[derive(Serialize)]
pub(super) struct RuntimeUpstreamQualityDcData {
    pub(super) dc: i16,
    pub(super) latency_ema_ms: Option<f64>,
    pub(super) ip_preference: &'static str,
}

#[derive(Serialize)]
pub(super) struct RuntimeUpstreamQualityUpstreamData {
    pub(super) upstream_id: usize,
    pub(super) route_kind: &'static str,
    pub(super) address: String,
    pub(super) weight: u16,
    pub(super) scopes: String,
    pub(super) healthy: bool,
    pub(super) fails: u32,
    pub(super) last_check_age_secs: u64,
    pub(super) effective_latency_ms: Option<f64>,
    pub(super) dc: Vec<RuntimeUpstreamQualityDcData>,
}

#[derive(Serialize)]
pub(super) struct RuntimeUpstreamQualityData {
    pub(super) enabled: bool,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub(super) reason: Option<&'static str>,
    pub(super) generated_at_epoch_secs: u64,
    pub(super) policy: RuntimeUpstreamQualityPolicyData,
    pub(super) counters: RuntimeUpstreamQualityCountersData,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub(super) summary: Option<RuntimeUpstreamQualitySummaryData>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub(super) upstreams: Option<Vec<RuntimeUpstreamQualityUpstreamData>>,
}

#[derive(Serialize)]
pub(super) struct RuntimeNatStunReflectionData {
    pub(super) addr: String,
    pub(super) age_secs: u64,
}

#[derive(Serialize)]
pub(super) struct RuntimeNatStunFlagsData {
    pub(super) nat_probe_enabled: bool,
    pub(super) nat_probe_disabled_runtime: bool,
    pub(super) nat_probe_attempts: u8,
}

#[derive(Serialize)]
pub(super) struct RuntimeNatStunServersData {
    pub(super) configured: Vec<String>,
    pub(super) live: Vec<String>,
    pub(super) live_total: usize,
}

#[derive(Serialize)]
pub(super) struct RuntimeNatStunReflectionBlockData {
    #[serde(skip_serializing_if = "Option::is_none")]
    pub(super) v4: Option<RuntimeNatStunReflectionData>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub(super) v6: Option<RuntimeNatStunReflectionData>,
}

#[derive(Serialize)]
pub(super) struct RuntimeNatStunPayload {
    pub(super) flags: RuntimeNatStunFlagsData,
    pub(super) servers: RuntimeNatStunServersData,
    pub(super) reflection: RuntimeNatStunReflectionBlockData,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub(super) stun_backoff_remaining_ms: Option<u64>,
}

#[derive(Serialize)]
pub(super) struct RuntimeNatStunData {
    pub(super) enabled: bool,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub(super) reason: Option<&'static str>,
    pub(super) generated_at_epoch_secs: u64,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub(super) data: Option<RuntimeNatStunPayload>,
}

pub(super) fn build_security_whitelist_data(cfg: &ProxyConfig) -> SecurityWhitelistData {
    let entries = cfg
        .server
        .api
        .whitelist
        .iter()
        .map(ToString::to_string)
        .collect::<Vec<_>>();
    SecurityWhitelistData {
        generated_at_epoch_secs: now_epoch_secs(),
        enabled: !entries.is_empty(),
        entries_total: entries.len(),
        entries,
    }
}

pub(super) async fn build_runtime_me_pool_state_data(shared: &ApiShared) -> RuntimeMePoolStateData {
    let now_epoch_secs = now_epoch_secs();
    let Some(pool) = shared.me_pool.read().await.clone() else {
        return RuntimeMePoolStateData {
            enabled: false,
            reason: Some(SOURCE_UNAVAILABLE_REASON),
            generated_at_epoch_secs: now_epoch_secs,
            data: None,
        };
    };

    let status = pool.api_status_snapshot().await;
    let runtime = pool.api_runtime_snapshot().await;
    let refill = pool.api_refill_snapshot().await;

    let mut draining_generations = BTreeSet::<u64>::new();
    let mut contour_warm = 0usize;
    let mut contour_active = 0usize;
    let mut contour_draining = 0usize;
    let mut draining = 0usize;
    let mut degraded = 0usize;
    let mut healthy = 0usize;

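    // Classification (descriptive comment, added for clarity): draining
    // takes precedence over degraded, and healthy means neither flag is
    // set, so the three health counters partition the writer set and
    // always sum to the total.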
    for writer in &status.writers {
        if writer.draining {
            draining_generations.insert(writer.generation);
            draining += 1;
        }
        if writer.degraded && !writer.draining {
            degraded += 1;
        }
        if !writer.degraded && !writer.draining {
            healthy += 1;
        }
        match writer.state {
            "warm" => contour_warm += 1,
            "active" => contour_active += 1,
            _ => contour_draining += 1,
        }
    }

    RuntimeMePoolStateData {
        enabled: true,
        reason: None,
        generated_at_epoch_secs: status.generated_at_epoch_secs,
        data: Some(RuntimeMePoolStatePayload {
            generations: RuntimeMePoolStateGenerationData {
                active_generation: runtime.active_generation,
                warm_generation: runtime.warm_generation,
                pending_hardswap_generation: runtime.pending_hardswap_generation,
                pending_hardswap_age_secs: runtime.pending_hardswap_age_secs,
                draining_generations: draining_generations.into_iter().collect(),
            },
            hardswap: RuntimeMePoolStateHardswapData {
                enabled: runtime.hardswap_enabled,
                pending: runtime.pending_hardswap_generation != 0,
            },
            writers: RuntimeMePoolStateWriterData {
                total: status.writers.len(),
                alive_non_draining: status.writers.len().saturating_sub(draining),
                draining,
                degraded,
                contour: RuntimeMePoolStateWriterContourData {
                    warm: contour_warm,
                    active: contour_active,
                    draining: contour_draining,
                },
                health: RuntimeMePoolStateWriterHealthData {
                    healthy,
                    degraded,
                    draining,
                },
            },
            refill: RuntimeMePoolStateRefillData {
                inflight_endpoints_total: refill.inflight_endpoints_total,
                inflight_dc_total: refill.inflight_dc_total,
                by_dc: refill
                    .by_dc
                    .into_iter()
                    .map(|entry| RuntimeMePoolStateRefillDcData {
                        dc: entry.dc,
                        family: entry.family,
                        inflight: entry.inflight,
                    })
                    .collect(),
            },
        }),
    }
}

pub(super) async fn build_runtime_me_quality_data(shared: &ApiShared) -> RuntimeMeQualityData {
    let now_epoch_secs = now_epoch_secs();
    let Some(pool) = shared.me_pool.read().await.clone() else {
        return RuntimeMeQualityData {
            enabled: false,
            reason: Some(SOURCE_UNAVAILABLE_REASON),
            generated_at_epoch_secs: now_epoch_secs,
            data: None,
        };
    };

    let status = pool.api_status_snapshot().await;
    let family_states = pool
        .api_family_state_snapshot()
        .into_iter()
        .map(|entry| RuntimeMeQualityFamilyStateData {
            family: entry.family,
            state: entry.state,
            state_since_epoch_secs: entry.state_since_epoch_secs,
            suppressed_until_epoch_secs: entry.suppressed_until_epoch_secs,
            fail_streak: entry.fail_streak,
            recover_success_streak: entry.recover_success_streak,
        })
        .collect();
    let drain_gate_snapshot = pool.api_drain_gate_snapshot();
    RuntimeMeQualityData {
        enabled: true,
        reason: None,
        generated_at_epoch_secs: status.generated_at_epoch_secs,
        data: Some(RuntimeMeQualityPayload {
            counters: RuntimeMeQualityCountersData {
                idle_close_by_peer_total: shared.stats.get_me_idle_close_by_peer_total(),
                reader_eof_total: shared.stats.get_me_reader_eof_total(),
                kdf_drift_total: shared.stats.get_me_kdf_drift_total(),
                kdf_port_only_drift_total: shared.stats.get_me_kdf_port_only_drift_total(),
                reconnect_attempt_total: shared.stats.get_me_reconnect_attempts(),
                reconnect_success_total: shared.stats.get_me_reconnect_success(),
            },
            route_drops: RuntimeMeQualityRouteDropData {
                no_conn_total: shared.stats.get_me_route_drop_no_conn(),
                channel_closed_total: shared.stats.get_me_route_drop_channel_closed(),
                queue_full_total: shared.stats.get_me_route_drop_queue_full(),
                queue_full_base_total: shared.stats.get_me_route_drop_queue_full_base(),
                queue_full_high_total: shared.stats.get_me_route_drop_queue_full_high(),
            },
            family_states,
            drain_gate: RuntimeMeQualityDrainGateData {
                route_quorum_ok: drain_gate_snapshot.route_quorum_ok,
                redundancy_ok: drain_gate_snapshot.redundancy_ok,
                block_reason: drain_gate_snapshot.block_reason,
                updated_at_epoch_secs: drain_gate_snapshot.updated_at_epoch_secs,
            },
            dc_rtt: status
                .dcs
                .into_iter()
                .map(|dc| RuntimeMeQualityDcRttData {
                    dc: dc.dc,
                    rtt_ema_ms: dc.rtt_ms,
                    alive_writers: dc.alive_writers,
                    required_writers: dc.required_writers,
                    coverage_pct: dc.coverage_pct,
                })
                .collect(),
        }),
    }
}

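// Envelope nuance (descriptive comment, added for clarity): unlike the
// pool-state and quality builders above, this one still reports the
// configured policy and the global connect counters on the
// "source_unavailable" path; only the summary and per-upstream list are
// omitted.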
pub(super) async fn build_runtime_upstream_quality_data(
    shared: &ApiShared,
) -> RuntimeUpstreamQualityData {
    let generated_at_epoch_secs = now_epoch_secs();
    let policy = shared.upstream_manager.api_policy_snapshot();
    let counters = RuntimeUpstreamQualityCountersData {
        connect_attempt_total: shared.stats.get_upstream_connect_attempt_total(),
        connect_success_total: shared.stats.get_upstream_connect_success_total(),
        connect_fail_total: shared.stats.get_upstream_connect_fail_total(),
        connect_failfast_hard_error_total: shared
            .stats
            .get_upstream_connect_failfast_hard_error_total(),
    };

    let Some(snapshot) = shared.upstream_manager.try_api_snapshot() else {
        return RuntimeUpstreamQualityData {
            enabled: false,
            reason: Some(SOURCE_UNAVAILABLE_REASON),
            generated_at_epoch_secs,
            policy: RuntimeUpstreamQualityPolicyData {
                connect_retry_attempts: policy.connect_retry_attempts,
                connect_retry_backoff_ms: policy.connect_retry_backoff_ms,
                connect_budget_ms: policy.connect_budget_ms,
                unhealthy_fail_threshold: policy.unhealthy_fail_threshold,
                connect_failfast_hard_errors: policy.connect_failfast_hard_errors,
            },
            counters,
            summary: None,
            upstreams: None,
        };
    };

    RuntimeUpstreamQualityData {
        enabled: true,
        reason: None,
        generated_at_epoch_secs,
        policy: RuntimeUpstreamQualityPolicyData {
            connect_retry_attempts: policy.connect_retry_attempts,
            connect_retry_backoff_ms: policy.connect_retry_backoff_ms,
            connect_budget_ms: policy.connect_budget_ms,
            unhealthy_fail_threshold: policy.unhealthy_fail_threshold,
            connect_failfast_hard_errors: policy.connect_failfast_hard_errors,
        },
        counters,
        summary: Some(RuntimeUpstreamQualitySummaryData {
            configured_total: snapshot.summary.configured_total,
            healthy_total: snapshot.summary.healthy_total,
            unhealthy_total: snapshot.summary.unhealthy_total,
            direct_total: snapshot.summary.direct_total,
            socks4_total: snapshot.summary.socks4_total,
            socks5_total: snapshot.summary.socks5_total,
            shadowsocks_total: snapshot.summary.shadowsocks_total,
        }),
        upstreams: Some(
            snapshot
                .upstreams
                .into_iter()
                .map(|upstream| RuntimeUpstreamQualityUpstreamData {
                    upstream_id: upstream.upstream_id,
                    route_kind: match upstream.route_kind {
                        crate::transport::UpstreamRouteKind::Direct => "direct",
                        crate::transport::UpstreamRouteKind::Socks4 => "socks4",
                        crate::transport::UpstreamRouteKind::Socks5 => "socks5",
                        crate::transport::UpstreamRouteKind::Shadowsocks => "shadowsocks",
                    },
                    address: upstream.address,
                    weight: upstream.weight,
                    scopes: upstream.scopes,
                    healthy: upstream.healthy,
                    fails: upstream.fails,
                    last_check_age_secs: upstream.last_check_age_secs,
                    effective_latency_ms: upstream.effective_latency_ms,
                    dc: upstream
                        .dc
                        .into_iter()
                        .map(|dc| RuntimeUpstreamQualityDcData {
                            dc: dc.dc,
                            latency_ema_ms: dc.latency_ema_ms,
                            ip_preference: match dc.ip_preference {
                                crate::transport::upstream::IpPreference::Unknown => "unknown",
                                crate::transport::upstream::IpPreference::PreferV6 => "prefer_v6",
                                crate::transport::upstream::IpPreference::PreferV4 => "prefer_v4",
                                crate::transport::upstream::IpPreference::BothWork => "both_work",
                                crate::transport::upstream::IpPreference::Unavailable => {
                                    "unavailable"
                                }
                            },
                        })
                        .collect(),
                })
                .collect(),
        ),
    }
}

pub(super) async fn build_runtime_nat_stun_data(shared: &ApiShared) -> RuntimeNatStunData {
    let now_epoch_secs = now_epoch_secs();
    let Some(pool) = shared.me_pool.read().await.clone() else {
        return RuntimeNatStunData {
            enabled: false,
            reason: Some(SOURCE_UNAVAILABLE_REASON),
            generated_at_epoch_secs: now_epoch_secs,
            data: None,
        };
    };

    let snapshot = pool.api_nat_stun_snapshot().await;
    RuntimeNatStunData {
        enabled: true,
        reason: None,
        generated_at_epoch_secs: now_epoch_secs,
        data: Some(RuntimeNatStunPayload {
            flags: RuntimeNatStunFlagsData {
                nat_probe_enabled: snapshot.nat_probe_enabled,
                nat_probe_disabled_runtime: snapshot.nat_probe_disabled_runtime,
                nat_probe_attempts: snapshot.nat_probe_attempts,
            },
            servers: RuntimeNatStunServersData {
                configured: snapshot.configured_servers,
                live: snapshot.live_servers.clone(),
                live_total: snapshot.live_servers.len(),
            },
            reflection: RuntimeNatStunReflectionBlockData {
                v4: snapshot
                    .reflection_v4
                    .map(|entry| RuntimeNatStunReflectionData {
                        addr: entry.addr.to_string(),
                        age_secs: entry.age_secs,
                    }),
                v6: snapshot
                    .reflection_v6
                    .map(|entry| RuntimeNatStunReflectionData {
                        addr: entry.addr.to_string(),
                        age_secs: entry.age_secs,
                    }),
            },
            stun_backoff_remaining_ms: snapshot.stun_backoff_remaining_ms,
        }),
    }
}

fn now_epoch_secs() -> u64 {
    SystemTime::now()
        .duration_since(UNIX_EPOCH)
        .unwrap_or_default()
        .as_secs()
}

@ -0,0 +1,300 @@
use std::collections::HashMap;
use std::net::IpAddr;
use std::sync::{Mutex, OnceLock};
use std::time::{SystemTime, UNIX_EPOCH};

use serde::Serialize;

use crate::config::{ProxyConfig, UpstreamType};
use crate::network::probe::{detect_interface_ipv4, detect_interface_ipv6, is_bogon};
use crate::transport::UpstreamRouteKind;
use crate::transport::middle_proxy::{bnd_snapshot, timeskew_snapshot, upstream_bnd_snapshots};

use super::ApiShared;

const SOURCE_UNAVAILABLE_REASON: &str = "source_unavailable";
const KDF_EWMA_TAU_SECS: f64 = 600.0;
const KDF_EWMA_THRESHOLD_ERRORS_PER_MIN: f64 = 0.30;
const TIMESKEW_THRESHOLD_SECS: u64 = 60;

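// Threshold notes (descriptive comment, added for clarity): the self-test
// flags the KDF check as "error" once the smoothed error rate reaches
// 0.30 errors/min (EWMA time constant: 10 minutes), and the timeskew
// check as "error" once the worst skew seen in the last 15 minutes
// exceeds 60 seconds.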
#[derive(Serialize)]
pub(super) struct RuntimeMeSelftestKdfData {
    pub(super) state: &'static str,
    pub(super) ewma_errors_per_min: f64,
    pub(super) threshold_errors_per_min: f64,
    pub(super) errors_total: u64,
}

#[derive(Serialize)]
pub(super) struct RuntimeMeSelftestTimeskewData {
    pub(super) state: &'static str,
    pub(super) max_skew_secs_15m: Option<u64>,
    pub(super) samples_15m: usize,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub(super) last_skew_secs: Option<u64>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub(super) last_source: Option<&'static str>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub(super) last_seen_age_secs: Option<u64>,
}

#[derive(Serialize)]
pub(super) struct RuntimeMeSelftestIpFamilyData {
    pub(super) addr: String,
    pub(super) state: &'static str,
}

#[derive(Serialize)]
pub(super) struct RuntimeMeSelftestIpData {
    #[serde(skip_serializing_if = "Option::is_none")]
    pub(super) v4: Option<RuntimeMeSelftestIpFamilyData>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub(super) v6: Option<RuntimeMeSelftestIpFamilyData>,
}

#[derive(Serialize)]
pub(super) struct RuntimeMeSelftestPidData {
    pub(super) pid: u32,
    pub(super) state: &'static str,
}

#[derive(Serialize)]
pub(super) struct RuntimeMeSelftestBndData {
    pub(super) addr_state: &'static str,
    pub(super) port_state: &'static str,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub(super) last_addr: Option<String>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub(super) last_seen_age_secs: Option<u64>,
}

#[derive(Serialize)]
pub(super) struct RuntimeMeSelftestUpstreamData {
    pub(super) upstream_id: usize,
    pub(super) route_kind: &'static str,
    pub(super) address: String,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub(super) bnd: Option<RuntimeMeSelftestBndData>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub(super) ip: Option<String>,
}

#[derive(Serialize)]
pub(super) struct RuntimeMeSelftestPayload {
    pub(super) kdf: RuntimeMeSelftestKdfData,
    pub(super) timeskew: RuntimeMeSelftestTimeskewData,
    pub(super) ip: RuntimeMeSelftestIpData,
    pub(super) pid: RuntimeMeSelftestPidData,
    pub(super) bnd: Option<RuntimeMeSelftestBndData>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub(super) upstreams: Option<Vec<RuntimeMeSelftestUpstreamData>>,
}

#[derive(Serialize)]
pub(super) struct RuntimeMeSelftestData {
    pub(super) enabled: bool,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub(super) reason: Option<&'static str>,
    pub(super) generated_at_epoch_secs: u64,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub(super) data: Option<RuntimeMeSelftestPayload>,
}

#[derive(Default)]
struct KdfEwmaState {
    initialized: bool,
    last_epoch_secs: u64,
    last_total_errors: u64,
    ewma_errors_per_min: f64,
}

static KDF_EWMA_STATE: OnceLock<Mutex<KdfEwmaState>> = OnceLock::new();

fn kdf_ewma_state() -> &'static Mutex<KdfEwmaState> {
    KDF_EWMA_STATE.get_or_init(|| Mutex::new(KdfEwmaState::default()))
}

pub(super) async fn build_runtime_me_selftest_data(
    shared: &ApiShared,
    cfg: &ProxyConfig,
) -> RuntimeMeSelftestData {
    let now_epoch_secs = now_epoch_secs();
    if shared.me_pool.read().await.is_none() {
        return RuntimeMeSelftestData {
            enabled: false,
            reason: Some(SOURCE_UNAVAILABLE_REASON),
            generated_at_epoch_secs: now_epoch_secs,
            data: None,
        };
    }

    let kdf_errors_total = shared
        .stats
        .get_me_kdf_drift_total()
        .saturating_add(shared.stats.get_me_socks_kdf_strict_reject());
    let kdf_ewma = update_kdf_ewma(now_epoch_secs, kdf_errors_total);
    let kdf_state = if kdf_ewma >= KDF_EWMA_THRESHOLD_ERRORS_PER_MIN {
        "error"
    } else {
        "ok"
    };

    let skew = timeskew_snapshot();
    let timeskew_state = if skew.max_skew_secs_15m.unwrap_or(0) > TIMESKEW_THRESHOLD_SECS {
        "error"
    } else {
        "ok"
    };

    let ip_v4 = detect_interface_ipv4().map(|ip| RuntimeMeSelftestIpFamilyData {
        addr: ip.to_string(),
        state: classify_ip(IpAddr::V4(ip)),
    });
    let ip_v6 = detect_interface_ipv6().map(|ip| RuntimeMeSelftestIpFamilyData {
        addr: ip.to_string(),
        state: classify_ip(IpAddr::V6(ip)),
    });

    let pid = std::process::id();
    let pid_state = if pid == 1 { "one" } else { "non-one" };

    let has_socks_upstreams = cfg.upstreams.iter().any(|upstream| {
        upstream.enabled
            && matches!(
                upstream.upstream_type,
                UpstreamType::Socks4 { .. } | UpstreamType::Socks5 { .. }
            )
    });

    let bnd = if has_socks_upstreams {
        let snapshot = bnd_snapshot();
        Some(RuntimeMeSelftestBndData {
            addr_state: snapshot.addr_status,
            port_state: snapshot.port_status,
            last_addr: snapshot.last_addr.map(|value| value.to_string()),
            last_seen_age_secs: snapshot.last_seen_age_secs,
        })
    } else {
        None
    };
    let upstreams = build_upstream_selftest_data(shared);

    RuntimeMeSelftestData {
        enabled: true,
        reason: None,
        generated_at_epoch_secs: now_epoch_secs,
        data: Some(RuntimeMeSelftestPayload {
            kdf: RuntimeMeSelftestKdfData {
                state: kdf_state,
                ewma_errors_per_min: round3(kdf_ewma),
                threshold_errors_per_min: KDF_EWMA_THRESHOLD_ERRORS_PER_MIN,
                errors_total: kdf_errors_total,
            },
            timeskew: RuntimeMeSelftestTimeskewData {
                state: timeskew_state,
                max_skew_secs_15m: skew.max_skew_secs_15m,
                samples_15m: skew.samples_15m,
                last_skew_secs: skew.last_skew_secs,
                last_source: skew.last_source,
                last_seen_age_secs: skew.last_seen_age_secs,
            },
            ip: RuntimeMeSelftestIpData {
                v4: ip_v4,
                v6: ip_v6,
            },
            pid: RuntimeMeSelftestPidData {
                pid,
                state: pid_state,
            },
            bnd,
            upstreams,
        }),
    }
}

fn build_upstream_selftest_data(shared: &ApiShared) -> Option<Vec<RuntimeMeSelftestUpstreamData>> {
    let snapshot = shared.upstream_manager.try_api_snapshot()?;
    if snapshot.summary.configured_total <= 1 {
        return None;
    }

    let mut upstream_bnd_by_id: HashMap<usize, _> = upstream_bnd_snapshots()
        .into_iter()
        .map(|entry| (entry.upstream_id, entry))
        .collect();
    let mut rows = Vec::with_capacity(snapshot.upstreams.len());
    for upstream in snapshot.upstreams {
        let upstream_bnd = upstream_bnd_by_id.remove(&upstream.upstream_id);
        rows.push(RuntimeMeSelftestUpstreamData {
            upstream_id: upstream.upstream_id,
            route_kind: map_route_kind(upstream.route_kind),
            address: upstream.address,
            bnd: upstream_bnd.as_ref().map(|entry| RuntimeMeSelftestBndData {
                addr_state: entry.addr_status,
                port_state: entry.port_status,
                last_addr: entry.last_addr.map(|value| value.to_string()),
                last_seen_age_secs: entry.last_seen_age_secs,
            }),
            ip: upstream_bnd.and_then(|entry| entry.last_ip.map(|value| value.to_string())),
        });
    }
    Some(rows)
}

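// EWMA over irregular sampling intervals (descriptive comment, added for
// clarity): with time constant tau = KDF_EWMA_TAU_SECS the smoothing
// factor is alpha = 1 - exp(-dt / tau), which reduces to a fixed-alpha
// EWMA under regular sampling and lets long gaps between samples decay
// the old estimate proportionally. The instantaneous rate is
// delta_errors * 60 / dt_secs, in errors per minute.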
fn update_kdf_ewma(now_epoch_secs: u64, total_errors: u64) -> f64 {
    let Ok(mut guard) = kdf_ewma_state().lock() else {
        return 0.0;
    };

    if !guard.initialized {
        guard.initialized = true;
        guard.last_epoch_secs = now_epoch_secs;
        guard.last_total_errors = total_errors;
        guard.ewma_errors_per_min = 0.0;
        return guard.ewma_errors_per_min;
    }

    let dt_secs = now_epoch_secs.saturating_sub(guard.last_epoch_secs);
    if dt_secs == 0 {
        return guard.ewma_errors_per_min;
    }

    let delta_errors = total_errors.saturating_sub(guard.last_total_errors);
    let instant_rate_per_min = (delta_errors as f64) * 60.0 / (dt_secs as f64);
    let alpha = 1.0 - f64::exp(-(dt_secs as f64) / KDF_EWMA_TAU_SECS);
    guard.ewma_errors_per_min =
        guard.ewma_errors_per_min + alpha * (instant_rate_per_min - guard.ewma_errors_per_min);
    guard.last_epoch_secs = now_epoch_secs;
    guard.last_total_errors = total_errors;
    guard.ewma_errors_per_min
}

fn classify_ip(ip: IpAddr) -> &'static str {
    if ip.is_loopback() {
        return "loopback";
    }
    if is_bogon(ip) {
        return "bogon";
    }
    "good"
}

fn map_route_kind(value: UpstreamRouteKind) -> &'static str {
    match value {
        UpstreamRouteKind::Direct => "direct",
        UpstreamRouteKind::Socks4 => "socks4",
        UpstreamRouteKind::Socks5 => "socks5",
        UpstreamRouteKind::Shadowsocks => "shadowsocks",
    }
}

fn round3(value: f64) -> f64 {
    (value * 1000.0).round() / 1000.0
}

fn now_epoch_secs() -> u64 {
    SystemTime::now()
        .duration_since(UNIX_EPOCH)
        .unwrap_or_default()
        .as_secs()
}

@ -0,0 +1,567 @@
|
|||
use std::time::{Duration, Instant, SystemTime, UNIX_EPOCH};
|
||||
|
||||
use crate::config::ApiConfig;
|
||||
use crate::stats::Stats;
|
||||
use crate::transport::UpstreamRouteKind;
|
||||
use crate::transport::upstream::IpPreference;
|
||||
|
||||
use super::ApiShared;
|
||||
use super::model::{
|
||||
DcEndpointWriters, DcStatus, DcStatusData, MeWriterStatus, MeWritersData, MeWritersSummary,
|
||||
MinimalAllData, MinimalAllPayload, MinimalDcPathData, MinimalMeRuntimeData,
|
||||
MinimalQuarantineData, UpstreamDcStatus, UpstreamStatus, UpstreamSummaryData, UpstreamsData,
|
||||
ZeroAllData, ZeroCodeCount, ZeroCoreData, ZeroDesyncData, ZeroMiddleProxyData, ZeroPoolData,
|
||||
ZeroUpstreamData,
|
||||
};
|
||||
|
||||
const FEATURE_DISABLED_REASON: &str = "feature_disabled";
|
||||
const SOURCE_UNAVAILABLE_REASON: &str = "source_unavailable";
|
||||
|
||||
#[derive(Clone)]
|
||||
pub(crate) struct MinimalCacheEntry {
|
||||
pub(super) expires_at: Instant,
|
||||
pub(super) payload: MinimalAllPayload,
|
||||
pub(super) generated_at_epoch_secs: u64,
|
||||
}
|
||||
|
||||
pub(super) fn build_zero_all_data(stats: &Stats, configured_users: usize) -> ZeroAllData {
|
||||
let telemetry = stats.telemetry_policy();
|
||||
let handshake_error_codes = stats
|
||||
.get_me_handshake_error_code_counts()
|
||||
.into_iter()
|
||||
.map(|(code, total)| ZeroCodeCount { code, total })
|
||||
.collect();
|
||||
|
||||
ZeroAllData {
|
||||
generated_at_epoch_secs: now_epoch_secs(),
|
||||
core: ZeroCoreData {
|
||||
uptime_seconds: stats.uptime_secs(),
|
||||
connections_total: stats.get_connects_all(),
|
||||
connections_bad_total: stats.get_connects_bad(),
|
||||
handshake_timeouts_total: stats.get_handshake_timeouts(),
|
||||
accept_permit_timeout_total: stats.get_accept_permit_timeout_total(),
|
||||
configured_users,
|
||||
telemetry_core_enabled: telemetry.core_enabled,
|
||||
telemetry_user_enabled: telemetry.user_enabled,
|
||||
telemetry_me_level: telemetry.me_level.to_string(),
|
||||
conntrack_control_enabled: stats.get_conntrack_control_enabled(),
|
||||
conntrack_control_available: stats.get_conntrack_control_available(),
|
||||
conntrack_pressure_active: stats.get_conntrack_pressure_active(),
|
||||
conntrack_event_queue_depth: stats.get_conntrack_event_queue_depth(),
|
||||
conntrack_rule_apply_ok: stats.get_conntrack_rule_apply_ok(),
|
||||
conntrack_delete_attempt_total: stats.get_conntrack_delete_attempt_total(),
|
||||
conntrack_delete_success_total: stats.get_conntrack_delete_success_total(),
|
||||
conntrack_delete_not_found_total: stats.get_conntrack_delete_not_found_total(),
|
||||
conntrack_delete_error_total: stats.get_conntrack_delete_error_total(),
|
||||
conntrack_close_event_drop_total: stats.get_conntrack_close_event_drop_total(),
|
||||
},
|
||||
upstream: build_zero_upstream_data(stats),
|
||||
middle_proxy: ZeroMiddleProxyData {
|
||||
keepalive_sent_total: stats.get_me_keepalive_sent(),
|
||||
keepalive_failed_total: stats.get_me_keepalive_failed(),
|
||||
keepalive_pong_total: stats.get_me_keepalive_pong(),
|
||||
keepalive_timeout_total: stats.get_me_keepalive_timeout(),
|
||||
rpc_proxy_req_signal_sent_total: stats.get_me_rpc_proxy_req_signal_sent_total(),
|
||||
rpc_proxy_req_signal_failed_total: stats.get_me_rpc_proxy_req_signal_failed_total(),
|
||||
rpc_proxy_req_signal_skipped_no_meta_total: stats
|
||||
.get_me_rpc_proxy_req_signal_skipped_no_meta_total(),
|
||||
rpc_proxy_req_signal_response_total: stats.get_me_rpc_proxy_req_signal_response_total(),
|
||||
rpc_proxy_req_signal_close_sent_total: stats
|
||||
.get_me_rpc_proxy_req_signal_close_sent_total(),
|
||||
reconnect_attempt_total: stats.get_me_reconnect_attempts(),
|
||||
reconnect_success_total: stats.get_me_reconnect_success(),
|
||||
handshake_reject_total: stats.get_me_handshake_reject_total(),
|
||||
handshake_error_codes,
|
||||
reader_eof_total: stats.get_me_reader_eof_total(),
|
||||
idle_close_by_peer_total: stats.get_me_idle_close_by_peer_total(),
|
||||
route_drop_no_conn_total: stats.get_me_route_drop_no_conn(),
|
||||
route_drop_channel_closed_total: stats.get_me_route_drop_channel_closed(),
|
||||
route_drop_queue_full_total: stats.get_me_route_drop_queue_full(),
|
||||
route_drop_queue_full_base_total: stats.get_me_route_drop_queue_full_base(),
|
||||
route_drop_queue_full_high_total: stats.get_me_route_drop_queue_full_high(),
|
||||
d2c_batches_total: stats.get_me_d2c_batches_total(),
|
||||
d2c_batch_frames_total: stats.get_me_d2c_batch_frames_total(),
|
||||
d2c_batch_bytes_total: stats.get_me_d2c_batch_bytes_total(),
|
||||
d2c_flush_reason_queue_drain_total: stats.get_me_d2c_flush_reason_queue_drain_total(),
|
||||
d2c_flush_reason_batch_frames_total: stats.get_me_d2c_flush_reason_batch_frames_total(),
|
||||
d2c_flush_reason_batch_bytes_total: stats.get_me_d2c_flush_reason_batch_bytes_total(),
|
||||
d2c_flush_reason_max_delay_total: stats.get_me_d2c_flush_reason_max_delay_total(),
|
||||
d2c_flush_reason_ack_immediate_total: stats
|
||||
.get_me_d2c_flush_reason_ack_immediate_total(),
|
||||
            d2c_flush_reason_close_total: stats.get_me_d2c_flush_reason_close_total(),
            d2c_data_frames_total: stats.get_me_d2c_data_frames_total(),
            d2c_ack_frames_total: stats.get_me_d2c_ack_frames_total(),
            d2c_payload_bytes_total: stats.get_me_d2c_payload_bytes_total(),
            d2c_write_mode_coalesced_total: stats.get_me_d2c_write_mode_coalesced_total(),
            d2c_write_mode_split_total: stats.get_me_d2c_write_mode_split_total(),
            d2c_quota_reject_pre_write_total: stats.get_me_d2c_quota_reject_pre_write_total(),
            d2c_quota_reject_post_write_total: stats.get_me_d2c_quota_reject_post_write_total(),
            d2c_frame_buf_shrink_total: stats.get_me_d2c_frame_buf_shrink_total(),
            d2c_frame_buf_shrink_bytes_total: stats.get_me_d2c_frame_buf_shrink_bytes_total(),
            socks_kdf_strict_reject_total: stats.get_me_socks_kdf_strict_reject(),
            socks_kdf_compat_fallback_total: stats.get_me_socks_kdf_compat_fallback(),
            endpoint_quarantine_total: stats.get_me_endpoint_quarantine_total(),
            kdf_drift_total: stats.get_me_kdf_drift_total(),
            kdf_port_only_drift_total: stats.get_me_kdf_port_only_drift_total(),
            hardswap_pending_reuse_total: stats.get_me_hardswap_pending_reuse_total(),
            hardswap_pending_ttl_expired_total: stats.get_me_hardswap_pending_ttl_expired_total(),
            single_endpoint_outage_enter_total: stats.get_me_single_endpoint_outage_enter_total(),
            single_endpoint_outage_exit_total: stats.get_me_single_endpoint_outage_exit_total(),
            single_endpoint_outage_reconnect_attempt_total: stats
                .get_me_single_endpoint_outage_reconnect_attempt_total(),
            single_endpoint_outage_reconnect_success_total: stats
                .get_me_single_endpoint_outage_reconnect_success_total(),
            single_endpoint_quarantine_bypass_total: stats
                .get_me_single_endpoint_quarantine_bypass_total(),
            single_endpoint_shadow_rotate_total: stats.get_me_single_endpoint_shadow_rotate_total(),
            single_endpoint_shadow_rotate_skipped_quarantine_total: stats
                .get_me_single_endpoint_shadow_rotate_skipped_quarantine_total(),
            floor_mode_switch_total: stats.get_me_floor_mode_switch_total(),
            floor_mode_switch_static_to_adaptive_total: stats
                .get_me_floor_mode_switch_static_to_adaptive_total(),
            floor_mode_switch_adaptive_to_static_total: stats
                .get_me_floor_mode_switch_adaptive_to_static_total(),
        },
        pool: ZeroPoolData {
            pool_swap_total: stats.get_pool_swap_total(),
            pool_drain_active: stats.get_pool_drain_active(),
            pool_force_close_total: stats.get_pool_force_close_total(),
            pool_stale_pick_total: stats.get_pool_stale_pick_total(),
            writer_removed_total: stats.get_me_writer_removed_total(),
            writer_removed_unexpected_total: stats.get_me_writer_removed_unexpected_total(),
            refill_triggered_total: stats.get_me_refill_triggered_total(),
            refill_skipped_inflight_total: stats.get_me_refill_skipped_inflight_total(),
            refill_failed_total: stats.get_me_refill_failed_total(),
            writer_restored_same_endpoint_total: stats.get_me_writer_restored_same_endpoint_total(),
            writer_restored_fallback_total: stats.get_me_writer_restored_fallback_total(),
        },
        desync: ZeroDesyncData {
            secure_padding_invalid_total: stats.get_secure_padding_invalid(),
            desync_total: stats.get_desync_total(),
            desync_full_logged_total: stats.get_desync_full_logged(),
            desync_suppressed_total: stats.get_desync_suppressed(),
            desync_frames_bucket_0: stats.get_desync_frames_bucket_0(),
            desync_frames_bucket_1_2: stats.get_desync_frames_bucket_1_2(),
            desync_frames_bucket_3_10: stats.get_desync_frames_bucket_3_10(),
            desync_frames_bucket_gt_10: stats.get_desync_frames_bucket_gt_10(),
        },
    }
}

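/// Zero-section snapshot of upstream connect counters plus the attempt-count
/// and connect-duration histogram buckets, read from the shared `Stats`.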
fn build_zero_upstream_data(stats: &Stats) -> ZeroUpstreamData {
    ZeroUpstreamData {
        connect_attempt_total: stats.get_upstream_connect_attempt_total(),
        connect_success_total: stats.get_upstream_connect_success_total(),
        connect_fail_total: stats.get_upstream_connect_fail_total(),
        connect_failfast_hard_error_total: stats.get_upstream_connect_failfast_hard_error_total(),
        connect_attempts_bucket_1: stats.get_upstream_connect_attempts_bucket_1(),
        connect_attempts_bucket_2: stats.get_upstream_connect_attempts_bucket_2(),
        connect_attempts_bucket_3_4: stats.get_upstream_connect_attempts_bucket_3_4(),
        connect_attempts_bucket_gt_4: stats.get_upstream_connect_attempts_bucket_gt_4(),
        connect_duration_success_bucket_le_100ms: stats
            .get_upstream_connect_duration_success_bucket_le_100ms(),
        connect_duration_success_bucket_101_500ms: stats
            .get_upstream_connect_duration_success_bucket_101_500ms(),
        connect_duration_success_bucket_501_1000ms: stats
            .get_upstream_connect_duration_success_bucket_501_1000ms(),
        connect_duration_success_bucket_gt_1000ms: stats
            .get_upstream_connect_duration_success_bucket_gt_1000ms(),
        connect_duration_fail_bucket_le_100ms: stats
            .get_upstream_connect_duration_fail_bucket_le_100ms(),
        connect_duration_fail_bucket_101_500ms: stats
            .get_upstream_connect_duration_fail_bucket_101_500ms(),
        connect_duration_fail_bucket_501_1000ms: stats
            .get_upstream_connect_duration_fail_bucket_501_1000ms(),
        connect_duration_fail_bucket_gt_1000ms: stats
            .get_upstream_connect_duration_fail_bucket_gt_1000ms(),
    }
}

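/// Builds the upstreams API payload. The zero-section counters are always
/// included; the summary and per-upstream details appear only when the
/// minimal runtime feature is enabled and a manager snapshot is obtainable.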
pub(super) fn build_upstreams_data(shared: &ApiShared, api_cfg: &ApiConfig) -> UpstreamsData {
    let generated_at_epoch_secs = now_epoch_secs();
    let zero = build_zero_upstream_data(&shared.stats);
    if !api_cfg.minimal_runtime_enabled {
        return UpstreamsData {
            enabled: false,
            reason: Some(FEATURE_DISABLED_REASON),
            generated_at_epoch_secs,
            zero,
            summary: None,
            upstreams: None,
        };
    }

    let Some(snapshot) = shared.upstream_manager.try_api_snapshot() else {
        return UpstreamsData {
            enabled: true,
            reason: Some(SOURCE_UNAVAILABLE_REASON),
            generated_at_epoch_secs,
            zero,
            summary: None,
            upstreams: None,
        };
    };

    let summary = UpstreamSummaryData {
        configured_total: snapshot.summary.configured_total,
        healthy_total: snapshot.summary.healthy_total,
        unhealthy_total: snapshot.summary.unhealthy_total,
        direct_total: snapshot.summary.direct_total,
        socks4_total: snapshot.summary.socks4_total,
        socks5_total: snapshot.summary.socks5_total,
        shadowsocks_total: snapshot.summary.shadowsocks_total,
    };
    let upstreams = snapshot
        .upstreams
        .into_iter()
        .map(|upstream| UpstreamStatus {
            upstream_id: upstream.upstream_id,
            route_kind: map_route_kind(upstream.route_kind),
            address: upstream.address,
            weight: upstream.weight,
            scopes: upstream.scopes,
            healthy: upstream.healthy,
            fails: upstream.fails,
            last_check_age_secs: upstream.last_check_age_secs,
            effective_latency_ms: upstream.effective_latency_ms,
            dc: upstream
                .dc
                .into_iter()
                .map(|dc| UpstreamDcStatus {
                    dc: dc.dc,
                    latency_ema_ms: dc.latency_ema_ms,
                    ip_preference: map_ip_preference(dc.ip_preference),
                })
                .collect(),
        })
        .collect();

    UpstreamsData {
        enabled: true,
        reason: None,
        generated_at_epoch_secs,
        zero,
        summary: Some(summary),
        upstreams: Some(upstreams),
    }
}

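/// Combined minimal-runtime payload (`me_writers`, `dcs`, `me_runtime`,
/// `network_path`), served from the shared TTL cache when it is still fresh.
/// When the source is unavailable, disabled placeholders are returned so the
/// response shape stays stable for clients.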
pub(super) async fn build_minimal_all_data(
    shared: &ApiShared,
    api_cfg: &ApiConfig,
) -> MinimalAllData {
    let now = now_epoch_secs();
    if !api_cfg.minimal_runtime_enabled {
        return MinimalAllData {
            enabled: false,
            reason: Some(FEATURE_DISABLED_REASON),
            generated_at_epoch_secs: now,
            data: None,
        };
    }

    let Some((generated_at_epoch_secs, payload)) =
        get_minimal_payload_cached(shared, api_cfg.minimal_runtime_cache_ttl_ms).await
    else {
        return MinimalAllData {
            enabled: true,
            reason: Some(SOURCE_UNAVAILABLE_REASON),
            generated_at_epoch_secs: now,
            data: Some(MinimalAllPayload {
                me_writers: disabled_me_writers(now, SOURCE_UNAVAILABLE_REASON),
                dcs: disabled_dcs(now, SOURCE_UNAVAILABLE_REASON),
                me_runtime: None,
                network_path: Vec::new(),
            }),
        };
    };

    MinimalAllData {
        enabled: true,
        reason: None,
        generated_at_epoch_secs,
        data: Some(payload),
    }
}

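/// Per-writer view of the middle-proxy pool, backed by the same cached
/// minimal-runtime payload as `build_minimal_all_data`.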
pub(super) async fn build_me_writers_data(
    shared: &ApiShared,
    api_cfg: &ApiConfig,
) -> MeWritersData {
    let now = now_epoch_secs();
    if !api_cfg.minimal_runtime_enabled {
        return disabled_me_writers(now, FEATURE_DISABLED_REASON);
    }

    let Some((_, payload)) =
        get_minimal_payload_cached(shared, api_cfg.minimal_runtime_cache_ttl_ms).await
    else {
        return disabled_me_writers(now, SOURCE_UNAVAILABLE_REASON);
    };
    payload.me_writers
}

pub(super) async fn build_dcs_data(shared: &ApiShared, api_cfg: &ApiConfig) -> DcStatusData {
    let now = now_epoch_secs();
    if !api_cfg.minimal_runtime_enabled {
        return disabled_dcs(now, FEATURE_DISABLED_REASON);
    }

    let Some((_, payload)) =
        get_minimal_payload_cached(shared, api_cfg.minimal_runtime_cache_ttl_ms).await
    else {
        return disabled_dcs(now, SOURCE_UNAVAILABLE_REASON);
    };
    payload.dcs
}

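/// Returns the minimal-runtime payload plus its generation timestamp. With a
/// non-zero TTL the last snapshot is served from `shared.minimal_cache` until
/// it expires; otherwise the pool is snapshotted on every call. Returns
/// `None` when no middle-proxy pool is installed.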
async fn get_minimal_payload_cached(
    shared: &ApiShared,
    cache_ttl_ms: u64,
) -> Option<(u64, MinimalAllPayload)> {
    if cache_ttl_ms > 0 {
        let now = Instant::now();
        let cached = shared.minimal_cache.lock().await.clone();
        if let Some(entry) = cached
            && now < entry.expires_at
        {
            return Some((entry.generated_at_epoch_secs, entry.payload));
        }
    }

    let pool = shared.me_pool.read().await.clone()?;
    let status = pool.api_status_snapshot().await;
    let runtime = pool.api_runtime_snapshot().await;
    let generated_at_epoch_secs = status.generated_at_epoch_secs;

    let me_writers = MeWritersData {
        middle_proxy_enabled: true,
        reason: None,
        generated_at_epoch_secs,
        summary: MeWritersSummary {
            configured_dc_groups: status.configured_dc_groups,
            configured_endpoints: status.configured_endpoints,
            available_endpoints: status.available_endpoints,
            available_pct: status.available_pct,
            required_writers: status.required_writers,
            alive_writers: status.alive_writers,
            coverage_pct: status.coverage_pct,
            fresh_alive_writers: status.fresh_alive_writers,
            fresh_coverage_pct: status.fresh_coverage_pct,
        },
        writers: status
            .writers
            .into_iter()
            .map(|entry| MeWriterStatus {
                writer_id: entry.writer_id,
                dc: entry.dc,
                endpoint: entry.endpoint.to_string(),
                generation: entry.generation,
                state: entry.state,
                draining: entry.draining,
                degraded: entry.degraded,
                bound_clients: entry.bound_clients,
                idle_for_secs: entry.idle_for_secs,
                rtt_ema_ms: entry.rtt_ema_ms,
                matches_active_generation: entry.matches_active_generation,
                in_desired_map: entry.in_desired_map,
                allow_drain_fallback: entry.allow_drain_fallback,
                drain_started_at_epoch_secs: entry.drain_started_at_epoch_secs,
                drain_deadline_epoch_secs: entry.drain_deadline_epoch_secs,
                drain_over_ttl: entry.drain_over_ttl,
            })
            .collect(),
    };
    let dcs = DcStatusData {
        middle_proxy_enabled: true,
        reason: None,
        generated_at_epoch_secs,
        dcs: status
            .dcs
            .into_iter()
            .map(|entry| DcStatus {
                dc: entry.dc,
                endpoints: entry
                    .endpoints
                    .into_iter()
                    .map(|value| value.to_string())
                    .collect(),
                endpoint_writers: entry
                    .endpoint_writers
                    .into_iter()
                    .map(|coverage| DcEndpointWriters {
                        endpoint: coverage.endpoint.to_string(),
                        active_writers: coverage.active_writers,
                    })
                    .collect(),
                available_endpoints: entry.available_endpoints,
                available_pct: entry.available_pct,
                required_writers: entry.required_writers,
                floor_min: entry.floor_min,
                floor_target: entry.floor_target,
                floor_max: entry.floor_max,
                floor_capped: entry.floor_capped,
                alive_writers: entry.alive_writers,
                coverage_pct: entry.coverage_pct,
                fresh_alive_writers: entry.fresh_alive_writers,
                fresh_coverage_pct: entry.fresh_coverage_pct,
                rtt_ms: entry.rtt_ms,
                load: entry.load,
            })
            .collect(),
    };
    let me_runtime = MinimalMeRuntimeData {
        active_generation: runtime.active_generation,
        warm_generation: runtime.warm_generation,
        pending_hardswap_generation: runtime.pending_hardswap_generation,
        pending_hardswap_age_secs: runtime.pending_hardswap_age_secs,
        hardswap_enabled: runtime.hardswap_enabled,
        floor_mode: runtime.floor_mode,
        adaptive_floor_idle_secs: runtime.adaptive_floor_idle_secs,
        adaptive_floor_min_writers_single_endpoint: runtime
            .adaptive_floor_min_writers_single_endpoint,
        adaptive_floor_min_writers_multi_endpoint: runtime
            .adaptive_floor_min_writers_multi_endpoint,
        adaptive_floor_recover_grace_secs: runtime.adaptive_floor_recover_grace_secs,
        adaptive_floor_writers_per_core_total: runtime.adaptive_floor_writers_per_core_total,
        adaptive_floor_cpu_cores_override: runtime.adaptive_floor_cpu_cores_override,
        adaptive_floor_max_extra_writers_single_per_core: runtime
            .adaptive_floor_max_extra_writers_single_per_core,
        adaptive_floor_max_extra_writers_multi_per_core: runtime
            .adaptive_floor_max_extra_writers_multi_per_core,
        adaptive_floor_max_active_writers_per_core: runtime
            .adaptive_floor_max_active_writers_per_core,
        adaptive_floor_max_warm_writers_per_core: runtime.adaptive_floor_max_warm_writers_per_core,
        adaptive_floor_max_active_writers_global: runtime.adaptive_floor_max_active_writers_global,
        adaptive_floor_max_warm_writers_global: runtime.adaptive_floor_max_warm_writers_global,
        adaptive_floor_cpu_cores_detected: runtime.adaptive_floor_cpu_cores_detected,
        adaptive_floor_cpu_cores_effective: runtime.adaptive_floor_cpu_cores_effective,
        adaptive_floor_global_cap_raw: runtime.adaptive_floor_global_cap_raw,
        adaptive_floor_global_cap_effective: runtime.adaptive_floor_global_cap_effective,
        adaptive_floor_target_writers_total: runtime.adaptive_floor_target_writers_total,
        adaptive_floor_active_cap_configured: runtime.adaptive_floor_active_cap_configured,
        adaptive_floor_active_cap_effective: runtime.adaptive_floor_active_cap_effective,
        adaptive_floor_warm_cap_configured: runtime.adaptive_floor_warm_cap_configured,
        adaptive_floor_warm_cap_effective: runtime.adaptive_floor_warm_cap_effective,
        adaptive_floor_active_writers_current: runtime.adaptive_floor_active_writers_current,
        adaptive_floor_warm_writers_current: runtime.adaptive_floor_warm_writers_current,
        me_keepalive_enabled: runtime.me_keepalive_enabled,
        me_keepalive_interval_secs: runtime.me_keepalive_interval_secs,
        me_keepalive_jitter_secs: runtime.me_keepalive_jitter_secs,
        me_keepalive_payload_random: runtime.me_keepalive_payload_random,
        rpc_proxy_req_every_secs: runtime.rpc_proxy_req_every_secs,
        me_reconnect_max_concurrent_per_dc: runtime.me_reconnect_max_concurrent_per_dc,
        me_reconnect_backoff_base_ms: runtime.me_reconnect_backoff_base_ms,
        me_reconnect_backoff_cap_ms: runtime.me_reconnect_backoff_cap_ms,
        me_reconnect_fast_retry_count: runtime.me_reconnect_fast_retry_count,
        me_pool_drain_ttl_secs: runtime.me_pool_drain_ttl_secs,
        me_pool_force_close_secs: runtime.me_pool_force_close_secs,
        me_pool_min_fresh_ratio: runtime.me_pool_min_fresh_ratio,
        me_bind_stale_mode: runtime.me_bind_stale_mode,
        me_bind_stale_ttl_secs: runtime.me_bind_stale_ttl_secs,
        me_single_endpoint_shadow_writers: runtime.me_single_endpoint_shadow_writers,
        me_single_endpoint_outage_mode_enabled: runtime.me_single_endpoint_outage_mode_enabled,
        me_single_endpoint_outage_disable_quarantine: runtime
            .me_single_endpoint_outage_disable_quarantine,
        me_single_endpoint_outage_backoff_min_ms: runtime.me_single_endpoint_outage_backoff_min_ms,
        me_single_endpoint_outage_backoff_max_ms: runtime.me_single_endpoint_outage_backoff_max_ms,
        me_single_endpoint_shadow_rotate_every_secs: runtime
            .me_single_endpoint_shadow_rotate_every_secs,
        me_deterministic_writer_sort: runtime.me_deterministic_writer_sort,
        me_writer_pick_mode: runtime.me_writer_pick_mode,
        me_writer_pick_sample_size: runtime.me_writer_pick_sample_size,
        me_socks_kdf_policy: runtime.me_socks_kdf_policy,
        quarantined_endpoints_total: runtime.quarantined_endpoints.len(),
        quarantined_endpoints: runtime
            .quarantined_endpoints
            .into_iter()
            .map(|entry| MinimalQuarantineData {
                endpoint: entry.endpoint.to_string(),
                remaining_ms: entry.remaining_ms,
            })
            .collect(),
    };
    let network_path = runtime
        .network_path
        .into_iter()
        .map(|entry| MinimalDcPathData {
            dc: entry.dc,
            ip_preference: entry.ip_preference,
            selected_addr_v4: entry.selected_addr_v4.map(|value| value.to_string()),
            selected_addr_v6: entry.selected_addr_v6.map(|value| value.to_string()),
        })
        .collect();

    let payload = MinimalAllPayload {
        me_writers,
        dcs,
        me_runtime: Some(me_runtime),
        network_path,
    };

    if cache_ttl_ms > 0 {
        let entry = MinimalCacheEntry {
            expires_at: Instant::now() + Duration::from_millis(cache_ttl_ms),
            payload: payload.clone(),
            generated_at_epoch_secs,
        };
        *shared.minimal_cache.lock().await = Some(entry);
    }

    Some((generated_at_epoch_secs, payload))
}

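/// Placeholder returned when the middle-proxy view is disabled or its data
/// source is unavailable; every summary counter reads as zero.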
fn disabled_me_writers(now_epoch_secs: u64, reason: &'static str) -> MeWritersData {
    MeWritersData {
        middle_proxy_enabled: false,
        reason: Some(reason),
        generated_at_epoch_secs: now_epoch_secs,
        summary: MeWritersSummary {
            configured_dc_groups: 0,
            configured_endpoints: 0,
            available_endpoints: 0,
            available_pct: 0.0,
            required_writers: 0,
            alive_writers: 0,
            coverage_pct: 0.0,
            fresh_alive_writers: 0,
            fresh_coverage_pct: 0.0,
        },
        writers: Vec::new(),
    }
}

fn disabled_dcs(now_epoch_secs: u64, reason: &'static str) -> DcStatusData {
    DcStatusData {
        middle_proxy_enabled: false,
        reason: Some(reason),
        generated_at_epoch_secs: now_epoch_secs,
        dcs: Vec::new(),
    }
}

fn map_route_kind(value: UpstreamRouteKind) -> &'static str {
    match value {
        UpstreamRouteKind::Direct => "direct",
        UpstreamRouteKind::Socks4 => "socks4",
        UpstreamRouteKind::Socks5 => "socks5",
        UpstreamRouteKind::Shadowsocks => "shadowsocks",
    }
}

fn map_ip_preference(value: IpPreference) -> &'static str {
    match value {
        IpPreference::Unknown => "unknown",
        IpPreference::PreferV6 => "prefer_v6",
        IpPreference::PreferV4 => "prefer_v4",
        IpPreference::BothWork => "both_work",
        IpPreference::Unavailable => "unavailable",
    }
}

fn now_epoch_secs() -> u64 {
    SystemTime::now()
        .duration_since(UNIX_EPOCH)
        .unwrap_or_default()
        .as_secs()
}

@@ -0,0 +1,66 @@
use std::sync::Arc;
use std::sync::atomic::Ordering;
use std::time::{SystemTime, UNIX_EPOCH};

use tokio::sync::watch;

use crate::config::ProxyConfig;

use super::ApiRuntimeState;
use super::events::ApiEventStore;

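/// Spawns two background tasks: one bumps the config-reload counter and
/// timestamp whenever the config `watch` channel changes, the other mirrors
/// the admission gate into `runtime_state.admission_open` and records an
/// `admission.state` event on every transition (plus once at startup).
/// Both tasks exit when their sender side is dropped.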
pub(super) fn spawn_runtime_watchers(
    config_rx: watch::Receiver<Arc<ProxyConfig>>,
    admission_rx: watch::Receiver<bool>,
    runtime_state: Arc<ApiRuntimeState>,
    runtime_events: Arc<ApiEventStore>,
) {
    let mut config_rx_reload = config_rx;
    let runtime_state_reload = runtime_state.clone();
    let runtime_events_reload = runtime_events.clone();
    tokio::spawn(async move {
        loop {
            if config_rx_reload.changed().await.is_err() {
                break;
            }
            runtime_state_reload
                .config_reload_count
                .fetch_add(1, Ordering::Relaxed);
            runtime_state_reload
                .last_config_reload_epoch_secs
                .store(now_epoch_secs(), Ordering::Relaxed);
            runtime_events_reload.record("config.reload.applied", "config receiver updated");
        }
    });

    let mut admission_rx_watch = admission_rx;
    tokio::spawn(async move {
        runtime_state
            .admission_open
            .store(*admission_rx_watch.borrow(), Ordering::Relaxed);
        runtime_events.record(
            "admission.state",
            format!("accepting_new_connections={}", *admission_rx_watch.borrow()),
        );
        loop {
            if admission_rx_watch.changed().await.is_err() {
                break;
            }
            let admission_open = *admission_rx_watch.borrow();
            runtime_state
                .admission_open
                .store(admission_open, Ordering::Relaxed);
            runtime_events.record(
                "admission.state",
                format!("accepting_new_connections={}", admission_open),
            );
        }
    });
}

fn now_epoch_secs() -> u64 {
    SystemTime::now()
        .duration_since(UNIX_EPOCH)
        .unwrap_or_default()
        .as_secs()
}

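// Usage sketch (hypothetical wiring; the real call sites live outside this
// file). The sender halves belong to whoever applies config reloads and
// flips the admission gate:
//
//     let (config_tx, config_rx) = watch::channel(Arc::new(ProxyConfig::default()));
//     let (admission_tx, admission_rx) = watch::channel(true);
//     spawn_runtime_watchers(config_rx, admission_rx, runtime_state, runtime_events);
//     let _ = admission_tx.send(false); // recorded as an "admission.state" event
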
@@ -0,0 +1,338 @@
use std::sync::atomic::Ordering;

use serde::Serialize;

use crate::config::{MeFloorMode, MeWriterPickMode, ProxyConfig, UserMaxUniqueIpsMode};
use crate::proxy::route_mode::RelayRouteMode;

use super::ApiShared;
use super::runtime_init::build_runtime_startup_summary;

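/// Build and runtime identity block for the system-info endpoint. Optional
/// fields are dropped from the serialized output when unknown at build time.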
#[derive(Serialize)]
pub(super) struct SystemInfoData {
    pub(super) version: String,
    pub(super) target_arch: String,
    pub(super) target_os: String,
    pub(super) build_profile: String,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub(super) git_commit: Option<String>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub(super) build_time_utc: Option<String>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub(super) rustc_version: Option<String>,
    pub(super) process_started_at_epoch_secs: u64,
    pub(super) uptime_seconds: f64,
    pub(super) config_path: String,
    pub(super) config_hash: String,
    pub(super) config_reload_count: u64,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub(super) last_config_reload_epoch_secs: Option<u64>,
}

#[derive(Serialize)]
pub(super) struct RuntimeGatesData {
    pub(super) accepting_new_connections: bool,
    pub(super) conditional_cast_enabled: bool,
    pub(super) me_runtime_ready: bool,
    pub(super) me2dc_fallback_enabled: bool,
    pub(super) me2dc_fast_enabled: bool,
    pub(super) use_middle_proxy: bool,
    pub(super) route_mode: &'static str,
    pub(super) reroute_active: bool,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub(super) reroute_to_direct_at_epoch_secs: Option<u64>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub(super) reroute_reason: Option<&'static str>,
    pub(super) startup_status: &'static str,
    pub(super) startup_stage: String,
    pub(super) startup_progress_pct: f64,
}

#[derive(Serialize)]
pub(super) struct EffectiveTimeoutLimits {
    pub(super) client_first_byte_idle_secs: u64,
    pub(super) client_handshake_secs: u64,
    pub(super) tg_connect_secs: u64,
    pub(super) client_keepalive_secs: u64,
    pub(super) client_ack_secs: u64,
    pub(super) me_one_retry: u8,
    pub(super) me_one_timeout_ms: u64,
}

#[derive(Serialize)]
pub(super) struct EffectiveUpstreamLimits {
    pub(super) connect_retry_attempts: u32,
    pub(super) connect_retry_backoff_ms: u64,
    pub(super) connect_budget_ms: u64,
    pub(super) unhealthy_fail_threshold: u32,
    pub(super) connect_failfast_hard_errors: bool,
}

#[derive(Serialize)]
pub(super) struct EffectiveMiddleProxyLimits {
    pub(super) floor_mode: &'static str,
    pub(super) adaptive_floor_idle_secs: u64,
    pub(super) adaptive_floor_min_writers_single_endpoint: u8,
    pub(super) adaptive_floor_min_writers_multi_endpoint: u8,
    pub(super) adaptive_floor_recover_grace_secs: u64,
    pub(super) adaptive_floor_writers_per_core_total: u16,
    pub(super) adaptive_floor_cpu_cores_override: u16,
    pub(super) adaptive_floor_max_extra_writers_single_per_core: u16,
    pub(super) adaptive_floor_max_extra_writers_multi_per_core: u16,
    pub(super) adaptive_floor_max_active_writers_per_core: u16,
    pub(super) adaptive_floor_max_warm_writers_per_core: u16,
    pub(super) adaptive_floor_max_active_writers_global: u32,
    pub(super) adaptive_floor_max_warm_writers_global: u32,
    pub(super) reconnect_max_concurrent_per_dc: u32,
    pub(super) reconnect_backoff_base_ms: u64,
    pub(super) reconnect_backoff_cap_ms: u64,
    pub(super) reconnect_fast_retry_count: u32,
    pub(super) writer_pick_mode: &'static str,
    pub(super) writer_pick_sample_size: u8,
    pub(super) me2dc_fallback: bool,
    pub(super) me2dc_fast: bool,
}

#[derive(Serialize)]
pub(super) struct EffectiveUserIpPolicyLimits {
    pub(super) global_each: usize,
    pub(super) mode: &'static str,
    pub(super) window_secs: u64,
}

#[derive(Serialize)]
pub(super) struct EffectiveUserTcpPolicyLimits {
    pub(super) global_each: usize,
}

#[derive(Serialize)]
pub(super) struct EffectiveLimitsData {
    pub(super) update_every_secs: u64,
    pub(super) me_reinit_every_secs: u64,
    pub(super) me_pool_force_close_secs: u64,
    pub(super) timeouts: EffectiveTimeoutLimits,
    pub(super) upstream: EffectiveUpstreamLimits,
    pub(super) middle_proxy: EffectiveMiddleProxyLimits,
    pub(super) user_ip_policy: EffectiveUserIpPolicyLimits,
    pub(super) user_tcp_policy: EffectiveUserTcpPolicyLimits,
}

#[derive(Serialize)]
pub(super) struct SecurityPostureData {
    pub(super) api_read_only: bool,
    pub(super) api_whitelist_enabled: bool,
    pub(super) api_whitelist_entries: usize,
    pub(super) api_auth_header_enabled: bool,
    pub(super) proxy_protocol_enabled: bool,
    pub(super) log_level: String,
    pub(super) telemetry_core_enabled: bool,
    pub(super) telemetry_user_enabled: bool,
    pub(super) telemetry_me_level: String,
}

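/// Assembles the system-info payload. Build metadata is resolved at compile
/// time via `option_env!`, probing several env-var spellings so that both
/// custom build scripts and `vergen`-style generators are picked up.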
pub(super) fn build_system_info_data(
    shared: &ApiShared,
    _cfg: &ProxyConfig,
    revision: &str,
) -> SystemInfoData {
    let last_reload_epoch_secs = shared
        .runtime_state
        .last_config_reload_epoch_secs
        .load(Ordering::Relaxed);
    let last_config_reload_epoch_secs =
        (last_reload_epoch_secs > 0).then_some(last_reload_epoch_secs);

    let git_commit = option_env!("TELEMT_GIT_COMMIT")
        .or(option_env!("VERGEN_GIT_SHA"))
        .or(option_env!("GIT_COMMIT"))
        .map(ToString::to_string);
    let build_time_utc = option_env!("BUILD_TIME_UTC")
        .or(option_env!("VERGEN_BUILD_TIMESTAMP"))
        .map(ToString::to_string);
    let rustc_version = option_env!("RUSTC_VERSION")
        .or(option_env!("VERGEN_RUSTC_SEMVER"))
        .map(ToString::to_string);

    SystemInfoData {
        version: env!("CARGO_PKG_VERSION").to_string(),
        target_arch: std::env::consts::ARCH.to_string(),
        target_os: std::env::consts::OS.to_string(),
        build_profile: option_env!("PROFILE").unwrap_or("unknown").to_string(),
        git_commit,
        build_time_utc,
        rustc_version,
        process_started_at_epoch_secs: shared.runtime_state.process_started_at_epoch_secs,
        uptime_seconds: shared.stats.uptime_secs(),
        config_path: shared.config_path.display().to_string(),
        config_hash: revision.to_string(),
        config_reload_count: shared
            .runtime_state
            .config_reload_count
            .load(Ordering::Relaxed),
        last_config_reload_epoch_secs,
    }
}

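/// Live admission/readiness gates. `reroute_active` means the middle-proxy
/// path has fallen back to direct routing; the reason string distinguishes
/// the fast (`me2dc_fast`) fallback from the strict grace fallback.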
pub(super) async fn build_runtime_gates_data(
    shared: &ApiShared,
    cfg: &ProxyConfig,
) -> RuntimeGatesData {
    let startup_summary = build_runtime_startup_summary(shared).await;
    let route_state = shared.route_runtime.snapshot();
    let route_mode = route_state.mode.as_str();
    let fast_fallback_enabled =
        cfg.general.use_middle_proxy && cfg.general.me2dc_fallback && cfg.general.me2dc_fast;
    let reroute_active = cfg.general.use_middle_proxy
        && cfg.general.me2dc_fallback
        && matches!(route_state.mode, RelayRouteMode::Direct);
    let reroute_to_direct_at_epoch_secs = if reroute_active {
        shared.route_runtime.direct_since_epoch_secs()
    } else {
        None
    };
    let reroute_reason = if reroute_active {
        if fast_fallback_enabled {
            Some("fast_not_ready_fallback")
        } else {
            Some("strict_grace_fallback")
        }
    } else {
        None
    };
    let me_runtime_ready = if !cfg.general.use_middle_proxy {
        true
    } else {
        shared
            .me_pool
            .read()
            .await
            .as_ref()
            .map(|pool| pool.is_runtime_ready())
            .unwrap_or(false)
    };

    RuntimeGatesData {
        accepting_new_connections: shared.runtime_state.admission_open.load(Ordering::Relaxed),
        conditional_cast_enabled: cfg.general.use_middle_proxy,
        me_runtime_ready,
        me2dc_fallback_enabled: cfg.general.me2dc_fallback,
        me2dc_fast_enabled: fast_fallback_enabled,
        use_middle_proxy: cfg.general.use_middle_proxy,
        route_mode,
        reroute_active,
        reroute_to_direct_at_epoch_secs,
        reroute_reason,
        startup_status: startup_summary.status,
        startup_stage: startup_summary.stage,
        startup_progress_pct: startup_summary.progress_pct,
    }
}

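/// Flattens the effective (post-default) limits from the live config into a
/// single serializable snapshot.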
pub(super) fn build_limits_effective_data(cfg: &ProxyConfig) -> EffectiveLimitsData {
    EffectiveLimitsData {
        update_every_secs: cfg.general.effective_update_every_secs(),
        me_reinit_every_secs: cfg.general.effective_me_reinit_every_secs(),
        me_pool_force_close_secs: cfg.general.effective_me_pool_force_close_secs(),
        timeouts: EffectiveTimeoutLimits {
            client_first_byte_idle_secs: cfg.timeouts.client_first_byte_idle_secs,
            client_handshake_secs: cfg.timeouts.client_handshake,
            tg_connect_secs: cfg.general.tg_connect,
            client_keepalive_secs: cfg.timeouts.client_keepalive,
            client_ack_secs: cfg.timeouts.client_ack,
            me_one_retry: cfg.timeouts.me_one_retry,
            me_one_timeout_ms: cfg.timeouts.me_one_timeout_ms,
        },
        upstream: EffectiveUpstreamLimits {
            connect_retry_attempts: cfg.general.upstream_connect_retry_attempts,
            connect_retry_backoff_ms: cfg.general.upstream_connect_retry_backoff_ms,
            connect_budget_ms: cfg.general.upstream_connect_budget_ms,
            unhealthy_fail_threshold: cfg.general.upstream_unhealthy_fail_threshold,
            connect_failfast_hard_errors: cfg.general.upstream_connect_failfast_hard_errors,
        },
        middle_proxy: EffectiveMiddleProxyLimits {
            floor_mode: me_floor_mode_label(cfg.general.me_floor_mode),
            adaptive_floor_idle_secs: cfg.general.me_adaptive_floor_idle_secs,
            adaptive_floor_min_writers_single_endpoint: cfg
                .general
                .me_adaptive_floor_min_writers_single_endpoint,
            adaptive_floor_min_writers_multi_endpoint: cfg
                .general
                .me_adaptive_floor_min_writers_multi_endpoint,
            adaptive_floor_recover_grace_secs: cfg.general.me_adaptive_floor_recover_grace_secs,
            adaptive_floor_writers_per_core_total: cfg
                .general
                .me_adaptive_floor_writers_per_core_total,
            adaptive_floor_cpu_cores_override: cfg.general.me_adaptive_floor_cpu_cores_override,
            adaptive_floor_max_extra_writers_single_per_core: cfg
                .general
                .me_adaptive_floor_max_extra_writers_single_per_core,
            adaptive_floor_max_extra_writers_multi_per_core: cfg
                .general
                .me_adaptive_floor_max_extra_writers_multi_per_core,
            adaptive_floor_max_active_writers_per_core: cfg
                .general
                .me_adaptive_floor_max_active_writers_per_core,
            adaptive_floor_max_warm_writers_per_core: cfg
                .general
                .me_adaptive_floor_max_warm_writers_per_core,
            adaptive_floor_max_active_writers_global: cfg
                .general
                .me_adaptive_floor_max_active_writers_global,
            adaptive_floor_max_warm_writers_global: cfg
                .general
                .me_adaptive_floor_max_warm_writers_global,
            reconnect_max_concurrent_per_dc: cfg.general.me_reconnect_max_concurrent_per_dc,
            reconnect_backoff_base_ms: cfg.general.me_reconnect_backoff_base_ms,
            reconnect_backoff_cap_ms: cfg.general.me_reconnect_backoff_cap_ms,
            reconnect_fast_retry_count: cfg.general.me_reconnect_fast_retry_count,
            writer_pick_mode: me_writer_pick_mode_label(cfg.general.me_writer_pick_mode),
            writer_pick_sample_size: cfg.general.me_writer_pick_sample_size,
            me2dc_fallback: cfg.general.me2dc_fallback,
            me2dc_fast: cfg.general.me2dc_fast,
        },
        user_ip_policy: EffectiveUserIpPolicyLimits {
            global_each: cfg.access.user_max_unique_ips_global_each,
            mode: user_max_unique_ips_mode_label(cfg.access.user_max_unique_ips_mode),
            window_secs: cfg.access.user_max_unique_ips_window_secs,
        },
        user_tcp_policy: EffectiveUserTcpPolicyLimits {
            global_each: cfg.access.user_max_tcp_conns_global_each,
        },
    }
}

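/// Security-relevant switches (API hardening, PROXY protocol, telemetry
/// levels) summarized for a quick operator audit.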
pub(super) fn build_security_posture_data(cfg: &ProxyConfig) -> SecurityPostureData {
    SecurityPostureData {
        api_read_only: cfg.server.api.read_only,
        api_whitelist_enabled: !cfg.server.api.whitelist.is_empty(),
        api_whitelist_entries: cfg.server.api.whitelist.len(),
        api_auth_header_enabled: !cfg.server.api.auth_header.is_empty(),
        proxy_protocol_enabled: cfg.server.proxy_protocol,
        log_level: cfg.general.log_level.to_string(),
        telemetry_core_enabled: cfg.general.telemetry.core_enabled,
        telemetry_user_enabled: cfg.general.telemetry.user_enabled,
        telemetry_me_level: cfg.general.telemetry.me_level.to_string(),
    }
}

fn user_max_unique_ips_mode_label(mode: UserMaxUniqueIpsMode) -> &'static str {
    match mode {
        UserMaxUniqueIpsMode::ActiveWindow => "active_window",
        UserMaxUniqueIpsMode::TimeWindow => "time_window",
        UserMaxUniqueIpsMode::Combined => "combined",
    }
}

fn me_floor_mode_label(mode: MeFloorMode) -> &'static str {
    match mode {
        MeFloorMode::Static => "static",
        MeFloorMode::Adaptive => "adaptive",
    }
}

fn me_writer_pick_mode_label(mode: MeWriterPickMode) -> &'static str {
    match mode {
        MeWriterPickMode::SortedRr => "sorted_rr",
        MeWriterPickMode::P2c => "p2c",
    }
}

@@ -0,0 +1,687 @@
use std::net::IpAddr;

use hyper::StatusCode;

use crate::config::ProxyConfig;
use crate::ip_tracker::UserIpTracker;
use crate::stats::Stats;

use super::ApiShared;
use super::config_store::{
    AccessSection, ensure_expected_revision, load_config_from_disk, save_access_sections_to_disk,
    save_config_to_disk,
};
use super::model::{
    ApiFailure, CreateUserRequest, CreateUserResponse, PatchUserRequest, RotateSecretRequest,
    UserInfo, UserLinks, is_valid_ad_tag, is_valid_user_secret, is_valid_username,
    parse_optional_expiration, random_user_secret,
};

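/// Creates a new user under the mutation lock: validates inputs, enforces the
/// caller-supplied config revision (optimistic concurrency), writes only the
/// touched `[access]` sections back to disk, then syncs the IP tracker and
/// returns the fresh user view plus the new revision.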
pub(super) async fn create_user(
    body: CreateUserRequest,
    expected_revision: Option<String>,
    shared: &ApiShared,
) -> Result<(CreateUserResponse, String), ApiFailure> {
    let touches_user_ad_tags = body.user_ad_tag.is_some();
    let touches_user_max_tcp_conns = body.max_tcp_conns.is_some();
    let touches_user_expirations = body.expiration_rfc3339.is_some();
    let touches_user_data_quota = body.data_quota_bytes.is_some();
    let touches_user_max_unique_ips = body.max_unique_ips.is_some();

    if !is_valid_username(&body.username) {
        return Err(ApiFailure::bad_request(
            "username must match [A-Za-z0-9_.-] and be 1..64 chars",
        ));
    }

    let secret = match body.secret {
        Some(secret) => {
            if !is_valid_user_secret(&secret) {
                return Err(ApiFailure::bad_request(
                    "secret must be exactly 32 hex characters",
                ));
            }
            secret
        }
        None => random_user_secret(),
    };

    if let Some(ad_tag) = body.user_ad_tag.as_ref()
        && !is_valid_ad_tag(ad_tag)
    {
        return Err(ApiFailure::bad_request(
            "user_ad_tag must be exactly 32 hex characters",
        ));
    }

    let expiration = parse_optional_expiration(body.expiration_rfc3339.as_deref())?;
    let _guard = shared.mutation_lock.lock().await;
    let mut cfg = load_config_from_disk(&shared.config_path).await?;
    ensure_expected_revision(&shared.config_path, expected_revision.as_deref()).await?;

    if cfg.access.users.contains_key(&body.username) {
        return Err(ApiFailure::new(
            StatusCode::CONFLICT,
            "user_exists",
            "User already exists",
        ));
    }

    cfg.access
        .users
        .insert(body.username.clone(), secret.clone());
    if let Some(ad_tag) = body.user_ad_tag {
        cfg.access
            .user_ad_tags
            .insert(body.username.clone(), ad_tag);
    }
    if let Some(limit) = body.max_tcp_conns {
        cfg.access
            .user_max_tcp_conns
            .insert(body.username.clone(), limit);
    }
    if let Some(expiration) = expiration {
        cfg.access
            .user_expirations
            .insert(body.username.clone(), expiration);
    }
    if let Some(quota) = body.data_quota_bytes {
        cfg.access
            .user_data_quota
            .insert(body.username.clone(), quota);
    }

    let updated_limit = body.max_unique_ips;
    if let Some(limit) = updated_limit {
        cfg.access
            .user_max_unique_ips
            .insert(body.username.clone(), limit);
    }

    cfg.validate()
        .map_err(|e| ApiFailure::bad_request(format!("config validation failed: {}", e)))?;

    let mut touched_sections = vec![AccessSection::Users];
    if touches_user_ad_tags {
        touched_sections.push(AccessSection::UserAdTags);
    }
    if touches_user_max_tcp_conns {
        touched_sections.push(AccessSection::UserMaxTcpConns);
    }
    if touches_user_expirations {
        touched_sections.push(AccessSection::UserExpirations);
    }
    if touches_user_data_quota {
        touched_sections.push(AccessSection::UserDataQuota);
    }
    if touches_user_max_unique_ips {
        touched_sections.push(AccessSection::UserMaxUniqueIps);
    }

    let revision =
        save_access_sections_to_disk(&shared.config_path, &cfg, &touched_sections).await?;
    drop(_guard);

    if let Some(limit) = updated_limit {
        shared
            .ip_tracker
            .set_user_limit(&body.username, limit)
            .await;
    }
    let (detected_ip_v4, detected_ip_v6) = shared.detected_link_ips();

    let users = users_from_config(
        &cfg,
        &shared.stats,
        &shared.ip_tracker,
        detected_ip_v4,
        detected_ip_v6,
        None,
    )
    .await;
    let user = users
        .into_iter()
        .find(|entry| entry.username == body.username)
        .unwrap_or(UserInfo {
            username: body.username.clone(),
            in_runtime: false,
            user_ad_tag: None,
            max_tcp_conns: cfg
                .access
                .user_max_tcp_conns
                .get(&body.username)
                .copied()
                .filter(|limit| *limit > 0)
                .or((cfg.access.user_max_tcp_conns_global_each > 0)
                    .then_some(cfg.access.user_max_tcp_conns_global_each)),
            expiration_rfc3339: None,
            data_quota_bytes: None,
            max_unique_ips: updated_limit,
            current_connections: 0,
            active_unique_ips: 0,
            active_unique_ips_list: Vec::new(),
            recent_unique_ips: 0,
            recent_unique_ips_list: Vec::new(),
            total_octets: 0,
            links: build_user_links(&cfg, &secret, detected_ip_v4, detected_ip_v6),
        });

    Ok((CreateUserResponse { user, secret }, revision))
}

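/// Applies partial updates to an existing user. Unlike `create_user`, this
/// rewrites the whole config file via `save_config_to_disk` rather than only
/// the touched access sections.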
pub(super) async fn patch_user(
    user: &str,
    body: PatchUserRequest,
    expected_revision: Option<String>,
    shared: &ApiShared,
) -> Result<(UserInfo, String), ApiFailure> {
    if let Some(secret) = body.secret.as_ref()
        && !is_valid_user_secret(secret)
    {
        return Err(ApiFailure::bad_request(
            "secret must be exactly 32 hex characters",
        ));
    }
    if let Some(ad_tag) = body.user_ad_tag.as_ref()
        && !is_valid_ad_tag(ad_tag)
    {
        return Err(ApiFailure::bad_request(
            "user_ad_tag must be exactly 32 hex characters",
        ));
    }
    let expiration = parse_optional_expiration(body.expiration_rfc3339.as_deref())?;
    let _guard = shared.mutation_lock.lock().await;
    let mut cfg = load_config_from_disk(&shared.config_path).await?;
    ensure_expected_revision(&shared.config_path, expected_revision.as_deref()).await?;

    if !cfg.access.users.contains_key(user) {
        return Err(ApiFailure::new(
            StatusCode::NOT_FOUND,
            "not_found",
            "User not found",
        ));
    }

    if let Some(secret) = body.secret {
        cfg.access.users.insert(user.to_string(), secret);
    }
    if let Some(ad_tag) = body.user_ad_tag {
        cfg.access.user_ad_tags.insert(user.to_string(), ad_tag);
    }
    if let Some(limit) = body.max_tcp_conns {
        cfg.access
            .user_max_tcp_conns
            .insert(user.to_string(), limit);
    }
    if let Some(expiration) = expiration {
        cfg.access
            .user_expirations
            .insert(user.to_string(), expiration);
    }
    if let Some(quota) = body.data_quota_bytes {
        cfg.access.user_data_quota.insert(user.to_string(), quota);
    }

    let mut updated_limit = None;
    if let Some(limit) = body.max_unique_ips {
        cfg.access
            .user_max_unique_ips
            .insert(user.to_string(), limit);
        updated_limit = Some(limit);
    }

    cfg.validate()
        .map_err(|e| ApiFailure::bad_request(format!("config validation failed: {}", e)))?;

    let revision = save_config_to_disk(&shared.config_path, &cfg).await?;
    drop(_guard);
    if let Some(limit) = updated_limit {
        shared.ip_tracker.set_user_limit(user, limit).await;
    }
    let (detected_ip_v4, detected_ip_v6) = shared.detected_link_ips();
    let users = users_from_config(
        &cfg,
        &shared.stats,
        &shared.ip_tracker,
        detected_ip_v4,
        detected_ip_v6,
        None,
    )
    .await;
    let user_info = users
        .into_iter()
        .find(|entry| entry.username == user)
        .ok_or_else(|| ApiFailure::internal("failed to build updated user view"))?;

    Ok((user_info, revision))
}

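/// Replaces a user's secret (random unless one is supplied) and rewrites all
/// access sections so the on-disk file stays internally consistent.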
pub(super) async fn rotate_secret(
    user: &str,
    body: RotateSecretRequest,
    expected_revision: Option<String>,
    shared: &ApiShared,
) -> Result<(CreateUserResponse, String), ApiFailure> {
    let secret = body.secret.unwrap_or_else(random_user_secret);
    if !is_valid_user_secret(&secret) {
        return Err(ApiFailure::bad_request(
            "secret must be exactly 32 hex characters",
        ));
    }

    let _guard = shared.mutation_lock.lock().await;
    let mut cfg = load_config_from_disk(&shared.config_path).await?;
    ensure_expected_revision(&shared.config_path, expected_revision.as_deref()).await?;

    if !cfg.access.users.contains_key(user) {
        return Err(ApiFailure::new(
            StatusCode::NOT_FOUND,
            "not_found",
            "User not found",
        ));
    }

    cfg.access.users.insert(user.to_string(), secret.clone());
    cfg.validate()
        .map_err(|e| ApiFailure::bad_request(format!("config validation failed: {}", e)))?;
    let touched_sections = [
        AccessSection::Users,
        AccessSection::UserAdTags,
        AccessSection::UserMaxTcpConns,
        AccessSection::UserExpirations,
        AccessSection::UserDataQuota,
        AccessSection::UserMaxUniqueIps,
    ];
    let revision =
        save_access_sections_to_disk(&shared.config_path, &cfg, &touched_sections).await?;
    drop(_guard);

    let (detected_ip_v4, detected_ip_v6) = shared.detected_link_ips();
    let users = users_from_config(
        &cfg,
        &shared.stats,
        &shared.ip_tracker,
        detected_ip_v4,
        detected_ip_v6,
        None,
    )
    .await;
    let user_info = users
        .into_iter()
        .find(|entry| entry.username == user)
        .ok_or_else(|| ApiFailure::internal("failed to build updated user view"))?;

    Ok((
        CreateUserResponse {
            user: user_info,
            secret,
        },
        revision,
    ))
}

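/// Removes a user and every per-user override, refusing to delete the last
/// remaining account so the proxy can never end up with zero valid users.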
pub(super) async fn delete_user(
    user: &str,
    expected_revision: Option<String>,
    shared: &ApiShared,
) -> Result<(String, String), ApiFailure> {
    let _guard = shared.mutation_lock.lock().await;
    let mut cfg = load_config_from_disk(&shared.config_path).await?;
    ensure_expected_revision(&shared.config_path, expected_revision.as_deref()).await?;

    if !cfg.access.users.contains_key(user) {
        return Err(ApiFailure::new(
            StatusCode::NOT_FOUND,
            "not_found",
            "User not found",
        ));
    }
    if cfg.access.users.len() <= 1 {
        return Err(ApiFailure::new(
            StatusCode::CONFLICT,
            "last_user_forbidden",
            "Cannot delete the last configured user",
        ));
    }

    cfg.access.users.remove(user);
    cfg.access.user_ad_tags.remove(user);
    cfg.access.user_max_tcp_conns.remove(user);
    cfg.access.user_expirations.remove(user);
    cfg.access.user_data_quota.remove(user);
    cfg.access.user_max_unique_ips.remove(user);

    cfg.validate()
        .map_err(|e| ApiFailure::bad_request(format!("config validation failed: {}", e)))?;
    let touched_sections = [
        AccessSection::Users,
        AccessSection::UserAdTags,
        AccessSection::UserMaxTcpConns,
        AccessSection::UserExpirations,
        AccessSection::UserDataQuota,
        AccessSection::UserMaxUniqueIps,
    ];
    let revision =
        save_access_sections_to_disk(&shared.config_path, &cfg, &touched_sections).await?;
    drop(_guard);
    shared.ip_tracker.remove_user_limit(user).await;
    shared.ip_tracker.clear_user_ips(user).await;

    Ok((user.to_string(), revision))
}

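/// Builds the per-user API view from a config snapshot. Effective limits fall
/// back to the global `*_global_each` values when no positive per-user
/// override exists; `in_runtime` is true only when a runtime config snapshot
/// is supplied and still contains the user.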
pub(super) async fn users_from_config(
    cfg: &ProxyConfig,
    stats: &Stats,
    ip_tracker: &UserIpTracker,
    startup_detected_ip_v4: Option<IpAddr>,
    startup_detected_ip_v6: Option<IpAddr>,
    runtime_cfg: Option<&ProxyConfig>,
) -> Vec<UserInfo> {
    let mut names = cfg.access.users.keys().cloned().collect::<Vec<_>>();
    names.sort();
    let active_ip_lists = ip_tracker.get_active_ips_for_users(&names).await;
    let recent_ip_lists = ip_tracker.get_recent_ips_for_users(&names).await;

    let mut users = Vec::with_capacity(names.len());
    for username in names {
        let active_ip_list = active_ip_lists
            .get(&username)
            .cloned()
            .unwrap_or_else(Vec::new);
        let recent_ip_list = recent_ip_lists
            .get(&username)
            .cloned()
            .unwrap_or_else(Vec::new);
        let links = cfg
            .access
            .users
            .get(&username)
            .map(|secret| {
                build_user_links(cfg, secret, startup_detected_ip_v4, startup_detected_ip_v6)
            })
            .unwrap_or(UserLinks {
                classic: Vec::new(),
                secure: Vec::new(),
                tls: Vec::new(),
            });
        users.push(UserInfo {
            in_runtime: runtime_cfg
                .map(|runtime| runtime.access.users.contains_key(&username))
                .unwrap_or(false),
            user_ad_tag: cfg.access.user_ad_tags.get(&username).cloned(),
            max_tcp_conns: cfg
                .access
                .user_max_tcp_conns
                .get(&username)
                .copied()
                .filter(|limit| *limit > 0)
                .or((cfg.access.user_max_tcp_conns_global_each > 0)
                    .then_some(cfg.access.user_max_tcp_conns_global_each)),
            expiration_rfc3339: cfg
                .access
                .user_expirations
                .get(&username)
                .map(chrono::DateTime::<chrono::Utc>::to_rfc3339),
            data_quota_bytes: cfg.access.user_data_quota.get(&username).copied(),
            max_unique_ips: cfg
                .access
                .user_max_unique_ips
                .get(&username)
                .copied()
                .filter(|limit| *limit > 0)
                .or((cfg.access.user_max_unique_ips_global_each > 0)
                    .then_some(cfg.access.user_max_unique_ips_global_each)),
            current_connections: stats.get_user_curr_connects(&username),
            active_unique_ips: active_ip_list.len(),
            active_unique_ips_list: active_ip_list,
            recent_unique_ips: recent_ip_list.len(),
            recent_unique_ips_list: recent_ip_list,
            total_octets: stats.get_user_total_octets(&username),
            links,
            username,
        });
    }
    users
}

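/// Renders `tg://proxy` deep links for one secret: the plain secret for
/// classic mode, a `dd` prefix for secure (padded) mode, and an `ee` prefix
/// plus the hex-encoded TLS domain for fake-TLS mode, for every resolved host.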
fn build_user_links(
    cfg: &ProxyConfig,
    secret: &str,
    startup_detected_ip_v4: Option<IpAddr>,
    startup_detected_ip_v6: Option<IpAddr>,
) -> UserLinks {
    let hosts = resolve_link_hosts(cfg, startup_detected_ip_v4, startup_detected_ip_v6);
    let port = cfg.general.links.public_port.unwrap_or(cfg.server.port);
    let tls_domains = resolve_tls_domains(cfg);

    let mut classic = Vec::new();
    let mut secure = Vec::new();
    let mut tls = Vec::new();

    for host in &hosts {
        if cfg.general.modes.classic {
            classic.push(format!(
                "tg://proxy?server={}&port={}&secret={}",
                host, port, secret
            ));
        }
        if cfg.general.modes.secure {
            secure.push(format!(
                "tg://proxy?server={}&port={}&secret=dd{}",
                host, port, secret
            ));
        }
        if cfg.general.modes.tls {
            for domain in &tls_domains {
                let domain_hex = hex::encode(domain);
                tls.push(format!(
                    "tg://proxy?server={}&port={}&secret=ee{}{}",
                    host, port, secret, domain_hex
                ));
            }
        }
    }

    UserLinks {
        classic,
        secure,
        tls,
    }
}

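/// Chooses the hosts advertised in links, in priority order: the configured
/// `public_host`, per-listener `announce`/`announce_ip` values, detected
/// public IPs for wildcard listeners, legacy listen addresses, and finally
/// the literal "UNKNOWN" when nothing usable remains.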
fn resolve_link_hosts(
    cfg: &ProxyConfig,
    startup_detected_ip_v4: Option<IpAddr>,
    startup_detected_ip_v6: Option<IpAddr>,
) -> Vec<String> {
    if let Some(host) = cfg
        .general
        .links
        .public_host
        .as_deref()
        .map(str::trim)
        .filter(|value| !value.is_empty())
    {
        return vec![host.to_string()];
    }

    let mut hosts = Vec::new();
    for listener in &cfg.server.listeners {
        if let Some(host) = listener
            .announce
            .as_deref()
            .map(str::trim)
            .filter(|value| !value.is_empty())
        {
            push_unique_host(&mut hosts, host);
            continue;
        }
        if let Some(ip) = listener.announce_ip
            && !ip.is_unspecified()
        {
            push_unique_host(&mut hosts, &ip.to_string());
            continue;
        }
        if listener.ip.is_unspecified() {
            let detected_ip = if listener.ip.is_ipv4() {
                startup_detected_ip_v4
            } else {
                startup_detected_ip_v6
            };
            if let Some(ip) = detected_ip {
                push_unique_host(&mut hosts, &ip.to_string());
            } else {
                push_unique_host(&mut hosts, &listener.ip.to_string());
            }
            continue;
        }
        push_unique_host(&mut hosts, &listener.ip.to_string());
    }

    if !hosts.is_empty() {
        return hosts;
    }

    if let Some(ip) = startup_detected_ip_v4.or(startup_detected_ip_v6) {
        return vec![ip.to_string()];
    }

    if let Some(host) = cfg.server.listen_addr_ipv4.as_deref() {
        push_host_from_legacy_listen(&mut hosts, host);
    }
    if let Some(host) = cfg.server.listen_addr_ipv6.as_deref() {
        push_host_from_legacy_listen(&mut hosts, host);
    }
    if !hosts.is_empty() {
        return hosts;
    }

    vec!["UNKNOWN".to_string()]
}

fn push_host_from_legacy_listen(hosts: &mut Vec<String>, raw: &str) {
    let candidate = raw.trim();
    if candidate.is_empty() {
        return;
    }

    match candidate.parse::<IpAddr>() {
        Ok(ip) if ip.is_unspecified() => {}
        Ok(ip) => push_unique_host(hosts, &ip.to_string()),
        Err(_) => push_unique_host(hosts, candidate),
    }
}

fn push_unique_host(hosts: &mut Vec<String>, candidate: &str) {
    if !hosts.iter().any(|existing| existing == candidate) {
        hosts.push(candidate.to_string());
    }
}

fn resolve_tls_domains(cfg: &ProxyConfig) -> Vec<&str> {
    let mut domains = Vec::with_capacity(1 + cfg.censorship.tls_domains.len());
    let primary = cfg.censorship.tls_domain.as_str();
    if !primary.is_empty() {
        domains.push(primary);
    }
    for domain in &cfg.censorship.tls_domains {
        let value = domain.as_str();
        if value.is_empty() || domains.contains(&value) {
            continue;
        }
        domains.push(value);
    }
    domains
}

#[cfg(test)]
mod tests {
    use super::*;
    use crate::ip_tracker::UserIpTracker;
    use crate::stats::Stats;

    #[tokio::test]
    async fn users_from_config_reports_effective_tcp_limit_with_global_fallback() {
        let mut cfg = ProxyConfig::default();
        cfg.access.users.insert(
            "alice".to_string(),
            "0123456789abcdef0123456789abcdef".to_string(),
        );
        cfg.access.user_max_tcp_conns_global_each = 7;

        let stats = Stats::new();
        let tracker = UserIpTracker::new();

        let users = users_from_config(&cfg, &stats, &tracker, None, None, None).await;
        let alice = users
            .iter()
            .find(|entry| entry.username == "alice")
            .expect("alice must be present");
        assert!(!alice.in_runtime);
        assert_eq!(alice.max_tcp_conns, Some(7));

        cfg.access.user_max_tcp_conns.insert("alice".to_string(), 5);
        let users = users_from_config(&cfg, &stats, &tracker, None, None, None).await;
        let alice = users
            .iter()
            .find(|entry| entry.username == "alice")
            .expect("alice must be present");
        assert!(!alice.in_runtime);
        assert_eq!(alice.max_tcp_conns, Some(5));

        cfg.access.user_max_tcp_conns.insert("alice".to_string(), 0);
        let users = users_from_config(&cfg, &stats, &tracker, None, None, None).await;
        let alice = users
            .iter()
            .find(|entry| entry.username == "alice")
            .expect("alice must be present");
        assert!(!alice.in_runtime);
        assert_eq!(alice.max_tcp_conns, Some(7));

        cfg.access.user_max_tcp_conns_global_each = 0;
        let users = users_from_config(&cfg, &stats, &tracker, None, None, None).await;
        let alice = users
            .iter()
            .find(|entry| entry.username == "alice")
            .expect("alice must be present");
        assert!(!alice.in_runtime);
        assert_eq!(alice.max_tcp_conns, None);
    }

    #[tokio::test]
    async fn users_from_config_marks_runtime_membership_when_snapshot_is_provided() {
        let mut disk_cfg = ProxyConfig::default();
        disk_cfg.access.users.insert(
            "alice".to_string(),
            "0123456789abcdef0123456789abcdef".to_string(),
        );
        disk_cfg.access.users.insert(
            "bob".to_string(),
            "fedcba9876543210fedcba9876543210".to_string(),
        );

        let mut runtime_cfg = ProxyConfig::default();
        runtime_cfg.access.users.insert(
            "alice".to_string(),
            "0123456789abcdef0123456789abcdef".to_string(),
        );

        let stats = Stats::new();
        let tracker = UserIpTracker::new();
        let users =
            users_from_config(&disk_cfg, &stats, &tracker, None, None, Some(&runtime_cfg)).await;

        let alice = users
            .iter()
            .find(|entry| entry.username == "alice")
            .expect("alice must be present");
        let bob = users
            .iter()
            .find(|entry| entry.username == "bob")
            .expect("bob must be present");

        assert!(alice.in_runtime);
        assert!(!bob.in_runtime);
    }
}

553  src/cli.rs
@@ -1,11 +1,270 @@
//! CLI commands: --init (fire-and-forget setup)
//! CLI commands: --init (fire-and-forget setup), daemon options, subcommands
//!
//! Subcommands:
//! - `start [OPTIONS] [config.toml]` - Start the daemon
//! - `stop [--pid-file PATH]` - Stop a running daemon
//! - `reload [--pid-file PATH]` - Reload configuration (SIGHUP)
//! - `status [--pid-file PATH]` - Check daemon status
//! - `run [OPTIONS] [config.toml]` - Run in foreground (default behavior)

use rand::RngExt;
use std::fs;
use std::path::{Path, PathBuf};
use std::process::Command;
use rand::Rng;

#[cfg(unix)]
use crate::daemon::{self, DEFAULT_PID_FILE, DaemonOptions};

/// CLI subcommand to execute.
#[derive(Debug, Clone, PartialEq, Eq)]
pub enum Subcommand {
    /// Run the proxy (default, or explicit `run` subcommand).
    Run,
    /// Start as daemon (`start` subcommand).
    Start,
    /// Stop a running daemon (`stop` subcommand).
    Stop,
    /// Reload configuration (`reload` subcommand).
    Reload,
    /// Check daemon status (`status` subcommand).
    Status,
    /// Fire-and-forget setup (`--init`).
    Init,
}

/// Parsed subcommand with its options.
#[derive(Debug)]
pub struct ParsedCommand {
    pub subcommand: Subcommand,
    pub pid_file: PathBuf,
    pub config_path: String,
    #[cfg(unix)]
    pub daemon_opts: DaemonOptions,
    pub init_opts: Option<InitOptions>,
}

impl Default for ParsedCommand {
    fn default() -> Self {
        Self {
            subcommand: Subcommand::Run,
            #[cfg(unix)]
            pid_file: PathBuf::from(DEFAULT_PID_FILE),
            #[cfg(not(unix))]
            pid_file: PathBuf::from("/var/run/telemt.pid"),
            config_path: "config.toml".to_string(),
            #[cfg(unix)]
            daemon_opts: DaemonOptions::default(),
            init_opts: None,
        }
    }
}

/// Parse CLI arguments into a command structure.
|
||||
pub fn parse_command(args: &[String]) -> ParsedCommand {
|
||||
let mut cmd = ParsedCommand::default();
|
||||
|
||||
// Check for --init first (legacy form)
|
||||
if args.iter().any(|a| a == "--init") {
|
||||
cmd.subcommand = Subcommand::Init;
|
||||
cmd.init_opts = parse_init_args(args);
|
||||
return cmd;
|
||||
}
|
||||
|
||||
// Check for subcommand as first argument
|
||||
if let Some(first) = args.first() {
|
||||
match first.as_str() {
|
||||
"start" => {
|
||||
cmd.subcommand = Subcommand::Start;
|
||||
#[cfg(unix)]
|
||||
{
|
||||
cmd.daemon_opts = parse_daemon_args(args);
|
||||
// Force daemonize for start command
|
||||
cmd.daemon_opts.daemonize = true;
|
||||
}
|
||||
}
|
||||
"stop" => {
|
||||
cmd.subcommand = Subcommand::Stop;
|
||||
}
|
||||
"reload" => {
|
||||
cmd.subcommand = Subcommand::Reload;
|
||||
}
|
||||
"status" => {
|
||||
cmd.subcommand = Subcommand::Status;
|
||||
}
|
||||
"run" => {
|
||||
cmd.subcommand = Subcommand::Run;
|
||||
#[cfg(unix)]
|
||||
{
|
||||
cmd.daemon_opts = parse_daemon_args(args);
|
||||
}
|
||||
}
|
||||
_ => {
|
||||
// No subcommand, default to Run
|
||||
#[cfg(unix)]
|
||||
{
|
||||
cmd.daemon_opts = parse_daemon_args(args);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Parse remaining options
|
||||
let mut i = 0;
|
||||
while i < args.len() {
|
||||
match args[i].as_str() {
|
||||
// Skip subcommand names
|
||||
"start" | "stop" | "reload" | "status" | "run" => {}
|
||||
// PID file option (for stop/reload/status)
|
||||
"--pid-file" => {
|
||||
i += 1;
|
||||
if i < args.len() {
|
||||
cmd.pid_file = PathBuf::from(&args[i]);
|
||||
#[cfg(unix)]
|
||||
{
|
||||
cmd.daemon_opts.pid_file = Some(cmd.pid_file.clone());
|
||||
}
|
||||
}
|
||||
}
|
||||
s if s.starts_with("--pid-file=") => {
|
||||
cmd.pid_file = PathBuf::from(s.trim_start_matches("--pid-file="));
|
||||
#[cfg(unix)]
|
||||
{
|
||||
cmd.daemon_opts.pid_file = Some(cmd.pid_file.clone());
|
||||
}
|
||||
}
|
||||
// Config path (positional, non-flag argument)
|
||||
s if !s.starts_with('-') => {
|
||||
cmd.config_path = s.to_string();
|
||||
}
|
||||
_ => {}
|
||||
}
|
||||
i += 1;
|
||||
}
|
||||
|
||||
cmd
|
||||
}
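
A quick illustration (not part of the diff): given the parsing rules above, a `stop` invocation with an inline `--pid-file=` should produce the following. The test name and paths here are invented for the sketch.

#[test]
fn parse_command_stop_with_pid_file_sketch() {
    // Hypothetical example: "telemt stop --pid-file=/tmp/telemt.pid"
    let args: Vec<String> = vec!["stop".into(), "--pid-file=/tmp/telemt.pid".into()];
    let cmd = parse_command(&args);
    assert_eq!(cmd.subcommand, Subcommand::Stop);
    // The --pid-file= form sets the top-level pid_file (and, on unix, daemon_opts.pid_file).
    assert_eq!(cmd.pid_file, std::path::PathBuf::from("/tmp/telemt.pid"));
    // No positional non-flag argument was given, so config_path keeps its default.
    assert_eq!(cmd.config_path, "config.toml");
}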

/// Execute a subcommand that doesn't require starting the server.
/// Returns `Some(exit_code)` if the command was handled, `None` if server should start.
#[cfg(unix)]
pub fn execute_subcommand(cmd: &ParsedCommand) -> Option<i32> {
    match cmd.subcommand {
        Subcommand::Stop => Some(cmd_stop(&cmd.pid_file)),
        Subcommand::Reload => Some(cmd_reload(&cmd.pid_file)),
        Subcommand::Status => Some(cmd_status(&cmd.pid_file)),
        Subcommand::Init => {
            if let Some(opts) = cmd.init_opts.clone() {
                match run_init(opts) {
                    Ok(()) => Some(0),
                    Err(e) => {
                        eprintln!("[telemt] Init failed: {}", e);
                        Some(1)
                    }
                }
            } else {
                Some(1)
            }
        }
        // Run and Start need the server
        Subcommand::Run | Subcommand::Start => None,
    }
}
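
For orientation, a minimal sketch of how `parse_command` and `execute_subcommand` compose in a binary entry point; this wiring is an assumption for illustration, not code taken from the repository's `main.rs`.

fn main_dispatch_sketch() -> i32 {
    // Hypothetical glue: parse, run side-effect subcommands, otherwise start the server.
    let args: Vec<String> = std::env::args().skip(1).collect();
    let cmd = parse_command(&args);
    if let Some(code) = execute_subcommand(&cmd) {
        return code; // stop/reload/status/--init are fully handled here
    }
    // Subcommand::Run or Subcommand::Start: hand off to the server entry point.
    0
}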

#[cfg(not(unix))]
pub fn execute_subcommand(cmd: &ParsedCommand) -> Option<i32> {
    match cmd.subcommand {
        Subcommand::Stop | Subcommand::Reload | Subcommand::Status => {
            eprintln!("[telemt] Subcommand not supported on this platform");
            Some(1)
        }
        Subcommand::Init => {
            if let Some(opts) = cmd.init_opts.clone() {
                match run_init(opts) {
                    Ok(()) => Some(0),
                    Err(e) => {
                        eprintln!("[telemt] Init failed: {}", e);
                        Some(1)
                    }
                }
            } else {
                Some(1)
            }
        }
        Subcommand::Run | Subcommand::Start => None,
    }
}

/// Stop command: send SIGTERM to the running daemon.
#[cfg(unix)]
fn cmd_stop(pid_file: &Path) -> i32 {
    use nix::sys::signal::Signal;

    println!("Stopping telemt daemon...");

    match daemon::signal_pid_file(pid_file, Signal::SIGTERM) {
        Ok(()) => {
            println!("Stop signal sent successfully");

            // Wait for process to exit (up to 10 seconds)
            for _ in 0..20 {
                std::thread::sleep(std::time::Duration::from_millis(500));
                if let daemon::DaemonStatus::NotRunning = daemon::check_status(pid_file) {
                    println!("Daemon stopped");
                    return 0;
                }
            }
            println!("Daemon may still be shutting down");
            0
        }
        Err(e) => {
            eprintln!("Failed to stop daemon: {}", e);
            1
        }
    }
}

/// Reload command: send SIGHUP to trigger config reload.
#[cfg(unix)]
fn cmd_reload(pid_file: &Path) -> i32 {
    use nix::sys::signal::Signal;

    println!("Reloading telemt configuration...");

    match daemon::signal_pid_file(pid_file, Signal::SIGHUP) {
        Ok(()) => {
            println!("Reload signal sent successfully");
            0
        }
        Err(e) => {
            eprintln!("Failed to reload daemon: {}", e);
            1
        }
    }
}

/// Status command: check if daemon is running.
#[cfg(unix)]
fn cmd_status(pid_file: &Path) -> i32 {
    match daemon::check_status(pid_file) {
        daemon::DaemonStatus::Running(pid) => {
            println!("telemt is running (pid {})", pid);
            0
        }
        daemon::DaemonStatus::Stale(pid) => {
            println!("telemt is not running (stale pid file, was pid {})", pid);
            // Clean up stale PID file
            let _ = std::fs::remove_file(pid_file);
            1
        }
        daemon::DaemonStatus::NotRunning => {
            println!("telemt is not running");
            1
        }
    }
}

/// Options for the init command
#[derive(Debug, Clone)]
pub struct InitOptions {
    pub port: u16,
    pub domain: String,

@@ -15,6 +274,64 @@ pub struct InitOptions {
    pub no_start: bool,
}

/// Parse daemon-related options from CLI args.
#[cfg(unix)]
pub fn parse_daemon_args(args: &[String]) -> DaemonOptions {
    let mut opts = DaemonOptions::default();
    let mut i = 0;

    while i < args.len() {
        match args[i].as_str() {
            "--daemon" | "-d" => {
                opts.daemonize = true;
            }
            "--foreground" | "-f" => {
                opts.foreground = true;
            }
            "--pid-file" => {
                i += 1;
                if i < args.len() {
                    opts.pid_file = Some(PathBuf::from(&args[i]));
                }
            }
            s if s.starts_with("--pid-file=") => {
                opts.pid_file = Some(PathBuf::from(s.trim_start_matches("--pid-file=")));
            }
            "--run-as-user" => {
                i += 1;
                if i < args.len() {
                    opts.user = Some(args[i].clone());
                }
            }
            s if s.starts_with("--run-as-user=") => {
                opts.user = Some(s.trim_start_matches("--run-as-user=").to_string());
            }
            "--run-as-group" => {
                i += 1;
                if i < args.len() {
                    opts.group = Some(args[i].clone());
                }
            }
            s if s.starts_with("--run-as-group=") => {
                opts.group = Some(s.trim_start_matches("--run-as-group=").to_string());
            }
            "--working-dir" => {
                i += 1;
                if i < args.len() {
                    opts.working_dir = Some(PathBuf::from(&args[i]));
                }
            }
            s if s.starts_with("--working-dir=") => {
                opts.working_dir = Some(PathBuf::from(s.trim_start_matches("--working-dir=")));
            }
            _ => {}
        }
        i += 1;
    }

    opts
}
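
A short illustrative check of both flag forms handled above (`--flag value` and `--flag=value`); the user and directory values are invented for the example.

#[cfg(unix)]
#[test]
fn parse_daemon_args_sketch() {
    let args: Vec<String> = vec![
        "-d".into(),
        "--run-as-user=proxy".into(),
        "--working-dir".into(),
        "/var/lib/telemt".into(),
    ];
    let opts = parse_daemon_args(&args);
    assert!(opts.daemonize); // "-d" short form
    assert_eq!(opts.user.as_deref(), Some("proxy")); // "--flag=value" form
    assert_eq!(
        opts.working_dir,
        Some(std::path::PathBuf::from("/var/lib/telemt")) // "--flag value" form
    );
}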

impl Default for InitOptions {
    fn default() -> Self {
        Self {

@@ -35,10 +352,10 @@ pub fn parse_init_args(args: &[String]) -> Option<InitOptions> {
    if !args.iter().any(|a| a == "--init") {
        return None;
    }

    let mut opts = InitOptions::default();
    let mut i = 0;

    while i < args.len() {
        match args[i].as_str() {
            "--port" => {

@@ -78,16 +395,22 @@ pub fn parse_init_args(args: &[String]) -> Option<InitOptions> {
            }
        }
        i += 1;
    }

    Some(opts)
}

/// Run the fire-and-forget setup.
pub fn run_init(opts: InitOptions) -> Result<(), Box<dyn std::error::Error>> {
    use crate::service::{self, InitSystem, ServiceOptions};

    eprintln!("[telemt] Fire-and-forget setup");
    eprintln!();

    // 1. Generate or validate secret

    // 1. Detect init system
    let init_system = service::detect_init_system();
    eprintln!("[+] Detected init system: {}", init_system);

    // 2. Generate or validate secret
    let secret = match opts.secret {
        Some(s) => {
            if s.len() != 32 || !s.chars().all(|c| c.is_ascii_hexdigit()) {

@@ -98,80 +421,134 @@ pub fn run_init(opts: InitOptions) -> Result<(), Box<dyn std::error::Error>> {
        }
        None => generate_secret(),
    };

    eprintln!("[+] Secret: {}", secret);
    eprintln!("[+] User: {}", opts.username);
    eprintln!("[+] Port: {}", opts.port);
    eprintln!("[+] Domain: {}", opts.domain);

    // 2. Create config directory

    // 3. Create config directory
    fs::create_dir_all(&opts.config_dir)?;
    let config_path = opts.config_dir.join("config.toml");

    // 3. Write config

    // 4. Write config
    let config_content = generate_config(&opts.username, &secret, opts.port, &opts.domain);
    fs::write(&config_path, &config_content)?;
    eprintln!("[+] Config written to {}", config_path.display());

    // 4. Write systemd unit
    let exe_path = std::env::current_exe()
        .unwrap_or_else(|_| PathBuf::from("/usr/local/bin/telemt"));

    let unit_path = Path::new("/etc/systemd/system/telemt.service");
    let unit_content = generate_systemd_unit(&exe_path, &config_path);

    match fs::write(unit_path, &unit_content) {

    // 5. Generate and write service file
    let exe_path =
        std::env::current_exe().unwrap_or_else(|_| PathBuf::from("/usr/local/bin/telemt"));

    let service_opts = ServiceOptions {
        exe_path: &exe_path,
        config_path: &config_path,
        user: None, // Let systemd/init handle user
        group: None,
        pid_file: "/var/run/telemt.pid",
        working_dir: Some("/var/lib/telemt"),
        description: "Telemt MTProxy - Telegram MTProto Proxy",
    };

    let service_path = service::service_file_path(init_system);
    let service_content = service::generate_service_file(init_system, &service_opts);

    // Ensure parent directory exists
    if let Some(parent) = Path::new(service_path).parent() {
        let _ = fs::create_dir_all(parent);
    }

    match fs::write(service_path, &service_content) {
        Ok(()) => {
            eprintln!("[+] Systemd unit written to {}", unit_path.display());
            eprintln!("[+] Service file written to {}", service_path);

            // Make script executable for OpenRC/FreeBSD
            #[cfg(unix)]
            if init_system == InitSystem::OpenRC || init_system == InitSystem::FreeBSDRc {
                use std::os::unix::fs::PermissionsExt;
                let mut perms = fs::metadata(service_path)?.permissions();
                perms.set_mode(0o755);
                fs::set_permissions(service_path, perms)?;
            }
        }
        Err(e) => {
            eprintln!("[!] Cannot write systemd unit (run as root?): {}", e);
            eprintln!("[!] Manual unit file content:");
            eprintln!("{}", unit_content);

            // Still print links and config
            eprintln!("[!] Cannot write service file (run as root?): {}", e);
            eprintln!("[!] Manual service file content:");
            eprintln!("{}", service_content);

            // Still print links and installation instructions
            eprintln!();
            eprintln!("{}", service::installation_instructions(init_system));
            print_links(&opts.username, &secret, opts.port, &opts.domain);
            return Ok(());
        }
    }

    // 5. Reload systemd
    run_cmd("systemctl", &["daemon-reload"]);

    // 6. Enable service
    run_cmd("systemctl", &["enable", "telemt.service"]);
    eprintln!("[+] Service enabled");

    // 7. Start service (unless --no-start)
    if !opts.no_start {
        run_cmd("systemctl", &["start", "telemt.service"]);
        eprintln!("[+] Service started");

        // Brief delay then check status
        std::thread::sleep(std::time::Duration::from_secs(1));
        let status = Command::new("systemctl")
            .args(["is-active", "telemt.service"])
            .output();

        match status {
            Ok(out) if out.status.success() => {
                eprintln!("[+] Service is running");
            }
            _ => {
                eprintln!("[!] Service may not have started correctly");
                eprintln!("[!] Check: journalctl -u telemt.service -n 20");

    // 6. Install and enable service based on init system
    match init_system {
        InitSystem::Systemd => {
            run_cmd("systemctl", &["daemon-reload"]);
            run_cmd("systemctl", &["enable", "telemt.service"]);
            eprintln!("[+] Service enabled");

            if !opts.no_start {
                run_cmd("systemctl", &["start", "telemt.service"]);
                eprintln!("[+] Service started");

                std::thread::sleep(std::time::Duration::from_secs(1));
                let status = Command::new("systemctl")
                    .args(["is-active", "telemt.service"])
                    .output();

                match status {
                    Ok(out) if out.status.success() => {
                        eprintln!("[+] Service is running");
                    }
                    _ => {
                        eprintln!("[!] Service may not have started correctly");
                        eprintln!("[!] Check: journalctl -u telemt.service -n 20");
                    }
                }
            } else {
                eprintln!("[+] Service not started (--no-start)");
                eprintln!("[+] Start manually: systemctl start telemt.service");
            }
        }
    } else {
        eprintln!("[+] Service not started (--no-start)");
        eprintln!("[+] Start manually: systemctl start telemt.service");
        InitSystem::OpenRC => {
            run_cmd("rc-update", &["add", "telemt", "default"]);
            eprintln!("[+] Service enabled");

            if !opts.no_start {
                run_cmd("rc-service", &["telemt", "start"]);
                eprintln!("[+] Service started");
            } else {
                eprintln!("[+] Service not started (--no-start)");
                eprintln!("[+] Start manually: rc-service telemt start");
            }
        }
        InitSystem::FreeBSDRc => {
            run_cmd("sysrc", &["telemt_enable=YES"]);
            eprintln!("[+] Service enabled");

            if !opts.no_start {
                run_cmd("service", &["telemt", "start"]);
                eprintln!("[+] Service started");
            } else {
                eprintln!("[+] Service not started (--no-start)");
                eprintln!("[+] Start manually: service telemt start");
            }
        }
        InitSystem::Unknown => {
            eprintln!("[!] Unknown init system - service file written but not installed");
            eprintln!("[!] You may need to install it manually");
        }
    }

    eprintln!();

    // 8. Print links

    // 7. Print links
    print_links(&opts.username, &secret, opts.port, &opts.domain);

    Ok(())
}

@@ -183,7 +560,7 @@ fn generate_secret() -> String {

fn generate_config(username: &str, secret: &str, port: u16, domain: &str) -> String {
    format!(
        r#"# Telemt MTProxy — auto-generated config
# Re-run `telemt --init` to regenerate

show_link = ["{username}"]

@@ -198,8 +575,16 @@ desync_all_full = false
update_every = 43200
hardswap = false
me_pool_drain_ttl_secs = 90
me_instadrain = false
me_pool_drain_threshold = 32
me_pool_drain_soft_evict_grace_secs = 10
me_pool_drain_soft_evict_per_writer = 2
me_pool_drain_soft_evict_budget_per_core = 16
me_pool_drain_soft_evict_cooldown_ms = 1000
me_bind_stale_mode = "never"
me_pool_min_fresh_ratio = 0.8
me_reinit_drain_timeout_secs = 120
me_reinit_drain_timeout_secs = 90
tg_connect = 10

[network]
ipv4 = true

@@ -225,8 +610,8 @@ ip = "0.0.0.0"
ip = "::"

[timeouts]
client_handshake = 15
tg_connect = 10
client_first_byte_idle_secs = 300
client_handshake = 60
client_keepalive = 60
client_ack = 300

@@ -238,8 +623,9 @@ fake_cert_len = 2048
tls_full_cert_ttl_secs = 90

[access]
user_max_tcp_conns_global_each = 0
replay_check_len = 65536
replay_window_secs = 1800
replay_window_secs = 120
ignore_time_skew = false

[access.users]

@@ -257,35 +643,6 @@ weight = 10
    )
}

fn generate_systemd_unit(exe_path: &Path, config_path: &Path) -> String {
    format!(
        r#"[Unit]
Description=Telemt MTProxy
Documentation=https://github.com/nicepkg/telemt
After=network-online.target
Wants=network-online.target

[Service]
Type=simple
ExecStart={exe} {config}
Restart=always
RestartSec=5
LimitNOFILE=65535
# Security hardening
NoNewPrivileges=true
ProtectSystem=strict
ProtectHome=true
ReadWritePaths=/etc/telemt
PrivateTmp=true

[Install]
WantedBy=multi-user.target
"#,
        exe = exe_path.display(),
        config = config_path.display(),
    )
}

fn run_cmd(cmd: &str, args: &[&str]) {
    match Command::new(cmd).args(args).output() {
        Ok(output) => {

@@ -302,11 +659,13 @@ fn run_cmd(cmd: &str, args: &[&str]) {

fn print_links(username: &str, secret: &str, port: u16, domain: &str) {
    let domain_hex = hex::encode(domain);

    println!("=== Proxy Links ===");
    println!("[{}]", username);
    println!(" EE-TLS: tg://proxy?server=YOUR_SERVER_IP&port={}&secret=ee{}{}",
        port, secret, domain_hex);
    println!(
        " EE-TLS: tg://proxy?server=YOUR_SERVER_IP&port={}&secret=ee{}{}",
        port, secret, domain_hex
    );
    println!();
    println!("Replace YOUR_SERVER_IP with your server's public IP.");
    println!("The proxy will auto-detect and display the correct link on startup.");

@@ -1,6 +1,6 @@
use std::collections::HashMap;
use ipnetwork::IpNetwork;
use serde::Deserialize;
use std::collections::HashMap;

// Helper defaults kept private to the config module.
const DEFAULT_NETWORK_IPV6: Option<bool> = Some(false);

@@ -11,9 +11,50 @@ const DEFAULT_ME_RECONNECT_FAST_RETRY_COUNT: u32 = 16;
const DEFAULT_ME_SINGLE_ENDPOINT_SHADOW_WRITERS: u8 = 2;
const DEFAULT_ME_ADAPTIVE_FLOOR_IDLE_SECS: u64 = 90;
const DEFAULT_ME_ADAPTIVE_FLOOR_MIN_WRITERS_SINGLE_ENDPOINT: u8 = 1;
const DEFAULT_ME_ADAPTIVE_FLOOR_MIN_WRITERS_MULTI_ENDPOINT: u8 = 1;
const DEFAULT_ME_ADAPTIVE_FLOOR_RECOVER_GRACE_SECS: u64 = 180;
const DEFAULT_UPSTREAM_CONNECT_RETRY_ATTEMPTS: u32 = 3;
const DEFAULT_UPSTREAM_UNHEALTHY_FAIL_THRESHOLD: u32 = 4;
const DEFAULT_ME_ADAPTIVE_FLOOR_WRITERS_PER_CORE_TOTAL: u16 = 48;
const DEFAULT_ME_ADAPTIVE_FLOOR_CPU_CORES_OVERRIDE: u16 = 0;
const DEFAULT_ME_ADAPTIVE_FLOOR_MAX_EXTRA_WRITERS_SINGLE_PER_CORE: u16 = 1;
const DEFAULT_ME_ADAPTIVE_FLOOR_MAX_EXTRA_WRITERS_MULTI_PER_CORE: u16 = 2;
const DEFAULT_ME_ADAPTIVE_FLOOR_MAX_ACTIVE_WRITERS_PER_CORE: u16 = 64;
const DEFAULT_ME_ADAPTIVE_FLOOR_MAX_WARM_WRITERS_PER_CORE: u16 = 64;
const DEFAULT_ME_ADAPTIVE_FLOOR_MAX_ACTIVE_WRITERS_GLOBAL: u32 = 256;
const DEFAULT_ME_ADAPTIVE_FLOOR_MAX_WARM_WRITERS_GLOBAL: u32 = 256;
const DEFAULT_ME_WRITER_CMD_CHANNEL_CAPACITY: usize = 4096;
const DEFAULT_ME_ROUTE_CHANNEL_CAPACITY: usize = 768;
const DEFAULT_ME_C2ME_CHANNEL_CAPACITY: usize = 1024;
const DEFAULT_ME_READER_ROUTE_DATA_WAIT_MS: u64 = 2;
const DEFAULT_ME_D2C_FLUSH_BATCH_MAX_FRAMES: usize = 32;
const DEFAULT_ME_D2C_FLUSH_BATCH_MAX_BYTES: usize = 128 * 1024;
const DEFAULT_ME_D2C_FLUSH_BATCH_MAX_DELAY_US: u64 = 500;
const DEFAULT_ME_D2C_ACK_FLUSH_IMMEDIATE: bool = true;
const DEFAULT_ME_QUOTA_SOFT_OVERSHOOT_BYTES: u64 = 64 * 1024;
const DEFAULT_ME_D2C_FRAME_BUF_SHRINK_THRESHOLD_BYTES: usize = 256 * 1024;
const DEFAULT_DIRECT_RELAY_COPY_BUF_C2S_BYTES: usize = 64 * 1024;
const DEFAULT_DIRECT_RELAY_COPY_BUF_S2C_BYTES: usize = 256 * 1024;
const DEFAULT_ME_WRITER_PICK_SAMPLE_SIZE: u8 = 3;
const DEFAULT_ME_HEALTH_INTERVAL_MS_UNHEALTHY: u64 = 1000;
const DEFAULT_ME_HEALTH_INTERVAL_MS_HEALTHY: u64 = 3000;
const DEFAULT_ME_ADMISSION_POLL_MS: u64 = 1000;
const DEFAULT_ME_WARN_RATE_LIMIT_MS: u64 = 5000;
const DEFAULT_ME_ROUTE_HYBRID_MAX_WAIT_MS: u64 = 3000;
const DEFAULT_ME_ROUTE_BLOCKING_SEND_TIMEOUT_MS: u64 = 250;
const DEFAULT_ME_C2ME_SEND_TIMEOUT_MS: u64 = 4000;
const DEFAULT_ME_POOL_DRAIN_SOFT_EVICT_ENABLED: bool = true;
const DEFAULT_ME_POOL_DRAIN_SOFT_EVICT_GRACE_SECS: u64 = 10;
const DEFAULT_ME_POOL_DRAIN_SOFT_EVICT_PER_WRITER: u8 = 2;
const DEFAULT_ME_POOL_DRAIN_SOFT_EVICT_BUDGET_PER_CORE: u16 = 16;
const DEFAULT_ME_POOL_DRAIN_SOFT_EVICT_COOLDOWN_MS: u64 = 1000;
const DEFAULT_USER_MAX_UNIQUE_IPS_WINDOW_SECS: u64 = 30;
const DEFAULT_ACCEPT_PERMIT_TIMEOUT_MS: u64 = 250;
const DEFAULT_CONNTRACK_CONTROL_ENABLED: bool = true;
const DEFAULT_CONNTRACK_PRESSURE_HIGH_WATERMARK_PCT: u8 = 85;
const DEFAULT_CONNTRACK_PRESSURE_LOW_WATERMARK_PCT: u8 = 70;
const DEFAULT_CONNTRACK_DELETE_BUDGET_PER_SEC: u64 = 4096;
const DEFAULT_UPSTREAM_CONNECT_RETRY_ATTEMPTS: u32 = 2;
const DEFAULT_UPSTREAM_UNHEALTHY_FAIL_THRESHOLD: u32 = 5;
const DEFAULT_UPSTREAM_CONNECT_BUDGET_MS: u64 = 3000;
const DEFAULT_LISTEN_ADDR_IPV6: &str = "::";
const DEFAULT_ACCESS_USER: &str = "default";
const DEFAULT_ACCESS_SECRET: &str = "00000000000000000000000000000000";

@@ -30,6 +71,26 @@ pub(crate) fn default_tls_domain() -> String {
    "petrovich.ru".to_string()
}

pub(crate) fn default_tls_fetch_scope() -> String {
    String::new()
}

pub(crate) fn default_tls_fetch_attempt_timeout_ms() -> u64 {
    5_000
}

pub(crate) fn default_tls_fetch_total_budget_ms() -> u64 {
    15_000
}

pub(crate) fn default_tls_fetch_strict_route() -> bool {
    true
}

pub(crate) fn default_tls_fetch_profile_cache_ttl_secs() -> u64 {
    600
}

pub(crate) fn default_mask_port() -> u16 {
    443
}

@@ -39,7 +100,7 @@ pub(crate) fn default_fake_cert_len() -> usize {
}

pub(crate) fn default_tls_front_dir() -> String {
    "tlsfront".to_string()
    "/etc/telemt/tlsfront".to_string()
}

pub(crate) fn default_replay_check_len() -> usize {

@@ -47,10 +108,32 @@ pub(crate) fn default_replay_check_len() -> usize {
}

pub(crate) fn default_replay_window_secs() -> u64 {
    1800
    // Keep replay cache TTL tight by default to reduce replay surface.
    // Deployments with higher RTT or longer reconnect jitter can override this in config.
    120
}
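
Operational note: the tightened 120-second default above remains overridable, as the in-code comment says. A hedged sketch in the style of this diff's config-load tests; `write_temp_config`/`remove_temp_config` mirror those test helpers, and the `access.replay_window_secs` field path is inferred from the generated config's `[access]` section, so treat both as assumptions.

#[test]
fn replay_window_override_sketch() {
    // Illustrative only: widen the replay cache TTL for high-jitter links.
    let path = write_temp_config(
        r#"
[access]
replay_window_secs = 600
"#,
    );

    let cfg = ProxyConfig::load(&path).expect("override must load");
    assert_eq!(cfg.access.replay_window_secs, 600);

    remove_temp_config(&path);
}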

pub(crate) fn default_handshake_timeout() -> u64 {
    60
}

pub(crate) fn default_client_first_byte_idle_secs() -> u64 {
    300
}

pub(crate) fn default_relay_idle_policy_v2_enabled() -> bool {
    true
}

pub(crate) fn default_relay_client_idle_soft_secs() -> u64 {
    120
}

pub(crate) fn default_relay_client_idle_hard_secs() -> u64 {
    360
}

pub(crate) fn default_relay_idle_grace_after_downstream_activity_secs() -> u64 {
    30
}

@@ -59,11 +142,11 @@ pub(crate) fn default_connect_timeout() -> u64 {
}

pub(crate) fn default_keepalive() -> u64 {
    60
    15
}

pub(crate) fn default_ack_timeout() -> u64 {
    300
    90
}
pub(crate) fn default_me_one_retry() -> u8 {
    12

@@ -86,10 +169,76 @@ pub(crate) fn default_weight() -> u16 {
}

pub(crate) fn default_metrics_whitelist() -> Vec<IpNetwork> {
    vec![
        "127.0.0.1/32".parse().unwrap(),
        "::1/128".parse().unwrap(),
    ]
    vec!["127.0.0.1/32".parse().unwrap(), "::1/128".parse().unwrap()]
}

pub(crate) fn default_api_listen() -> String {
    "0.0.0.0:9091".to_string()
}

pub(crate) fn default_api_whitelist() -> Vec<IpNetwork> {
    vec!["127.0.0.0/8".parse().unwrap()]
}

pub(crate) fn default_api_request_body_limit_bytes() -> usize {
    64 * 1024
}

pub(crate) fn default_api_minimal_runtime_enabled() -> bool {
    true
}

pub(crate) fn default_api_minimal_runtime_cache_ttl_ms() -> u64 {
    1000
}

pub(crate) fn default_api_runtime_edge_enabled() -> bool {
    false
}
pub(crate) fn default_api_runtime_edge_cache_ttl_ms() -> u64 {
    1000
}
pub(crate) fn default_api_runtime_edge_top_n() -> usize {
    10
}
pub(crate) fn default_api_runtime_edge_events_capacity() -> usize {
    256
}

pub(crate) fn default_proxy_protocol_header_timeout_ms() -> u64 {
    500
}

pub(crate) fn default_proxy_protocol_trusted_cidrs() -> Vec<IpNetwork> {
    vec!["0.0.0.0/0".parse().unwrap(), "::/0".parse().unwrap()]
}

pub(crate) fn default_server_max_connections() -> u32 {
    10_000
}

pub(crate) fn default_listen_backlog() -> u32 {
    1024
}

pub(crate) fn default_accept_permit_timeout_ms() -> u64 {
    DEFAULT_ACCEPT_PERMIT_TIMEOUT_MS
}

pub(crate) fn default_conntrack_control_enabled() -> bool {
    DEFAULT_CONNTRACK_CONTROL_ENABLED
}

pub(crate) fn default_conntrack_pressure_high_watermark_pct() -> u8 {
    DEFAULT_CONNTRACK_PRESSURE_HIGH_WATERMARK_PCT
}

pub(crate) fn default_conntrack_pressure_low_watermark_pct() -> u8 {
    DEFAULT_CONNTRACK_PRESSURE_LOW_WATERMARK_PCT
}

pub(crate) fn default_conntrack_delete_budget_per_sec() -> u64 {
    DEFAULT_CONNTRACK_DELETE_BUDGET_PER_SEC
}

pub(crate) fn default_prefer_4() -> u8 {

@@ -108,6 +257,10 @@ pub(crate) fn default_unknown_dc_log_path() -> Option<String> {
    Some("unknown-dc.txt".to_string())
}

pub(crate) fn default_unknown_dc_file_log_enabled() -> bool {
    false
}

pub(crate) fn default_pool_size() -> usize {
    8
}

@@ -116,6 +269,14 @@ pub(crate) fn default_proxy_secret_path() -> Option<String> {
    Some("proxy-secret".to_string())
}

pub(crate) fn default_proxy_config_v4_cache_path() -> Option<String> {
    Some("cache/proxy-config-v4.txt".to_string())
}

pub(crate) fn default_proxy_config_v6_cache_path() -> Option<String> {
    Some("cache/proxy-config-v6.txt".to_string())
}

pub(crate) fn default_middle_proxy_nat_stun() -> Option<String> {
    None
}

@@ -132,6 +293,18 @@ pub(crate) fn default_middle_proxy_warm_standby() -> usize {
    DEFAULT_MIDDLE_PROXY_WARM_STANDBY
}

pub(crate) fn default_me_init_retry_attempts() -> u32 {
    0
}

pub(crate) fn default_me2dc_fallback() -> bool {
    true
}

pub(crate) fn default_me2dc_fast() -> bool {
    true
}

pub(crate) fn default_keepalive_interval() -> u64 {
    8
}

@@ -196,22 +369,150 @@ pub(crate) fn default_me_adaptive_floor_min_writers_single_endpoint() -> u8 {
    DEFAULT_ME_ADAPTIVE_FLOOR_MIN_WRITERS_SINGLE_ENDPOINT
}

pub(crate) fn default_me_adaptive_floor_min_writers_multi_endpoint() -> u8 {
    DEFAULT_ME_ADAPTIVE_FLOOR_MIN_WRITERS_MULTI_ENDPOINT
}

pub(crate) fn default_me_adaptive_floor_recover_grace_secs() -> u64 {
    DEFAULT_ME_ADAPTIVE_FLOOR_RECOVER_GRACE_SECS
}

pub(crate) fn default_me_adaptive_floor_writers_per_core_total() -> u16 {
    DEFAULT_ME_ADAPTIVE_FLOOR_WRITERS_PER_CORE_TOTAL
}

pub(crate) fn default_me_adaptive_floor_cpu_cores_override() -> u16 {
    DEFAULT_ME_ADAPTIVE_FLOOR_CPU_CORES_OVERRIDE
}

pub(crate) fn default_me_adaptive_floor_max_extra_writers_single_per_core() -> u16 {
    DEFAULT_ME_ADAPTIVE_FLOOR_MAX_EXTRA_WRITERS_SINGLE_PER_CORE
}

pub(crate) fn default_me_adaptive_floor_max_extra_writers_multi_per_core() -> u16 {
    DEFAULT_ME_ADAPTIVE_FLOOR_MAX_EXTRA_WRITERS_MULTI_PER_CORE
}

pub(crate) fn default_me_adaptive_floor_max_active_writers_per_core() -> u16 {
    DEFAULT_ME_ADAPTIVE_FLOOR_MAX_ACTIVE_WRITERS_PER_CORE
}

pub(crate) fn default_me_adaptive_floor_max_warm_writers_per_core() -> u16 {
    DEFAULT_ME_ADAPTIVE_FLOOR_MAX_WARM_WRITERS_PER_CORE
}

pub(crate) fn default_me_adaptive_floor_max_active_writers_global() -> u32 {
    DEFAULT_ME_ADAPTIVE_FLOOR_MAX_ACTIVE_WRITERS_GLOBAL
}

pub(crate) fn default_me_adaptive_floor_max_warm_writers_global() -> u32 {
    DEFAULT_ME_ADAPTIVE_FLOOR_MAX_WARM_WRITERS_GLOBAL
}

pub(crate) fn default_me_writer_cmd_channel_capacity() -> usize {
    DEFAULT_ME_WRITER_CMD_CHANNEL_CAPACITY
}

pub(crate) fn default_me_route_channel_capacity() -> usize {
    DEFAULT_ME_ROUTE_CHANNEL_CAPACITY
}

pub(crate) fn default_me_c2me_channel_capacity() -> usize {
    DEFAULT_ME_C2ME_CHANNEL_CAPACITY
}

pub(crate) fn default_me_reader_route_data_wait_ms() -> u64 {
    DEFAULT_ME_READER_ROUTE_DATA_WAIT_MS
}

pub(crate) fn default_me_d2c_flush_batch_max_frames() -> usize {
    DEFAULT_ME_D2C_FLUSH_BATCH_MAX_FRAMES
}

pub(crate) fn default_me_d2c_flush_batch_max_bytes() -> usize {
    DEFAULT_ME_D2C_FLUSH_BATCH_MAX_BYTES
}

pub(crate) fn default_me_d2c_flush_batch_max_delay_us() -> u64 {
    DEFAULT_ME_D2C_FLUSH_BATCH_MAX_DELAY_US
}

pub(crate) fn default_me_d2c_ack_flush_immediate() -> bool {
    DEFAULT_ME_D2C_ACK_FLUSH_IMMEDIATE
}

pub(crate) fn default_me_quota_soft_overshoot_bytes() -> u64 {
    DEFAULT_ME_QUOTA_SOFT_OVERSHOOT_BYTES
}

pub(crate) fn default_me_d2c_frame_buf_shrink_threshold_bytes() -> usize {
    DEFAULT_ME_D2C_FRAME_BUF_SHRINK_THRESHOLD_BYTES
}

pub(crate) fn default_direct_relay_copy_buf_c2s_bytes() -> usize {
    DEFAULT_DIRECT_RELAY_COPY_BUF_C2S_BYTES
}

pub(crate) fn default_direct_relay_copy_buf_s2c_bytes() -> usize {
    DEFAULT_DIRECT_RELAY_COPY_BUF_S2C_BYTES
}

pub(crate) fn default_me_writer_pick_sample_size() -> u8 {
    DEFAULT_ME_WRITER_PICK_SAMPLE_SIZE
}

pub(crate) fn default_me_health_interval_ms_unhealthy() -> u64 {
    DEFAULT_ME_HEALTH_INTERVAL_MS_UNHEALTHY
}

pub(crate) fn default_me_health_interval_ms_healthy() -> u64 {
    DEFAULT_ME_HEALTH_INTERVAL_MS_HEALTHY
}

pub(crate) fn default_me_admission_poll_ms() -> u64 {
    DEFAULT_ME_ADMISSION_POLL_MS
}

pub(crate) fn default_me_warn_rate_limit_ms() -> u64 {
    DEFAULT_ME_WARN_RATE_LIMIT_MS
}

pub(crate) fn default_me_route_hybrid_max_wait_ms() -> u64 {
    DEFAULT_ME_ROUTE_HYBRID_MAX_WAIT_MS
}

pub(crate) fn default_me_route_blocking_send_timeout_ms() -> u64 {
    DEFAULT_ME_ROUTE_BLOCKING_SEND_TIMEOUT_MS
}

pub(crate) fn default_me_c2me_send_timeout_ms() -> u64 {
    DEFAULT_ME_C2ME_SEND_TIMEOUT_MS
}

pub(crate) fn default_upstream_connect_retry_attempts() -> u32 {
    DEFAULT_UPSTREAM_CONNECT_RETRY_ATTEMPTS
}

pub(crate) fn default_upstream_connect_retry_backoff_ms() -> u64 {
    250
    100
}

pub(crate) fn default_upstream_unhealthy_fail_threshold() -> u32 {
    DEFAULT_UPSTREAM_UNHEALTHY_FAIL_THRESHOLD
}

pub(crate) fn default_upstream_connect_budget_ms() -> u64 {
    DEFAULT_UPSTREAM_CONNECT_BUDGET_MS
}

pub(crate) fn default_upstream_connect_failfast_hard_errors() -> bool {
    false
}

pub(crate) fn default_rpc_proxy_req_every() -> u64 {
    0
}

pub(crate) fn default_crypto_pending_buffer() -> usize {
    256 * 1024
}

@@ -236,6 +537,18 @@ pub(crate) fn default_me_route_backpressure_high_watermark_pct() -> u8 {
    80
}

pub(crate) fn default_me_route_no_writer_wait_ms() -> u64 {
    250
}

pub(crate) fn default_me_route_inline_recovery_attempts() -> u32 {
    3
}

pub(crate) fn default_me_route_inline_recovery_wait_ms() -> u64 {
    3000
}

pub(crate) fn default_beobachten_minutes() -> u64 {
    10
}

@@ -245,7 +558,7 @@ pub(crate) fn default_beobachten_flush_secs() -> u64 {
}

pub(crate) fn default_beobachten_file() -> String {
    "cache/beobachten.txt".to_string()
    "/etc/telemt/beobachten.txt".to_string()
}

pub(crate) fn default_tls_new_session_tickets() -> u8 {

@@ -257,17 +570,67 @@ pub(crate) fn default_tls_full_cert_ttl_secs() -> u64 {
}

pub(crate) fn default_server_hello_delay_min_ms() -> u64 {
    0
    8
}

pub(crate) fn default_server_hello_delay_max_ms() -> u64 {
    0
    24
}

pub(crate) fn default_alpn_enforce() -> bool {
    true
}

pub(crate) fn default_mask_shape_hardening() -> bool {
    true
}

pub(crate) fn default_mask_shape_hardening_aggressive_mode() -> bool {
    false
}

pub(crate) fn default_mask_shape_bucket_floor_bytes() -> usize {
    512
}

pub(crate) fn default_mask_shape_bucket_cap_bytes() -> usize {
    4096
}

pub(crate) fn default_mask_shape_above_cap_blur() -> bool {
    false
}

pub(crate) fn default_mask_shape_above_cap_blur_max_bytes() -> usize {
    512
}

#[cfg(not(test))]
pub(crate) fn default_mask_relay_max_bytes() -> usize {
    5 * 1024 * 1024
}

#[cfg(test)]
pub(crate) fn default_mask_relay_max_bytes() -> usize {
    32 * 1024
}

pub(crate) fn default_mask_classifier_prefetch_timeout_ms() -> u64 {
    5
}

pub(crate) fn default_mask_timing_normalization_enabled() -> bool {
    false
}

pub(crate) fn default_mask_timing_normalization_floor_ms() -> u64 {
    0
}

pub(crate) fn default_mask_timing_normalization_ceiling_ms() -> u64 {
    0
}

pub(crate) fn default_stun_servers() -> Vec<String> {
    vec![
        "stun.l.google.com:5349".to_string(),

@@ -382,13 +745,41 @@ pub(crate) fn default_proxy_secret_len_max() -> usize {
}

pub(crate) fn default_me_reinit_drain_timeout_secs() -> u64 {
    120
    90
}

pub(crate) fn default_me_pool_drain_ttl_secs() -> u64 {
    90
}

pub(crate) fn default_me_instadrain() -> bool {
    false
}

pub(crate) fn default_me_pool_drain_threshold() -> u64 {
    32
}

pub(crate) fn default_me_pool_drain_soft_evict_enabled() -> bool {
    DEFAULT_ME_POOL_DRAIN_SOFT_EVICT_ENABLED
}

pub(crate) fn default_me_pool_drain_soft_evict_grace_secs() -> u64 {
    DEFAULT_ME_POOL_DRAIN_SOFT_EVICT_GRACE_SECS
}

pub(crate) fn default_me_pool_drain_soft_evict_per_writer() -> u8 {
    DEFAULT_ME_POOL_DRAIN_SOFT_EVICT_PER_WRITER
}

pub(crate) fn default_me_pool_drain_soft_evict_budget_per_core() -> u16 {
    DEFAULT_ME_POOL_DRAIN_SOFT_EVICT_BUDGET_PER_CORE
}

pub(crate) fn default_me_pool_drain_soft_evict_cooldown_ms() -> u64 {
    DEFAULT_ME_POOL_DRAIN_SOFT_EVICT_COOLDOWN_MS
}

pub(crate) fn default_me_bind_stale_ttl_secs() -> u64 {
    default_me_pool_drain_ttl_secs()
}

@@ -436,6 +827,18 @@ pub(crate) fn default_access_users() -> HashMap<String, String> {
    )])
}

pub(crate) fn default_user_max_unique_ips_window_secs() -> u64 {
    DEFAULT_USER_MAX_UNIQUE_IPS_WINDOW_SECS
}

pub(crate) fn default_user_max_tcp_conns_global_each() -> usize {
    0
}

pub(crate) fn default_user_max_unique_ips_global_each() -> usize {
    0
}

// Custom deserializer helpers

#[derive(Deserialize)]

File diff suppressed because it is too large

1738 src/config/load.rs
File diff suppressed because it is too large

@@ -1,9 +1,9 @@
//! Configuration.

pub(crate) mod defaults;
mod types;
mod load;
pub mod hot_reload;
mod load;
mod types;

pub use load::ProxyConfig;
pub use types::*;
@@ -0,0 +1,102 @@
use super::*;
use std::fs;
use std::path::PathBuf;
use std::time::{SystemTime, UNIX_EPOCH};

fn write_temp_config(contents: &str) -> PathBuf {
    let nonce = SystemTime::now()
        .duration_since(UNIX_EPOCH)
        .expect("system time must be after unix epoch")
        .as_nanos();
    let path = std::env::temp_dir().join(format!("telemt-idle-policy-{nonce}.toml"));
    fs::write(&path, contents).expect("temp config write must succeed");
    path
}

fn remove_temp_config(path: &PathBuf) {
    let _ = fs::remove_file(path);
}

#[test]
fn default_timeouts_enable_apple_compatible_handshake_profile() {
    let cfg = ProxyConfig::default();
    assert_eq!(cfg.timeouts.client_first_byte_idle_secs, 300);
    assert_eq!(cfg.timeouts.client_handshake, 60);
}

#[test]
fn load_accepts_zero_first_byte_idle_timeout_as_legacy_opt_out() {
    let path = write_temp_config(
        r#"
[timeouts]
client_first_byte_idle_secs = 0
"#,
    );

    let cfg = ProxyConfig::load(&path).expect("config with zero first-byte idle timeout must load");
    assert_eq!(cfg.timeouts.client_first_byte_idle_secs, 0);

    remove_temp_config(&path);
}

#[test]
fn load_rejects_relay_hard_idle_smaller_than_soft_idle_with_clear_error() {
    let path = write_temp_config(
        r#"
[timeouts]
relay_client_idle_soft_secs = 120
relay_client_idle_hard_secs = 60
"#,
    );

    let err = ProxyConfig::load(&path).expect_err("config with hard<soft must fail");
    let msg = err.to_string();
    assert!(
        msg.contains(
            "timeouts.relay_client_idle_hard_secs must be >= timeouts.relay_client_idle_soft_secs"
        ),
        "error must explain the violated hard>=soft invariant, got: {msg}"
    );

    remove_temp_config(&path);
}

#[test]
fn load_rejects_relay_grace_larger_than_hard_idle_with_clear_error() {
    let path = write_temp_config(
        r#"
[timeouts]
relay_client_idle_soft_secs = 60
relay_client_idle_hard_secs = 120
relay_idle_grace_after_downstream_activity_secs = 121
"#,
    );

    let err = ProxyConfig::load(&path).expect_err("config with grace>hard must fail");
    let msg = err.to_string();
    assert!(
        msg.contains("timeouts.relay_idle_grace_after_downstream_activity_secs must be <= timeouts.relay_client_idle_hard_secs"),
        "error must explain the violated grace<=hard invariant, got: {msg}"
    );

    remove_temp_config(&path);
}

#[test]
fn load_rejects_zero_handshake_timeout_with_clear_error() {
    let path = write_temp_config(
        r#"
[timeouts]
client_handshake = 0
"#,
    );

    let err = ProxyConfig::load(&path).expect_err("config with zero handshake timeout must fail");
    let msg = err.to_string();
    assert!(
        msg.contains("timeouts.client_handshake must be > 0"),
        "error must explain that handshake timeout must be positive, got: {msg}"
    );

    remove_temp_config(&path);
}

@@ -0,0 +1,76 @@
use super::*;
use std::fs;
use std::path::PathBuf;
use std::time::{SystemTime, UNIX_EPOCH};

fn write_temp_config(contents: &str) -> PathBuf {
    let nonce = SystemTime::now()
        .duration_since(UNIX_EPOCH)
        .expect("system time must be after unix epoch")
        .as_nanos();
    let path = std::env::temp_dir().join(format!(
        "telemt-load-mask-prefetch-timeout-security-{nonce}.toml"
    ));
    fs::write(&path, contents).expect("temp config write must succeed");
    path
}

fn remove_temp_config(path: &PathBuf) {
    let _ = fs::remove_file(path);
}

#[test]
fn load_rejects_mask_classifier_prefetch_timeout_below_min_bound() {
    let path = write_temp_config(
        r#"
[censorship]
mask_classifier_prefetch_timeout_ms = 4
"#,
    );

    let err = ProxyConfig::load(&path)
        .expect_err("prefetch timeout below minimum security bound must be rejected");
    let msg = err.to_string();
    assert!(
        msg.contains("censorship.mask_classifier_prefetch_timeout_ms must be within [5, 50]"),
        "error must explain timeout bound invariant, got: {msg}"
    );

    remove_temp_config(&path);
}

#[test]
fn load_rejects_mask_classifier_prefetch_timeout_above_max_bound() {
    let path = write_temp_config(
        r#"
[censorship]
mask_classifier_prefetch_timeout_ms = 51
"#,
    );

    let err = ProxyConfig::load(&path)
        .expect_err("prefetch timeout above max security bound must be rejected");
    let msg = err.to_string();
    assert!(
        msg.contains("censorship.mask_classifier_prefetch_timeout_ms must be within [5, 50]"),
        "error must explain timeout bound invariant, got: {msg}"
    );

    remove_temp_config(&path);
}

#[test]
fn load_accepts_mask_classifier_prefetch_timeout_within_bounds() {
    let path = write_temp_config(
        r#"
[censorship]
mask_classifier_prefetch_timeout_ms = 20
"#,
    );

    let cfg =
        ProxyConfig::load(&path).expect("prefetch timeout within security bounds must be accepted");
    assert_eq!(cfg.censorship.mask_classifier_prefetch_timeout_ms, 20);

    remove_temp_config(&path);
}

@@ -0,0 +1,292 @@
use super::*;
use std::fs;
use std::path::PathBuf;
use std::time::{SystemTime, UNIX_EPOCH};

fn write_temp_config(contents: &str) -> PathBuf {
    let nonce = SystemTime::now()
        .duration_since(UNIX_EPOCH)
        .expect("system time must be after unix epoch")
        .as_nanos();
    let path = std::env::temp_dir().join(format!("telemt-load-mask-shape-security-{nonce}.toml"));
    fs::write(&path, contents).expect("temp config write must succeed");
    path
}

fn remove_temp_config(path: &PathBuf) {
    let _ = fs::remove_file(path);
}

#[test]
fn load_rejects_zero_mask_shape_bucket_floor_bytes() {
    let path = write_temp_config(
        r#"
[censorship]
mask_shape_bucket_floor_bytes = 0
mask_shape_bucket_cap_bytes = 4096
"#,
    );

    let err =
        ProxyConfig::load(&path).expect_err("zero mask_shape_bucket_floor_bytes must be rejected");
    let msg = err.to_string();
    assert!(
        msg.contains("censorship.mask_shape_bucket_floor_bytes must be > 0"),
        "error must explain floor>0 invariant, got: {msg}"
    );

    remove_temp_config(&path);
}

#[test]
fn load_rejects_mask_shape_bucket_cap_less_than_floor() {
    let path = write_temp_config(
        r#"
[censorship]
mask_shape_bucket_floor_bytes = 1024
mask_shape_bucket_cap_bytes = 512
"#,
    );

    let err =
        ProxyConfig::load(&path).expect_err("mask_shape_bucket_cap_bytes < floor must be rejected");
    let msg = err.to_string();
    assert!(
        msg.contains(
            "censorship.mask_shape_bucket_cap_bytes must be >= censorship.mask_shape_bucket_floor_bytes"
        ),
        "error must explain cap>=floor invariant, got: {msg}"
    );

    remove_temp_config(&path);
}

#[test]
fn load_accepts_mask_shape_bucket_cap_equal_to_floor() {
    let path = write_temp_config(
        r#"
[censorship]
mask_shape_hardening = true
mask_shape_bucket_floor_bytes = 1024
mask_shape_bucket_cap_bytes = 1024
"#,
    );

    let cfg = ProxyConfig::load(&path).expect("equal cap and floor must be accepted");
    assert!(cfg.censorship.mask_shape_hardening);
    assert_eq!(cfg.censorship.mask_shape_bucket_floor_bytes, 1024);
    assert_eq!(cfg.censorship.mask_shape_bucket_cap_bytes, 1024);

    remove_temp_config(&path);
}

#[test]
fn load_rejects_above_cap_blur_when_shape_hardening_disabled() {
    let path = write_temp_config(
        r#"
[censorship]
mask_shape_hardening = false
mask_shape_above_cap_blur = true
mask_shape_above_cap_blur_max_bytes = 64
"#,
    );

    let err =
        ProxyConfig::load(&path).expect_err("above-cap blur must require shape hardening enabled");
    let msg = err.to_string();
    assert!(
        msg.contains(
            "censorship.mask_shape_above_cap_blur requires censorship.mask_shape_hardening = true"
        ),
        "error must explain blur prerequisite, got: {msg}"
    );

    remove_temp_config(&path);
}

#[test]
fn load_rejects_above_cap_blur_with_zero_max_bytes() {
    let path = write_temp_config(
        r#"
[censorship]
mask_shape_hardening = true
mask_shape_above_cap_blur = true
mask_shape_above_cap_blur_max_bytes = 0
"#,
    );

    let err =
        ProxyConfig::load(&path).expect_err("above-cap blur max bytes must be > 0 when enabled");
    let msg = err.to_string();
    assert!(
        msg.contains("censorship.mask_shape_above_cap_blur_max_bytes must be > 0 when censorship.mask_shape_above_cap_blur is enabled"),
        "error must explain blur max bytes invariant, got: {msg}"
    );

    remove_temp_config(&path);
}

#[test]
fn load_rejects_timing_normalization_floor_zero_when_enabled() {
    let path = write_temp_config(
        r#"
[censorship]
mask_timing_normalization_enabled = true
mask_timing_normalization_floor_ms = 0
mask_timing_normalization_ceiling_ms = 200
"#,
    );

    let err =
        ProxyConfig::load(&path).expect_err("timing normalization floor must be > 0 when enabled");
    let msg = err.to_string();
    assert!(
        msg.contains("censorship.mask_timing_normalization_floor_ms must be > 0 when censorship.mask_timing_normalization_enabled is true"),
        "error must explain timing floor invariant, got: {msg}"
    );

    remove_temp_config(&path);
}

#[test]
fn load_rejects_timing_normalization_ceiling_below_floor() {
    let path = write_temp_config(
        r#"
[censorship]
mask_timing_normalization_enabled = true
mask_timing_normalization_floor_ms = 220
mask_timing_normalization_ceiling_ms = 200
"#,
    );

    let err = ProxyConfig::load(&path).expect_err("timing normalization ceiling must be >= floor");
    let msg = err.to_string();
    assert!(
        msg.contains("censorship.mask_timing_normalization_ceiling_ms must be >= censorship.mask_timing_normalization_floor_ms"),
        "error must explain timing ceiling/floor invariant, got: {msg}"
    );

    remove_temp_config(&path);
}

#[test]
fn load_accepts_valid_timing_normalization_and_above_cap_blur_config() {
    let path = write_temp_config(
        r#"
[censorship]
mask_shape_hardening = true
mask_shape_above_cap_blur = true
mask_shape_above_cap_blur_max_bytes = 128
mask_timing_normalization_enabled = true
mask_timing_normalization_floor_ms = 150
mask_timing_normalization_ceiling_ms = 240
"#,
    );

    let cfg = ProxyConfig::load(&path)
        .expect("valid blur and timing normalization settings must be accepted");
    assert!(cfg.censorship.mask_shape_hardening);
    assert!(cfg.censorship.mask_shape_above_cap_blur);
    assert_eq!(cfg.censorship.mask_shape_above_cap_blur_max_bytes, 128);
    assert!(cfg.censorship.mask_timing_normalization_enabled);
    assert_eq!(cfg.censorship.mask_timing_normalization_floor_ms, 150);
    assert_eq!(cfg.censorship.mask_timing_normalization_ceiling_ms, 240);

    remove_temp_config(&path);
}

#[test]
fn load_rejects_aggressive_shape_mode_when_shape_hardening_disabled() {
    let path = write_temp_config(
        r#"
[censorship]
mask_shape_hardening = false
mask_shape_hardening_aggressive_mode = true
"#,
    );

    let err = ProxyConfig::load(&path)
        .expect_err("aggressive shape hardening mode must require shape hardening enabled");
    let msg = err.to_string();
    assert!(
        msg.contains("censorship.mask_shape_hardening_aggressive_mode requires censorship.mask_shape_hardening = true"),
        "error must explain aggressive-mode prerequisite, got: {msg}"
    );

    remove_temp_config(&path);
}

#[test]
fn load_accepts_aggressive_shape_mode_when_shape_hardening_enabled() {
    let path = write_temp_config(
        r#"
[censorship]
mask_shape_hardening = true
mask_shape_hardening_aggressive_mode = true
mask_shape_above_cap_blur = true
mask_shape_above_cap_blur_max_bytes = 8
"#,
    );

    let cfg = ProxyConfig::load(&path)
        .expect("aggressive shape hardening mode should be accepted when prerequisites are met");
    assert!(cfg.censorship.mask_shape_hardening);
    assert!(cfg.censorship.mask_shape_hardening_aggressive_mode);
    assert!(cfg.censorship.mask_shape_above_cap_blur);

    remove_temp_config(&path);
}

#[test]
fn load_rejects_zero_mask_relay_max_bytes() {
    let path = write_temp_config(
        r#"
[censorship]
mask_relay_max_bytes = 0
"#,
    );

    let err = ProxyConfig::load(&path).expect_err("mask_relay_max_bytes must be > 0");
    let msg = err.to_string();
    assert!(
        msg.contains("censorship.mask_relay_max_bytes must be > 0"),
        "error must explain non-zero relay cap invariant, got: {msg}"
    );

    remove_temp_config(&path);
}

#[test]
fn load_rejects_mask_relay_max_bytes_above_upper_bound() {
    let path = write_temp_config(
        r#"
[censorship]
mask_relay_max_bytes = 67108865
"#,
    );

    let err =
        ProxyConfig::load(&path).expect_err("mask_relay_max_bytes above hard cap must be rejected");
    let msg = err.to_string();
    assert!(
        msg.contains("censorship.mask_relay_max_bytes must be <= 67108864"),
        "error must explain relay cap upper bound invariant, got: {msg}"
    );

    remove_temp_config(&path);
}

#[test]
fn load_accepts_valid_mask_relay_max_bytes() {
    let path = write_temp_config(
        r#"
[censorship]
mask_relay_max_bytes = 8388608
"#,
    );

    let cfg = ProxyConfig::load(&path).expect("valid mask_relay_max_bytes must be accepted");
    assert_eq!(cfg.censorship.mask_relay_max_bytes, 8_388_608);

    remove_temp_config(&path);
}
@@ -0,0 +1,88 @@
use super::*;
use std::fs;
use std::path::PathBuf;
use std::time::{SystemTime, UNIX_EPOCH};

fn write_temp_config(contents: &str) -> PathBuf {
    let nonce = SystemTime::now()
        .duration_since(UNIX_EPOCH)
        .expect("system time must be after unix epoch")
        .as_nanos();
    let path = std::env::temp_dir().join(format!("telemt-load-security-{nonce}.toml"));
    fs::write(&path, contents).expect("temp config write must succeed");
    path
}

fn remove_temp_config(path: &PathBuf) {
    let _ = fs::remove_file(path);
}

#[test]
fn load_rejects_server_hello_delay_equal_to_handshake_timeout_budget() {
    let path = write_temp_config(
        r#"
[timeouts]
client_handshake = 1

[censorship]
server_hello_delay_max_ms = 1000
"#,
    );

    let err =
        ProxyConfig::load(&path).expect_err("delay equal to handshake timeout must be rejected");
    let msg = err.to_string();
    assert!(
        msg.contains(
            "censorship.server_hello_delay_max_ms must be < timeouts.client_handshake * 1000"
        ),
        "error must explain delay<timeout invariant, got: {msg}"
    );

    remove_temp_config(&path);
}

#[test]
fn load_rejects_server_hello_delay_larger_than_handshake_timeout_budget() {
    let path = write_temp_config(
        r#"
[timeouts]
client_handshake = 1

[censorship]
server_hello_delay_max_ms = 1500
"#,
    );

    let err =
        ProxyConfig::load(&path).expect_err("delay larger than handshake timeout must be rejected");
    let msg = err.to_string();
    assert!(
        msg.contains(
            "censorship.server_hello_delay_max_ms must be < timeouts.client_handshake * 1000"
        ),
        "error must explain delay<timeout invariant, got: {msg}"
    );

    remove_temp_config(&path);
}

#[test]
fn load_accepts_server_hello_delay_strictly_below_handshake_timeout_budget() {
    let path = write_temp_config(
        r#"
[timeouts]
client_handshake = 1

[censorship]
server_hello_delay_max_ms = 999
"#,
    );

    let cfg =
        ProxyConfig::load(&path).expect("delay below handshake timeout budget must be accepted");
    assert_eq!(cfg.timeouts.client_handshake, 1);
    assert_eq!(cfg.censorship.server_hello_delay_max_ms, 999);

    remove_temp_config(&path);
}
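
// Budget arithmetic behind these three tests: timeouts.client_handshake is in
// seconds, so client_handshake = 1 gives a 1000 ms budget. 999 ms is the largest
// accepted delay, while 1000 ms (equal) and 1500 ms (greater) both violate the
// strict `delay < timeout * 1000` invariant.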

File diff suppressed because it is too large
@@ -0,0 +1,755 @@
use std::collections::BTreeSet;
use std::net::IpAddr;
use std::path::PathBuf;
use std::sync::Arc;
use std::time::Duration;

use tokio::io::AsyncWriteExt;
use tokio::process::Command;
use tokio::sync::{mpsc, watch};
use tracing::{debug, info, warn};

use crate::config::{ConntrackBackend, ConntrackMode, ProxyConfig};
use crate::proxy::middle_relay::note_global_relay_pressure;
use crate::proxy::shared_state::{ConntrackCloseEvent, ConntrackCloseReason, ProxySharedState};
use crate::stats::Stats;

const CONNTRACK_EVENT_QUEUE_CAPACITY: usize = 32_768;
const PRESSURE_RELEASE_TICKS: u8 = 3;
const PRESSURE_SAMPLE_INTERVAL: Duration = Duration::from_secs(1);

#[derive(Clone, Copy, Debug, PartialEq, Eq)]
enum NetfilterBackend {
    Nftables,
    Iptables,
}

#[derive(Clone, Copy)]
struct PressureSample {
    conn_pct: Option<u8>,
    fd_pct: Option<u8>,
    accept_timeout_delta: u64,
    me_queue_pressure_delta: u64,
}

struct PressureState {
    active: bool,
    low_streak: u8,
    prev_accept_timeout_total: u64,
    prev_me_queue_pressure_total: u64,
}

impl PressureState {
    fn new(stats: &Stats) -> Self {
        Self {
            active: false,
            low_streak: 0,
            prev_accept_timeout_total: stats.get_accept_permit_timeout_total(),
            prev_me_queue_pressure_total: stats.get_me_c2me_send_full_total(),
        }
    }
}

pub(crate) fn spawn_conntrack_controller(
    config_rx: watch::Receiver<Arc<ProxyConfig>>,
    stats: Arc<Stats>,
    shared: Arc<ProxySharedState>,
) {
    if !cfg!(target_os = "linux") {
        let enabled = config_rx
            .borrow()
            .server
            .conntrack_control
            .inline_conntrack_control;
        stats.set_conntrack_control_enabled(enabled);
        stats.set_conntrack_control_available(false);
        stats.set_conntrack_pressure_active(false);
        stats.set_conntrack_event_queue_depth(0);
        stats.set_conntrack_rule_apply_ok(false);
        shared.disable_conntrack_close_sender();
        shared.set_conntrack_pressure_active(false);
        if enabled {
            warn!(
                "conntrack control is configured but unsupported on this OS; disabling runtime worker"
            );
        }
        return;
    }

    let (tx, rx) = mpsc::channel(CONNTRACK_EVENT_QUEUE_CAPACITY);
    shared.set_conntrack_close_sender(tx);
    tokio::spawn(async move {
        run_conntrack_controller(config_rx, stats, shared, rx).await;
    });
}

async fn run_conntrack_controller(
    mut config_rx: watch::Receiver<Arc<ProxyConfig>>,
    stats: Arc<Stats>,
    shared: Arc<ProxySharedState>,
    mut close_rx: mpsc::Receiver<ConntrackCloseEvent>,
) {
    let mut cfg = config_rx.borrow().clone();
    let mut pressure_state = PressureState::new(stats.as_ref());
    let mut delete_budget_tokens = cfg.server.conntrack_control.delete_budget_per_sec;
    let mut backend = pick_backend(cfg.server.conntrack_control.backend);

    apply_runtime_state(
        stats.as_ref(),
        shared.as_ref(),
        &cfg,
        backend.is_some(),
        false,
    );
    reconcile_rules(&cfg, backend, stats.as_ref()).await;

    loop {
        tokio::select! {
            changed = config_rx.changed() => {
                if changed.is_err() {
                    break;
                }
                cfg = config_rx.borrow_and_update().clone();
                backend = pick_backend(cfg.server.conntrack_control.backend);
                delete_budget_tokens = cfg.server.conntrack_control.delete_budget_per_sec;
                apply_runtime_state(stats.as_ref(), shared.as_ref(), &cfg, backend.is_some(), pressure_state.active);
                reconcile_rules(&cfg, backend, stats.as_ref()).await;
            }
            event = close_rx.recv() => {
                let Some(event) = event else {
                    break;
                };
                stats.set_conntrack_event_queue_depth(close_rx.len() as u64);
                if !cfg.server.conntrack_control.inline_conntrack_control {
                    continue;
                }
                if !pressure_state.active {
                    continue;
                }
                if !matches!(event.reason, ConntrackCloseReason::Timeout | ConntrackCloseReason::Pressure | ConntrackCloseReason::Reset) {
                    continue;
                }
                if delete_budget_tokens == 0 {
                    continue;
                }
                stats.increment_conntrack_delete_attempt_total();
                match delete_conntrack_entry(event).await {
                    DeleteOutcome::Deleted => {
                        delete_budget_tokens = delete_budget_tokens.saturating_sub(1);
                        stats.increment_conntrack_delete_success_total();
                    }
                    DeleteOutcome::NotFound => {
                        delete_budget_tokens = delete_budget_tokens.saturating_sub(1);
                        stats.increment_conntrack_delete_not_found_total();
                    }
                    DeleteOutcome::Error => {
                        delete_budget_tokens = delete_budget_tokens.saturating_sub(1);
                        stats.increment_conntrack_delete_error_total();
                    }
                }
            }
            _ = tokio::time::sleep(PRESSURE_SAMPLE_INTERVAL) => {
                delete_budget_tokens = cfg.server.conntrack_control.delete_budget_per_sec;
                stats.set_conntrack_event_queue_depth(close_rx.len() as u64);
                let sample = collect_pressure_sample(stats.as_ref(), &cfg, &mut pressure_state);
                update_pressure_state(
                    stats.as_ref(),
                    shared.as_ref(),
                    &cfg,
                    &sample,
                    &mut pressure_state,
                );
                if pressure_state.active {
                    note_global_relay_pressure(shared.as_ref());
                }
            }
        }
    }

    shared.disable_conntrack_close_sender();
    shared.set_conntrack_pressure_active(false);
    stats.set_conntrack_pressure_active(false);
}

fn apply_runtime_state(
    stats: &Stats,
    shared: &ProxySharedState,
    cfg: &ProxyConfig,
    backend_available: bool,
    pressure_active: bool,
) {
    let enabled = cfg.server.conntrack_control.inline_conntrack_control;
    let available = enabled && backend_available && has_cap_net_admin();
    if enabled && !available {
        warn!(
            "conntrack control enabled but unavailable (missing CAP_NET_ADMIN or backend binaries)"
        );
    }
    stats.set_conntrack_control_enabled(enabled);
    stats.set_conntrack_control_available(available);
    shared.set_conntrack_pressure_active(enabled && pressure_active);
    stats.set_conntrack_pressure_active(enabled && pressure_active);
}

fn collect_pressure_sample(
    stats: &Stats,
    cfg: &ProxyConfig,
    state: &mut PressureState,
) -> PressureSample {
    let current_connections = stats.get_current_connections_total();
    let conn_pct = if cfg.server.max_connections == 0 {
        None
    } else {
        Some(
            ((current_connections.saturating_mul(100)) / u64::from(cfg.server.max_connections))
                .min(100) as u8,
        )
    };

    let fd_pct = fd_usage_pct();

    let accept_total = stats.get_accept_permit_timeout_total();
    let accept_delta = accept_total.saturating_sub(state.prev_accept_timeout_total);
    state.prev_accept_timeout_total = accept_total;

    let me_total = stats.get_me_c2me_send_full_total();
    let me_delta = me_total.saturating_sub(state.prev_me_queue_pressure_total);
    state.prev_me_queue_pressure_total = me_total;

    PressureSample {
        conn_pct,
        fd_pct,
        accept_timeout_delta: accept_delta,
        me_queue_pressure_delta: me_delta,
    }
}
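
// Worked example of the sampling arithmetic above (illustrative numbers, not
// taken from any config): with max_connections = 500 and 450 connections open,
// conn_pct = 450 * 100 / 500 = 90. A max_connections of 0 means "unlimited",
// so the connection signal is skipped (None) rather than reported as 0%.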

fn update_pressure_state(
    stats: &Stats,
    shared: &ProxySharedState,
    cfg: &ProxyConfig,
    sample: &PressureSample,
    state: &mut PressureState,
) {
    if !cfg.server.conntrack_control.inline_conntrack_control {
        if state.active {
            state.active = false;
            state.low_streak = 0;
            shared.set_conntrack_pressure_active(false);
            stats.set_conntrack_pressure_active(false);
            info!("Conntrack pressure mode deactivated (feature disabled)");
        }
        return;
    }

    let high = cfg.server.conntrack_control.pressure_high_watermark_pct;
    let low = cfg.server.conntrack_control.pressure_low_watermark_pct;

    let high_hit = sample.conn_pct.is_some_and(|v| v >= high)
        || sample.fd_pct.is_some_and(|v| v >= high)
        || sample.accept_timeout_delta > 0
        || sample.me_queue_pressure_delta > 0;

    let low_clear = sample.conn_pct.is_none_or(|v| v <= low)
        && sample.fd_pct.is_none_or(|v| v <= low)
        && sample.accept_timeout_delta == 0
        && sample.me_queue_pressure_delta == 0;

    if !state.active && high_hit {
        state.active = true;
        state.low_streak = 0;
        shared.set_conntrack_pressure_active(true);
        stats.set_conntrack_pressure_active(true);
        info!(
            conn_pct = ?sample.conn_pct,
            fd_pct = ?sample.fd_pct,
            accept_timeout_delta = sample.accept_timeout_delta,
            me_queue_pressure_delta = sample.me_queue_pressure_delta,
            "Conntrack pressure mode activated"
        );
        return;
    }

    if state.active && low_clear {
        state.low_streak = state.low_streak.saturating_add(1);
        if state.low_streak >= PRESSURE_RELEASE_TICKS {
            state.active = false;
            state.low_streak = 0;
            shared.set_conntrack_pressure_active(false);
            stats.set_conntrack_pressure_active(false);
            info!("Conntrack pressure mode deactivated");
        }
        return;
    }

    state.low_streak = 0;
}
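
// Hysteresis timeline sketch (assumed watermarks high = 90 and low = 70, which
// are illustrative values rather than config defaults): one sample at >= 90%,
// or any non-zero accept-timeout / queue-full delta, activates pressure mode
// immediately. Release requires PRESSURE_RELEASE_TICKS = 3 consecutive
// one-second samples with every signal at or below 70%; any intermediate sample
// resets low_streak to 0, so a flapping load cannot toggle the mode every tick.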

async fn reconcile_rules(cfg: &ProxyConfig, backend: Option<NetfilterBackend>, stats: &Stats) {
    if !cfg.server.conntrack_control.inline_conntrack_control {
        clear_notrack_rules_all_backends().await;
        stats.set_conntrack_rule_apply_ok(true);
        return;
    }

    if !has_cap_net_admin() {
        stats.set_conntrack_rule_apply_ok(false);
        return;
    }

    let Some(backend) = backend else {
        stats.set_conntrack_rule_apply_ok(false);
        return;
    };

    let apply_result = match backend {
        NetfilterBackend::Nftables => apply_nft_rules(cfg).await,
        NetfilterBackend::Iptables => apply_iptables_rules(cfg).await,
    };

    if let Err(error) = apply_result {
        warn!(error = %error, "Failed to reconcile conntrack/notrack rules");
        stats.set_conntrack_rule_apply_ok(false);
    } else {
        stats.set_conntrack_rule_apply_ok(true);
    }
}

fn pick_backend(configured: ConntrackBackend) -> Option<NetfilterBackend> {
    match configured {
        ConntrackBackend::Auto => {
            if command_exists("nft") {
                Some(NetfilterBackend::Nftables)
            } else if command_exists("iptables") {
                Some(NetfilterBackend::Iptables)
            } else {
                None
            }
        }
        ConntrackBackend::Nftables => command_exists("nft").then_some(NetfilterBackend::Nftables),
        ConntrackBackend::Iptables => {
            command_exists("iptables").then_some(NetfilterBackend::Iptables)
        }
    }
}

fn command_exists(binary: &str) -> bool {
    let Some(path_var) = std::env::var_os("PATH") else {
        return false;
    };
    std::env::split_paths(&path_var).any(|dir| {
        let candidate: PathBuf = dir.join(binary);
        candidate.exists() && candidate.is_file()
    })
}

fn notrack_targets(cfg: &ProxyConfig) -> (Vec<Option<IpAddr>>, Vec<Option<IpAddr>>) {
    let mode = cfg.server.conntrack_control.mode;
    let mut v4_targets: BTreeSet<Option<IpAddr>> = BTreeSet::new();
    let mut v6_targets: BTreeSet<Option<IpAddr>> = BTreeSet::new();

    match mode {
        ConntrackMode::Tracked => {}
        ConntrackMode::Notrack => {
            if cfg.server.listeners.is_empty() {
                if let Some(ipv4) = cfg
                    .server
                    .listen_addr_ipv4
                    .as_ref()
                    .and_then(|s| s.parse::<IpAddr>().ok())
                {
                    if ipv4.is_unspecified() {
                        v4_targets.insert(None);
                    } else {
                        v4_targets.insert(Some(ipv4));
                    }
                }
                if let Some(ipv6) = cfg
                    .server
                    .listen_addr_ipv6
                    .as_ref()
                    .and_then(|s| s.parse::<IpAddr>().ok())
                {
                    if ipv6.is_unspecified() {
                        v6_targets.insert(None);
                    } else {
                        v6_targets.insert(Some(ipv6));
                    }
                }
            } else {
                for listener in &cfg.server.listeners {
                    if listener.ip.is_ipv4() {
                        if listener.ip.is_unspecified() {
                            v4_targets.insert(None);
                        } else {
                            v4_targets.insert(Some(listener.ip));
                        }
                    } else if listener.ip.is_unspecified() {
                        v6_targets.insert(None);
                    } else {
                        v6_targets.insert(Some(listener.ip));
                    }
                }
            }
        }
        ConntrackMode::Hybrid => {
            for ip in &cfg.server.conntrack_control.hybrid_listener_ips {
                if ip.is_ipv4() {
                    v4_targets.insert(Some(*ip));
                } else {
                    v6_targets.insert(Some(*ip));
                }
            }
        }
    }

    (
        v4_targets.into_iter().collect(),
        v6_targets.into_iter().collect(),
    )
}

async fn apply_nft_rules(cfg: &ProxyConfig) -> Result<(), String> {
    let _ = run_command(
        "nft",
        &["delete", "table", "inet", "telemt_conntrack"],
        None,
    )
    .await;
    if matches!(cfg.server.conntrack_control.mode, ConntrackMode::Tracked) {
        return Ok(());
    }

    let (v4_targets, v6_targets) = notrack_targets(cfg);
    let mut rules = Vec::new();
    for ip in v4_targets {
        let rule = if let Some(ip) = ip {
            format!("tcp dport {} ip daddr {} notrack", cfg.server.port, ip)
        } else {
            format!("tcp dport {} notrack", cfg.server.port)
        };
        rules.push(rule);
    }
    for ip in v6_targets {
        let rule = if let Some(ip) = ip {
            format!("tcp dport {} ip6 daddr {} notrack", cfg.server.port, ip)
        } else {
            format!("tcp dport {} notrack", cfg.server.port)
        };
        rules.push(rule);
    }

    let rule_blob = if rules.is_empty() {
        String::new()
    } else {
        format!(" {}\n", rules.join("\n "))
    };
    let script = format!(
        "table inet telemt_conntrack {{\n chain preraw {{\n type filter hook prerouting priority raw; policy accept;\n{rule_blob} }}\n}}\n"
    );
    run_command("nft", &["-f", "-"], Some(script)).await
}
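
// Illustrative rendering of the script piped to `nft -f -` above, assuming a
// hypothetical port 443, a wildcard IPv4 listener, and mode = notrack (the
// single-space indentation mirrors the format string literally):
//
//   table inet telemt_conntrack {
//    chain preraw {
//    type filter hook prerouting priority raw; policy accept;
//    tcp dport 443 notrack
//    }
//   }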

async fn apply_iptables_rules(cfg: &ProxyConfig) -> Result<(), String> {
    apply_iptables_rules_for_binary("iptables", cfg, true).await?;
    apply_iptables_rules_for_binary("ip6tables", cfg, false).await?;
    Ok(())
}

async fn apply_iptables_rules_for_binary(
    binary: &str,
    cfg: &ProxyConfig,
    ipv4: bool,
) -> Result<(), String> {
    if !command_exists(binary) {
        return Ok(());
    }
    let chain = "TELEMT_NOTRACK";
    let _ = run_command(
        binary,
        &["-t", "raw", "-D", "PREROUTING", "-j", chain],
        None,
    )
    .await;
    let _ = run_command(binary, &["-t", "raw", "-F", chain], None).await;
    let _ = run_command(binary, &["-t", "raw", "-X", chain], None).await;

    if matches!(cfg.server.conntrack_control.mode, ConntrackMode::Tracked) {
        return Ok(());
    }

    run_command(binary, &["-t", "raw", "-N", chain], None).await?;
    run_command(binary, &["-t", "raw", "-F", chain], None).await?;
    if run_command(
        binary,
        &["-t", "raw", "-C", "PREROUTING", "-j", chain],
        None,
    )
    .await
    .is_err()
    {
        run_command(
            binary,
            &["-t", "raw", "-I", "PREROUTING", "1", "-j", chain],
            None,
        )
        .await?;
    }

    let (v4_targets, v6_targets) = notrack_targets(cfg);
    let selected = if ipv4 { v4_targets } else { v6_targets };
    for ip in selected {
        let mut args = vec![
            "-t".to_string(),
            "raw".to_string(),
            "-A".to_string(),
            chain.to_string(),
            "-p".to_string(),
            "tcp".to_string(),
            "--dport".to_string(),
            cfg.server.port.to_string(),
        ];
        if let Some(ip) = ip {
            args.push("-d".to_string());
            args.push(ip.to_string());
        }
        args.push("-j".to_string());
        args.push("CT".to_string());
        args.push("--notrack".to_string());
        let arg_refs: Vec<&str> = args.iter().map(String::as_str).collect();
        run_command(binary, &arg_refs, None).await?;
    }
    Ok(())
}
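
// The sequence above is equivalent to running (shown for a hypothetical port
// 443 and a wildcard listener; IPv6 goes through ip6tables identically):
//
//   iptables -t raw -D PREROUTING -j TELEMT_NOTRACK   # best-effort cleanup
//   iptables -t raw -F TELEMT_NOTRACK
//   iptables -t raw -X TELEMT_NOTRACK
//   iptables -t raw -N TELEMT_NOTRACK
//   iptables -t raw -F TELEMT_NOTRACK
//   iptables -t raw -C PREROUTING -j TELEMT_NOTRACK \
//     || iptables -t raw -I PREROUTING 1 -j TELEMT_NOTRACK
//   iptables -t raw -A TELEMT_NOTRACK -p tcp --dport 443 -j CT --notrack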

async fn clear_notrack_rules_all_backends() {
    let _ = run_command(
        "nft",
        &["delete", "table", "inet", "telemt_conntrack"],
        None,
    )
    .await;
    let _ = run_command(
        "iptables",
        &["-t", "raw", "-D", "PREROUTING", "-j", "TELEMT_NOTRACK"],
        None,
    )
    .await;
    let _ = run_command("iptables", &["-t", "raw", "-F", "TELEMT_NOTRACK"], None).await;
    let _ = run_command("iptables", &["-t", "raw", "-X", "TELEMT_NOTRACK"], None).await;
    let _ = run_command(
        "ip6tables",
        &["-t", "raw", "-D", "PREROUTING", "-j", "TELEMT_NOTRACK"],
        None,
    )
    .await;
    let _ = run_command("ip6tables", &["-t", "raw", "-F", "TELEMT_NOTRACK"], None).await;
    let _ = run_command("ip6tables", &["-t", "raw", "-X", "TELEMT_NOTRACK"], None).await;
}

enum DeleteOutcome {
    Deleted,
    NotFound,
    Error,
}

async fn delete_conntrack_entry(event: ConntrackCloseEvent) -> DeleteOutcome {
    if !command_exists("conntrack") {
        return DeleteOutcome::Error;
    }
    let args = vec![
        "-D".to_string(),
        "-p".to_string(),
        "tcp".to_string(),
        "-s".to_string(),
        event.src.ip().to_string(),
        "--sport".to_string(),
        event.src.port().to_string(),
        "-d".to_string(),
        event.dst.ip().to_string(),
        "--dport".to_string(),
        event.dst.port().to_string(),
    ];
    let arg_refs: Vec<&str> = args.iter().map(String::as_str).collect();
    match run_command("conntrack", &arg_refs, None).await {
        Ok(()) => DeleteOutcome::Deleted,
        Err(error) => {
            if error.contains("0 flow entries have been deleted") {
                DeleteOutcome::NotFound
            } else {
                debug!(error = %error, "conntrack delete failed");
                DeleteOutcome::Error
            }
        }
    }
}
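
// One deletion above amounts to, e.g. (illustrative TEST-NET endpoints):
//
//   conntrack -D -p tcp -s 198.51.100.7 --sport 52044 -d 203.0.113.1 --dport 443
//
// conntrack(8) exits non-zero when nothing matches and reports
// "0 flow entries have been deleted", which is why that stderr text is
// classified as NotFound rather than Error.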

async fn run_command(binary: &str, args: &[&str], stdin: Option<String>) -> Result<(), String> {
    if !command_exists(binary) {
        return Err(format!("{binary} is not available"));
    }
    let mut command = Command::new(binary);
    command.args(args);
    if stdin.is_some() {
        command.stdin(std::process::Stdio::piped());
    }
    command.stdout(std::process::Stdio::null());
    command.stderr(std::process::Stdio::piped());
    let mut child = command
        .spawn()
        .map_err(|e| format!("spawn {binary} failed: {e}"))?;
    if let Some(blob) = stdin
        && let Some(mut writer) = child.stdin.take()
    {
        writer
            .write_all(blob.as_bytes())
            .await
            .map_err(|e| format!("stdin write {binary} failed: {e}"))?;
    }
    let output = child
        .wait_with_output()
        .await
        .map_err(|e| format!("wait {binary} failed: {e}"))?;
    if output.status.success() {
        return Ok(());
    }
    let stderr = String::from_utf8_lossy(&output.stderr).trim().to_string();
    Err(if stderr.is_empty() {
        format!("{binary} exited with status {}", output.status)
    } else {
        stderr
    })
}

fn fd_usage_pct() -> Option<u8> {
    let soft_limit = nofile_soft_limit()?;
    if soft_limit == 0 {
        return None;
    }
    let fd_count = std::fs::read_dir("/proc/self/fd").ok()?.count() as u64;
    Some(((fd_count.saturating_mul(100)) / soft_limit).min(100) as u8)
}

fn nofile_soft_limit() -> Option<u64> {
    #[cfg(target_os = "linux")]
    {
        let mut lim = libc::rlimit {
            rlim_cur: 0,
            rlim_max: 0,
        };
        let rc = unsafe { libc::getrlimit(libc::RLIMIT_NOFILE, &mut lim) };
        if rc != 0 {
            return None;
        }
        return Some(lim.rlim_cur);
    }
    #[cfg(not(target_os = "linux"))]
    {
        None
    }
}

fn has_cap_net_admin() -> bool {
    #[cfg(target_os = "linux")]
    {
        let Ok(status) = std::fs::read_to_string("/proc/self/status") else {
            return false;
        };
        for line in status.lines() {
            if let Some(raw) = line.strip_prefix("CapEff:") {
                let caps = raw.trim();
                if let Ok(bits) = u64::from_str_radix(caps, 16) {
                    const CAP_NET_ADMIN_BIT: u64 = 12;
                    return (bits & (1u64 << CAP_NET_ADMIN_BIT)) != 0;
                }
            }
        }
        false
    }
    #[cfg(not(target_os = "linux"))]
    {
        false
    }
}
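
// Example of the status line being parsed (a typical privileged process; the
// exact value varies by kernel, so treat it as illustrative):
//
//   CapEff: 000001ffffffffff
//
// CAP_NET_ADMIN is capability number 12 in linux/capability.h, so the check
// reduces to testing bit 12 of that hex bitmask.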

#[cfg(test)]
mod tests {
    use super::*;
    use crate::config::ProxyConfig;

    #[test]
    fn pressure_activates_on_accept_timeout_spike() {
        let stats = Stats::new();
        let shared = ProxySharedState::new();
        let mut cfg = ProxyConfig::default();
        cfg.server.conntrack_control.inline_conntrack_control = true;
        let mut state = PressureState::new(&stats);
        let sample = PressureSample {
            conn_pct: Some(10),
            fd_pct: Some(10),
            accept_timeout_delta: 1,
            me_queue_pressure_delta: 0,
        };

        update_pressure_state(&stats, shared.as_ref(), &cfg, &sample, &mut state);

        assert!(state.active);
        assert!(shared.conntrack_pressure_active());
        assert!(stats.get_conntrack_pressure_active());
    }

    #[test]
    fn pressure_releases_after_hysteresis_window() {
        let stats = Stats::new();
        let shared = ProxySharedState::new();
        let mut cfg = ProxyConfig::default();
        cfg.server.conntrack_control.inline_conntrack_control = true;
        let mut state = PressureState::new(&stats);

        let high_sample = PressureSample {
            conn_pct: Some(95),
            fd_pct: Some(95),
            accept_timeout_delta: 0,
            me_queue_pressure_delta: 0,
        };
        update_pressure_state(&stats, shared.as_ref(), &cfg, &high_sample, &mut state);
        assert!(state.active);

        let low_sample = PressureSample {
            conn_pct: Some(10),
            fd_pct: Some(10),
            accept_timeout_delta: 0,
            me_queue_pressure_delta: 0,
        };
        update_pressure_state(&stats, shared.as_ref(), &cfg, &low_sample, &mut state);
        assert!(state.active);
        update_pressure_state(&stats, shared.as_ref(), &cfg, &low_sample, &mut state);
        assert!(state.active);
        update_pressure_state(&stats, shared.as_ref(), &cfg, &low_sample, &mut state);

        assert!(!state.active);
        assert!(!shared.conntrack_pressure_active());
        assert!(!stats.get_conntrack_pressure_active());
    }

    #[test]
    fn pressure_does_not_activate_when_disabled() {
        let stats = Stats::new();
        let shared = ProxySharedState::new();
        let mut cfg = ProxyConfig::default();
        cfg.server.conntrack_control.inline_conntrack_control = false;
        let mut state = PressureState::new(&stats);
        let sample = PressureSample {
            conn_pct: Some(100),
            fd_pct: Some(100),
            accept_timeout_delta: 10,
            me_queue_pressure_delta: 10,
        };

        update_pressure_state(&stats, shared.as_ref(), &cfg, &sample, &mut state);

        assert!(!state.active);
        assert!(!shared.conntrack_pressure_active());
        assert!(!stats.get_conntrack_pressure_active());
    }
}
@@ -13,10 +13,13 @@

#![allow(dead_code)]

use aes::Aes256;
use ctr::{Ctr128BE, cipher::{KeyIvInit, StreamCipher}};
use zeroize::Zeroize;
use crate::error::{ProxyError, Result};
use aes::Aes256;
use ctr::{
    Ctr128BE,
    cipher::{KeyIvInit, StreamCipher},
};
use zeroize::Zeroize;

type Aes256Ctr = Ctr128BE<Aes256>;

@@ -42,33 +45,39 @@ impl AesCtr {
            cipher: Aes256Ctr::new(key.into(), (&iv_bytes).into()),
        }
    }

    /// Create from key and IV slices
    pub fn from_key_iv(key: &[u8], iv: &[u8]) -> Result<Self> {
        if key.len() != 32 {
            return Err(ProxyError::InvalidKeyLength { expected: 32, got: key.len() });
            return Err(ProxyError::InvalidKeyLength {
                expected: 32,
                got: key.len(),
            });
        }
        if iv.len() != 16 {
            return Err(ProxyError::InvalidKeyLength { expected: 16, got: iv.len() });
            return Err(ProxyError::InvalidKeyLength {
                expected: 16,
                got: iv.len(),
            });
        }

        let key: [u8; 32] = key.try_into().unwrap();
        let iv = u128::from_be_bytes(iv.try_into().unwrap());
        Ok(Self::new(&key, iv))
    }

    /// Encrypt/decrypt data in-place (CTR mode is symmetric)
    pub fn apply(&mut self, data: &mut [u8]) {
        self.cipher.apply_keystream(data);
    }

    /// Encrypt data, returning new buffer
    pub fn encrypt(&mut self, data: &[u8]) -> Vec<u8> {
        let mut output = data.to_vec();
        self.apply(&mut output);
        output
    }

    /// Decrypt data (for CTR, identical to encrypt)
    pub fn decrypt(&mut self, data: &[u8]) -> Vec<u8> {
        self.encrypt(data)
@@ -99,27 +108,33 @@ impl Drop for AesCbc {
impl AesCbc {
    /// AES block size
    const BLOCK_SIZE: usize = 16;

    /// Create new AES-CBC cipher with key and IV
    pub fn new(key: [u8; 32], iv: [u8; 16]) -> Self {
        Self { key, iv }
    }

    /// Create from slices
    pub fn from_slices(key: &[u8], iv: &[u8]) -> Result<Self> {
        if key.len() != 32 {
            return Err(ProxyError::InvalidKeyLength { expected: 32, got: key.len() });
            return Err(ProxyError::InvalidKeyLength {
                expected: 32,
                got: key.len(),
            });
        }
        if iv.len() != 16 {
            return Err(ProxyError::InvalidKeyLength { expected: 16, got: iv.len() });
            return Err(ProxyError::InvalidKeyLength {
                expected: 16,
                got: iv.len(),
            });
        }

        Ok(Self {
            key: key.try_into().unwrap(),
            iv: iv.try_into().unwrap(),
        })
    }

    /// Encrypt a single block using raw AES (no chaining)
    fn encrypt_block(&self, block: &[u8; 16], key_schedule: &aes::Aes256) -> [u8; 16] {
        use aes::cipher::BlockEncrypt;

@@ -127,7 +142,7 @@ impl AesCbc {
        key_schedule.encrypt_block((&mut output).into());
        output
    }

    /// Decrypt a single block using raw AES (no chaining)
    fn decrypt_block(&self, block: &[u8; 16], key_schedule: &aes::Aes256) -> [u8; 16] {
        use aes::cipher::BlockDecrypt;

@@ -135,7 +150,7 @@ impl AesCbc {
        key_schedule.decrypt_block((&mut output).into());
        output
    }

    /// XOR two 16-byte blocks
    fn xor_blocks(a: &[u8; 16], b: &[u8; 16]) -> [u8; 16] {
        let mut result = [0u8; 16];

@@ -144,27 +159,28 @@ impl AesCbc {
        }
        result
    }

    /// Encrypt data using CBC mode with proper chaining
    ///
    /// CBC Encryption: C[i] = AES_Encrypt(P[i] XOR C[i-1]), where C[-1] = IV
    pub fn encrypt(&self, data: &[u8]) -> Result<Vec<u8>> {
        if !data.len().is_multiple_of(Self::BLOCK_SIZE) {
            return Err(ProxyError::Crypto(
                format!("CBC data must be aligned to 16 bytes, got {}", data.len())
            ));
            return Err(ProxyError::Crypto(format!(
                "CBC data must be aligned to 16 bytes, got {}",
                data.len()
            )));
        }

        if data.is_empty() {
            return Ok(Vec::new());
        }

        use aes::cipher::KeyInit;
        let key_schedule = aes::Aes256::new((&self.key).into());

        let mut result = Vec::with_capacity(data.len());
        let mut prev_ciphertext = self.iv;

        for chunk in data.chunks(Self::BLOCK_SIZE) {
            let plaintext: [u8; 16] = chunk.try_into().unwrap();
            let xored = Self::xor_blocks(&plaintext, &prev_ciphertext);
@@ -172,30 +188,31 @@ impl AesCbc {
            prev_ciphertext = ciphertext;
            result.extend_from_slice(&ciphertext);
        }

        Ok(result)
    }

    /// Decrypt data using CBC mode with proper chaining
    ///
    /// CBC Decryption: P[i] = AES_Decrypt(C[i]) XOR C[i-1], where C[-1] = IV
    pub fn decrypt(&self, data: &[u8]) -> Result<Vec<u8>> {
        if !data.len().is_multiple_of(Self::BLOCK_SIZE) {
            return Err(ProxyError::Crypto(
                format!("CBC data must be aligned to 16 bytes, got {}", data.len())
            ));
            return Err(ProxyError::Crypto(format!(
                "CBC data must be aligned to 16 bytes, got {}",
                data.len()
            )));
        }

        if data.is_empty() {
            return Ok(Vec::new());
        }

        use aes::cipher::KeyInit;
        let key_schedule = aes::Aes256::new((&self.key).into());

        let mut result = Vec::with_capacity(data.len());
        let mut prev_ciphertext = self.iv;

        for chunk in data.chunks(Self::BLOCK_SIZE) {
            let ciphertext: [u8; 16] = chunk.try_into().unwrap();
            let decrypted = self.decrypt_block(&ciphertext, &key_schedule);
@@ -203,75 +220,77 @@ impl AesCbc {
            prev_ciphertext = ciphertext;
            result.extend_from_slice(&plaintext);
        }

        Ok(result)
    }

    /// Encrypt data in-place
    pub fn encrypt_in_place(&self, data: &mut [u8]) -> Result<()> {
        if !data.len().is_multiple_of(Self::BLOCK_SIZE) {
            return Err(ProxyError::Crypto(
                format!("CBC data must be aligned to 16 bytes, got {}", data.len())
            ));
            return Err(ProxyError::Crypto(format!(
                "CBC data must be aligned to 16 bytes, got {}",
                data.len()
            )));
        }

        if data.is_empty() {
            return Ok(());
        }

        use aes::cipher::KeyInit;
        let key_schedule = aes::Aes256::new((&self.key).into());

        let mut prev_ciphertext = self.iv;

        for i in (0..data.len()).step_by(Self::BLOCK_SIZE) {
            let block = &mut data[i..i + Self::BLOCK_SIZE];

            for j in 0..Self::BLOCK_SIZE {
                block[j] ^= prev_ciphertext[j];
            }

            let block_array: &mut [u8; 16] = block.try_into().unwrap();
            *block_array = self.encrypt_block(block_array, &key_schedule);

            prev_ciphertext = *block_array;
        }

        Ok(())
    }

    /// Decrypt data in-place
    pub fn decrypt_in_place(&self, data: &mut [u8]) -> Result<()> {
        if !data.len().is_multiple_of(Self::BLOCK_SIZE) {
            return Err(ProxyError::Crypto(
                format!("CBC data must be aligned to 16 bytes, got {}", data.len())
            ));
            return Err(ProxyError::Crypto(format!(
                "CBC data must be aligned to 16 bytes, got {}",
                data.len()
            )));
        }

        if data.is_empty() {
            return Ok(());
        }

        use aes::cipher::KeyInit;
        let key_schedule = aes::Aes256::new((&self.key).into());

        let mut prev_ciphertext = self.iv;

        for i in (0..data.len()).step_by(Self::BLOCK_SIZE) {
            let block = &mut data[i..i + Self::BLOCK_SIZE];

            let current_ciphertext: [u8; 16] = block.try_into().unwrap();

            let block_array: &mut [u8; 16] = block.try_into().unwrap();
            *block_array = self.decrypt_block(block_array, &key_schedule);

            for j in 0..Self::BLOCK_SIZE {
                block[j] ^= prev_ciphertext[j];
            }

            prev_ciphertext = current_ciphertext;
        }

        Ok(())
    }
}
@@ -318,227 +337,227 @@ impl Decryptor for PassthroughEncryptor {
#[cfg(test)]
mod tests {
    use super::*;

    // ============= AES-CTR Tests =============

    #[test]
    fn test_aes_ctr_roundtrip() {
        let key = [0u8; 32];
        let iv = 12345u128;

        let original = b"Hello, MTProto!";

        let mut enc = AesCtr::new(&key, iv);
        let encrypted = enc.encrypt(original);

        let mut dec = AesCtr::new(&key, iv);
        let decrypted = dec.decrypt(&encrypted);

        assert_eq!(original.as_slice(), decrypted.as_slice());
    }

    #[test]
    fn test_aes_ctr_in_place() {
        let key = [0x42u8; 32];
        let iv = 999u128;

        let original = b"Test data for in-place encryption";
        let mut data = original.to_vec();

        let mut cipher = AesCtr::new(&key, iv);
        cipher.apply(&mut data);

        assert_ne!(&data[..], original);

        let mut cipher = AesCtr::new(&key, iv);
        cipher.apply(&mut data);

        assert_eq!(&data[..], original);
    }

    // ============= AES-CBC Tests =============

    #[test]
    fn test_aes_cbc_roundtrip() {
        let key = [0u8; 32];
        let iv = [0u8; 16];

        let original = [0u8; 32];

        let cipher = AesCbc::new(key, iv);
        let encrypted = cipher.encrypt(&original).unwrap();
        let decrypted = cipher.decrypt(&encrypted).unwrap();

        assert_eq!(original.as_slice(), decrypted.as_slice());
    }

    #[test]
    fn test_aes_cbc_chaining_works() {
        let key = [0x42u8; 32];
        let iv = [0x00u8; 16];

        let plaintext = [0xAAu8; 32];

        let cipher = AesCbc::new(key, iv);
        let ciphertext = cipher.encrypt(&plaintext).unwrap();

        let block1 = &ciphertext[0..16];
        let block2 = &ciphertext[16..32];

        assert_ne!(
            block1, block2,
            "CBC chaining broken: identical plaintext blocks produced identical ciphertext"
        );
    }

    #[test]
    fn test_aes_cbc_known_vector() {
        let key = [0u8; 32];
        let iv = [0u8; 16];
        let plaintext = [0u8; 16];

        let cipher = AesCbc::new(key, iv);
        let ciphertext = cipher.encrypt(&plaintext).unwrap();

        let decrypted = cipher.decrypt(&ciphertext).unwrap();
        assert_eq!(plaintext.as_slice(), decrypted.as_slice());

        assert_ne!(ciphertext.as_slice(), plaintext.as_slice());
    }

    #[test]
    fn test_aes_cbc_multi_block() {
        let key = [0x12u8; 32];
        let iv = [0x34u8; 16];

        let plaintext: Vec<u8> = (0..80).collect();

        let cipher = AesCbc::new(key, iv);
        let ciphertext = cipher.encrypt(&plaintext).unwrap();
        let decrypted = cipher.decrypt(&ciphertext).unwrap();

        assert_eq!(plaintext, decrypted);
    }

    #[test]
    fn test_aes_cbc_in_place() {
        let key = [0x12u8; 32];
        let iv = [0x34u8; 16];

        let original = [0x56u8; 48];
        let mut buffer = original;

        let cipher = AesCbc::new(key, iv);

        cipher.encrypt_in_place(&mut buffer).unwrap();
        assert_ne!(&buffer[..], &original[..]);

        cipher.decrypt_in_place(&mut buffer).unwrap();
        assert_eq!(&buffer[..], &original[..]);
    }

    #[test]
    fn test_aes_cbc_empty_data() {
        let cipher = AesCbc::new([0u8; 32], [0u8; 16]);

        let encrypted = cipher.encrypt(&[]).unwrap();
        assert!(encrypted.is_empty());

        let decrypted = cipher.decrypt(&[]).unwrap();
        assert!(decrypted.is_empty());
    }

    #[test]
    fn test_aes_cbc_unaligned_error() {
        let cipher = AesCbc::new([0u8; 32], [0u8; 16]);

        let result = cipher.encrypt(&[0u8; 15]);
        assert!(result.is_err());

        let result = cipher.encrypt(&[0u8; 17]);
        assert!(result.is_err());
    }

    #[test]
    fn test_aes_cbc_avalanche_effect() {
        let key = [0xAB; 32];
        let iv = [0xCD; 16];

        let plaintext1 = [0u8; 32];
        let mut plaintext2 = [0u8; 32];
        plaintext2[0] = 0x01;

        let cipher = AesCbc::new(key, iv);

        let ciphertext1 = cipher.encrypt(&plaintext1).unwrap();
        let ciphertext2 = cipher.encrypt(&plaintext2).unwrap();

        assert_ne!(&ciphertext1[0..16], &ciphertext2[0..16]);
        assert_ne!(&ciphertext1[16..32], &ciphertext2[16..32]);
    }

    #[test]
    fn test_aes_cbc_iv_matters() {
        let key = [0x55; 32];
        let plaintext = [0x77u8; 16];

        let cipher1 = AesCbc::new(key, [0u8; 16]);
        let cipher2 = AesCbc::new(key, [1u8; 16]);

        let ciphertext1 = cipher1.encrypt(&plaintext).unwrap();
        let ciphertext2 = cipher2.encrypt(&plaintext).unwrap();

        assert_ne!(ciphertext1, ciphertext2);
    }

    #[test]
    fn test_aes_cbc_deterministic() {
        let key = [0x99; 32];
        let iv = [0x88; 16];
        let plaintext = [0x77u8; 32];

        let cipher = AesCbc::new(key, iv);

        let ciphertext1 = cipher.encrypt(&plaintext).unwrap();
        let ciphertext2 = cipher.encrypt(&plaintext).unwrap();

        assert_eq!(ciphertext1, ciphertext2);
    }

    // ============= Zeroize Tests =============

    #[test]
    fn test_aes_cbc_zeroize_on_drop() {
        let key = [0xAA; 32];
        let iv = [0xBB; 16];

        let cipher = AesCbc::new(key, iv);
        // Verify key/iv are set
        assert_eq!(cipher.key, [0xAA; 32]);
        assert_eq!(cipher.iv, [0xBB; 16]);

        drop(cipher);
        // After drop, key/iv are zeroized (can't observe directly,
        // but the Drop impl runs without panic)
    }

    // ============= Error Handling Tests =============

    #[test]
    fn test_invalid_key_length() {
        let result = AesCtr::from_key_iv(&[0u8; 16], &[0u8; 16]);
        assert!(result.is_err());

        let result = AesCbc::from_slices(&[0u8; 16], &[0u8; 16]);
        assert!(result.is_err());
    }

    #[test]
    fn test_invalid_iv_length() {
        let result = AesCtr::from_key_iv(&[0u8; 32], &[0u8; 8]);
        assert!(result.is_err());

        let result = AesCbc::from_slices(&[0u8; 32], &[0u8; 8]);
        assert!(result.is_err());
    }
}

@@ -12,10 +12,10 @@
//! usages are intentional and protocol-mandated.

use hmac::{Hmac, Mac};
use sha2::Sha256;
use md5::Md5;
use sha1::Sha1;
use sha2::Digest;
use sha2::Sha256;

type HmacSha256 = Hmac<Sha256>;

@@ -28,8 +28,7 @@ pub fn sha256(data: &[u8]) -> [u8; 32] {

/// SHA-256 HMAC
pub fn sha256_hmac(key: &[u8], data: &[u8]) -> [u8; 32] {
    let mut mac = HmacSha256::new_from_slice(key)
        .expect("HMAC accepts any key length");
    let mut mac = HmacSha256::new_from_slice(key).expect("HMAC accepts any key length");
    mac.update(data);
    mac.finalize().into_bytes().into()
}

@@ -124,27 +123,18 @@ pub fn derive_middleproxy_keys(
    srv_ipv6: Option<&[u8; 16]>,
) -> ([u8; 32], [u8; 16]) {
    let s = build_middleproxy_prekey(
        nonce_srv,
        nonce_clt,
        clt_ts,
        srv_ip,
        clt_port,
        purpose,
        clt_ip,
        srv_port,
        secret,
        clt_ipv6,
        srv_ipv6,
        nonce_srv, nonce_clt, clt_ts, srv_ip, clt_port, purpose, clt_ip, srv_port, secret,
        clt_ipv6, srv_ipv6,
    );

    let md5_1 = md5(&s[1..]);
    let sha1_sum = sha1(&s);
    let md5_2 = md5(&s[2..]);

    let mut key = [0u8; 32];
    key[..12].copy_from_slice(&md5_1[..12]);
    key[12..].copy_from_slice(&sha1_sum);

    (key, md5_2)
}
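
// Layout of the derived material above (reading off the copy_from_slice calls):
//   key[0..12]  = MD5(s[1..])[0..12]   -- first 12 bytes of the first digest
//   key[12..32] = SHA1(s)              -- all 20 bytes of the SHA-1 digest
//   iv          = MD5(s[2..])          -- 16 bytes, the second tuple field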

@@ -164,17 +154,8 @@ mod tests {
        let secret = vec![0x55u8; 128];

        let prekey = build_middleproxy_prekey(
            &nonce_srv,
            &nonce_clt,
            &clt_ts,
            srv_ip,
            &clt_port,
            b"CLIENT",
            clt_ip,
            &srv_port,
            &secret,
            None,
            None,
            &nonce_srv, &nonce_clt, &clt_ts, srv_ip, &clt_port, b"CLIENT", clt_ip, &srv_port,
            &secret, None, None,
        );
        let digest = sha256(&prekey);
        assert_eq!(

@@ -4,7 +4,7 @@ pub mod aes;
pub mod hash;
pub mod random;

pub use aes::{AesCtr, AesCbc};
pub use aes::{AesCbc, AesCtr};
pub use hash::{
    build_middleproxy_prekey, crc32, crc32c, derive_middleproxy_keys, sha256, sha256_hmac,
};
@@ -3,11 +3,11 @@
#![allow(deprecated)]
#![allow(dead_code)]

use rand::{Rng, RngCore, SeedableRng};
use rand::rngs::StdRng;
use parking_lot::Mutex;
use zeroize::Zeroize;
use crate::crypto::AesCtr;
use parking_lot::Mutex;
use rand::rngs::StdRng;
use rand::{Rng, RngCore, SeedableRng};
use zeroize::Zeroize;

/// Cryptographically secure PRNG with AES-CTR
pub struct SecureRandom {

@@ -21,6 +21,7 @@ struct SecureRandomInner {
    rng: StdRng,
    cipher: AesCtr,
    buffer: Vec<u8>,
    buffer_start: usize,
}

impl Drop for SecureRandomInner {

@@ -33,25 +34,26 @@ impl SecureRandom {
    pub fn new() -> Self {
        let mut seed_source = rand::rng();
        let mut rng = StdRng::from_rng(&mut seed_source);

        let mut key = [0u8; 32];
        rng.fill_bytes(&mut key);
        let iv: u128 = rng.random();

        let cipher = AesCtr::new(&key, iv);

        // Zeroize local key copy — cipher already consumed it
        key.zeroize();

        Self {
            inner: Mutex::new(SecureRandomInner {
                rng,
                cipher,
                buffer: Vec::with_capacity(1024),
                buffer_start: 0,
            }),
        }
    }

    /// Fill a caller-provided buffer with random bytes.
    pub fn fill(&self, out: &mut [u8]) {
        let mut inner = self.inner.lock();

@@ -59,16 +61,29 @@ impl SecureRandom {

        let mut written = 0usize;
        while written < out.len() {
            if inner.buffer_start >= inner.buffer.len() {
                inner.buffer.clear();
                inner.buffer_start = 0;
            }

            if inner.buffer.is_empty() {
                let mut chunk = vec![0u8; CHUNK_SIZE];
                inner.rng.fill_bytes(&mut chunk);
                inner.cipher.apply(&mut chunk);
                inner.buffer.extend_from_slice(&chunk);
                inner.buffer_start = 0;
            }

            let take = (out.len() - written).min(inner.buffer.len());
            out[written..written + take].copy_from_slice(&inner.buffer[..take]);
            inner.buffer.drain(..take);
            let available = inner.buffer.len().saturating_sub(inner.buffer_start);
            let take = (out.len() - written).min(available);
            let start = inner.buffer_start;
            let end = start + take;
            out[written..written + take].copy_from_slice(&inner.buffer[start..end]);
            inner.buffer_start = end;
            if inner.buffer_start >= inner.buffer.len() {
                inner.buffer.clear();
                inner.buffer_start = 0;
            }
            written += take;
        }
    }
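
    // Design note: `buffer_start` turns the refill buffer into a simple read
    // cursor, so each fill() copies once and advances an index instead of
    // calling Vec::drain, which would memmove the unread tail on every read.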

@@ -79,25 +94,25 @@ impl SecureRandom {
        self.fill(&mut out);
        out
    }

    /// Generate random number in range [0, max)
    pub fn range(&self, max: usize) -> usize {
        if max == 0 {
            return 0;
        }
        let mut inner = self.inner.lock();
        inner.rng.gen_range(0..max)
        inner.rng.random_range(0..max)
    }

    /// Generate random bits
    pub fn bits(&self, k: usize) -> u64 {
        if k == 0 {
            return 0;
        }

        let bytes_needed = k.div_ceil(8);
        let bytes = self.bytes(bytes_needed.min(8));

        let mut result = 0u64;
        for (i, &b) in bytes.iter().enumerate() {
            if i >= 8 {

@@ -105,14 +120,14 @@ impl SecureRandom {
            }
            result |= (b as u64) << (i * 8);
        }

        if k < 64 {
            result &= (1u64 << k) - 1;
        }

        result
    }
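
    // Worked example: bits(4) reads div_ceil(4, 8) = 1 byte; if that byte is
    // 0xB7, the mask (1 << 4) - 1 = 0x0F reduces it to 0x07, so the result is
    // always in 0..=15. For k = 64 the mask is skipped to avoid a shift overflow.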

    /// Choose random element from slice
    pub fn choose<'a, T>(&self, slice: &'a [T]) -> Option<&'a T> {
        if slice.is_empty() {

@@ -121,22 +136,22 @@ impl SecureRandom {
        Some(&slice[self.range(slice.len())])
    }

    /// Shuffle slice in place
    pub fn shuffle<T>(&self, slice: &mut [T]) {
        let mut inner = self.inner.lock();
        for i in (1..slice.len()).rev() {
            let j = inner.rng.gen_range(0..=i);
            let j = inner.rng.random_range(0..=i);
            slice.swap(i, j);
        }
    }

    /// Generate random u32
    pub fn u32(&self) -> u32 {
        let mut inner = self.inner.lock();
        inner.rng.random()
    }

    /// Generate random u64
    pub fn u64(&self) -> u64 {
        let mut inner = self.inner.lock();

@@ -154,7 +169,7 @@ impl Default for SecureRandom {
mod tests {
    use super::*;
    use std::collections::HashSet;

    #[test]
    fn test_bytes_uniqueness() {
        let rng = SecureRandom::new();

@@ -162,7 +177,7 @@ mod tests {
        let b = rng.bytes(32);
        assert_ne!(a, b);
    }

    #[test]
    fn test_bytes_length() {
        let rng = SecureRandom::new();

@@ -171,63 +186,63 @@ mod tests {
        assert_eq!(rng.bytes(100).len(), 100);
        assert_eq!(rng.bytes(1000).len(), 1000);
    }

    #[test]
    fn test_range() {
        let rng = SecureRandom::new();

        for _ in 0..1000 {
            let n = rng.range(10);
            assert!(n < 10);
        }

        assert_eq!(rng.range(1), 0);
        assert_eq!(rng.range(0), 0);
    }

    #[test]
    fn test_bits() {
        let rng = SecureRandom::new();

        for _ in 0..100 {
            assert!(rng.bits(1) <= 1);
        }

        for _ in 0..100 {
            assert!(rng.bits(8) <= 255);
        }
    }

    #[test]
    fn test_choose() {
        let rng = SecureRandom::new();
        let items = vec![1, 2, 3, 4, 5];

        let mut seen = HashSet::new();
        for _ in 0..1000 {
            if let Some(&item) = rng.choose(&items) {
                seen.insert(item);
            }
        }

        assert_eq!(seen.len(), 5);

        let empty: Vec<i32> = vec![];
        assert!(rng.choose(&empty).is_none());
    }

    #[test]
    fn test_shuffle() {
        let rng = SecureRandom::new();
        let original = vec![1, 2, 3, 4, 5, 6, 7, 8, 9, 10];

        let mut shuffled = original.clone();
        rng.shuffle(&mut shuffled);

        let mut sorted = shuffled.clone();
        sorted.sort();
        assert_eq!(sorted, original);

        assert_ne!(shuffled, original);
    }
}

@@ -0,0 +1,541 @@
//! Unix daemon support for telemt.
//!
//! Provides classic Unix daemonization (double-fork), PID file management,
//! and privilege dropping for running telemt as a background service.

use std::fs::{self, File, OpenOptions};
use std::io::{self, Read, Write};
use std::os::unix::fs::OpenOptionsExt;
use std::path::{Path, PathBuf};

use nix::fcntl::{Flock, FlockArg};
use nix::unistd::{self, ForkResult, Gid, Pid, Uid, chdir, close, fork, getpid, setsid};
use tracing::{debug, info, warn};

/// Default PID file location.
pub const DEFAULT_PID_FILE: &str = "/var/run/telemt.pid";

/// Daemon configuration options parsed from CLI.
#[derive(Debug, Clone, Default)]
pub struct DaemonOptions {
    /// Run as daemon (fork to background).
    pub daemonize: bool,
    /// Path to PID file.
    pub pid_file: Option<PathBuf>,
    /// User to run as after binding sockets.
    pub user: Option<String>,
    /// Group to run as after binding sockets.
    pub group: Option<String>,
    /// Working directory for the daemon.
    pub working_dir: Option<PathBuf>,
    /// Explicit foreground mode (for systemd Type=simple).
    pub foreground: bool,
}

impl DaemonOptions {
    /// Returns the effective PID file path.
    pub fn pid_file_path(&self) -> &Path {
        self.pid_file
            .as_deref()
            .unwrap_or(Path::new(DEFAULT_PID_FILE))
    }

    /// Returns true if we should actually daemonize.
    /// Foreground flag takes precedence.
    pub fn should_daemonize(&self) -> bool {
        self.daemonize && !self.foreground
    }
}

/// Error types for daemon operations.
#[derive(Debug, thiserror::Error)]
pub enum DaemonError {
    #[error("fork failed: {0}")]
    ForkFailed(#[source] nix::Error),

    #[error("setsid failed: {0}")]
    SetsidFailed(#[source] nix::Error),

    #[error("chdir failed: {0}")]
    ChdirFailed(#[source] nix::Error),

    #[error("failed to open /dev/null: {0}")]
    DevNullFailed(#[source] io::Error),

    #[error("failed to redirect stdio: {0}")]
    RedirectFailed(#[source] nix::Error),

    #[error("PID file error: {0}")]
    PidFile(String),

    #[error("another instance is already running (pid {0})")]
    AlreadyRunning(i32),

    #[error("user '{0}' not found")]
    UserNotFound(String),

    #[error("group '{0}' not found")]
    GroupNotFound(String),

    #[error("failed to set uid/gid: {0}")]
    PrivilegeDrop(#[source] nix::Error),

    #[error("io error: {0}")]
    Io(#[from] io::Error),
}

/// Result of a successful daemonize() call.
#[derive(Debug)]
pub enum DaemonizeResult {
    /// We are the parent process and should exit.
    Parent,
    /// We are the daemon child process and should continue.
    Child,
}

/// Performs classic Unix double-fork daemonization.
///
/// This detaches the process from the controlling terminal:
/// 1. First fork - parent exits, child continues
/// 2. setsid() - become session leader
/// 3. Second fork - ensure we can never acquire a controlling terminal
/// 4. chdir("/") - don't hold any directory open
/// 5. Redirect stdin/stdout/stderr to /dev/null
///
/// Returns `DaemonizeResult::Parent` in the original parent (which should exit),
/// or `DaemonizeResult::Child` in the final daemon child.
pub fn daemonize(working_dir: Option<&Path>) -> Result<DaemonizeResult, DaemonError> {
    // First fork
    match unsafe { fork() } {
        Ok(ForkResult::Parent { .. }) => {
            // Parent exits
            return Ok(DaemonizeResult::Parent);
        }
        Ok(ForkResult::Child) => {
            // Child continues
        }
        Err(e) => return Err(DaemonError::ForkFailed(e)),
    }

    // Create new session, become session leader
    setsid().map_err(DaemonError::SetsidFailed)?;

    // Second fork to ensure we can never acquire a controlling terminal
    match unsafe { fork() } {
        Ok(ForkResult::Parent { .. }) => {
            // Intermediate parent exits
            std::process::exit(0);
        }
        Ok(ForkResult::Child) => {
            // Final daemon child continues
        }
        Err(e) => return Err(DaemonError::ForkFailed(e)),
    }

    // Change working directory
    let target_dir = working_dir.unwrap_or(Path::new("/"));
    chdir(target_dir).map_err(DaemonError::ChdirFailed)?;

    // Redirect stdin, stdout, stderr to /dev/null
    redirect_stdio_to_devnull()?;

    Ok(DaemonizeResult::Child)
}
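
// Typical call-site sketch (hypothetical `main`, names assumed for illustration):
//
//   let opts = DaemonOptions { daemonize: true, ..Default::default() };
//   if opts.should_daemonize() {
//       match daemonize(opts.working_dir.as_deref())? {
//           DaemonizeResult::Parent => return Ok(()), // original parent exits
//           DaemonizeResult::Child => {}              // daemon continues startup
//       }
//   }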
|
||||
|
||||
/// Redirects stdin, stdout, and stderr to /dev/null.
|
||||
fn redirect_stdio_to_devnull() -> Result<(), DaemonError> {
|
||||
let devnull = File::options()
|
||||
.read(true)
|
||||
.write(true)
|
||||
.open("/dev/null")
|
||||
.map_err(DaemonError::DevNullFailed)?;
|
||||
|
||||
let devnull_fd = std::os::unix::io::AsRawFd::as_raw_fd(&devnull);
|
||||
|
||||
// Use libc::dup2 directly for redirecting standard file descriptors
|
||||
// nix 0.31's dup2 requires OwnedFd which doesn't work well with stdio fds
|
||||
unsafe {
|
||||
// Redirect stdin (fd 0)
|
||||
if libc::dup2(devnull_fd, 0) < 0 {
|
||||
return Err(DaemonError::RedirectFailed(nix::errno::Errno::last()));
|
||||
}
|
||||
// Redirect stdout (fd 1)
|
||||
if libc::dup2(devnull_fd, 1) < 0 {
|
||||
return Err(DaemonError::RedirectFailed(nix::errno::Errno::last()));
|
||||
}
|
||||
// Redirect stderr (fd 2)
|
||||
if libc::dup2(devnull_fd, 2) < 0 {
|
||||
return Err(DaemonError::RedirectFailed(nix::errno::Errno::last()));
|
||||
}
|
||||
}
|
||||
|
||||
// Close original devnull fd if it's not one of the standard fds
|
||||
if devnull_fd > 2 {
|
||||
let _ = close(devnull_fd);
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}

/// PID file manager with flock-based locking.
pub struct PidFile {
    path: PathBuf,
    // Holding the `Flock` keeps the exclusive lock for the daemon's lifetime;
    // dropping it releases the lock and closes the file.
    file: Option<Flock<File>>,
    locked: bool,
}

impl PidFile {
    /// Creates a new PID file manager for the given path.
    pub fn new<P: AsRef<Path>>(path: P) -> Self {
        Self {
            path: path.as_ref().to_path_buf(),
            file: None,
            locked: false,
        }
    }

    /// Checks if another instance is already running.
    ///
    /// Returns the PID of the running instance if one exists.
    pub fn check_running(&self) -> Result<Option<i32>, DaemonError> {
        if !self.path.exists() {
            return Ok(None);
        }

        // Try to read existing PID
        let mut contents = String::new();
        File::open(&self.path)
            .and_then(|mut f| f.read_to_string(&mut contents))
            .map_err(|e| {
                DaemonError::PidFile(format!("cannot read {}: {}", self.path.display(), e))
            })?;

        let pid: i32 = contents
            .trim()
            .parse()
            .map_err(|_| DaemonError::PidFile(format!("invalid PID in {}", self.path.display())))?;

        // Check if process is still running
        if is_process_running(pid) {
            Ok(Some(pid))
        } else {
            // Stale PID file
            debug!(pid, path = %self.path.display(), "Removing stale PID file");
            let _ = fs::remove_file(&self.path);
            Ok(None)
        }
    }

    /// Acquires the PID file lock and writes the current PID.
    ///
    /// Fails if another instance is already running.
    pub fn acquire(&mut self) -> Result<(), DaemonError> {
        // Check for running instance first
        if let Some(pid) = self.check_running()? {
            return Err(DaemonError::AlreadyRunning(pid));
        }

        // Ensure parent directory exists
        if let Some(parent) = self.path.parent() {
            if !parent.exists() {
                fs::create_dir_all(parent).map_err(|e| {
                    DaemonError::PidFile(format!(
                        "cannot create directory {}: {}",
                        parent.display(),
                        e
                    ))
                })?;
            }
        }

        // Open/create PID file with exclusive lock
        let file = OpenOptions::new()
            .write(true)
            .create(true)
            .truncate(true)
            .mode(0o644)
            .open(&self.path)
            .map_err(|e| {
                DaemonError::PidFile(format!("cannot open {}: {}", self.path.display(), e))
            })?;

        // Try to acquire exclusive lock (non-blocking)
        let mut flock = Flock::lock(file, FlockArg::LockExclusiveNonblock).map_err(|(_, errno)| {
            // Check if another instance grabbed the lock
            if let Some(pid) = self.check_running().ok().flatten() {
                DaemonError::AlreadyRunning(pid)
            } else {
                DaemonError::PidFile(format!("cannot lock {}: {}", self.path.display(), errno))
            }
        })?;

        // Write our PID while the lock is held. `Flock<File>` derefs to
        // `File`, so the exclusive lock is never released during the write;
        // unlocking to write would open a race window for a second instance.
        let pid = getpid();
        writeln!(&mut *flock, "{}", pid).map_err(|e| {
            DaemonError::PidFile(format!(
                "cannot write PID to {}: {}",
                self.path.display(),
                e
            ))
        })?;

        // Keep the locked handle for the lifetime of the daemon
        self.file = Some(flock);
        self.locked = true;

        info!(pid = pid.as_raw(), path = %self.path.display(), "PID file created");
        Ok(())
    }

    /// Releases the PID file lock and removes the file.
    pub fn release(&mut self) -> Result<(), DaemonError> {
        if let Some(file) = self.file.take() {
            // Dropping the Flock releases the lock and closes the file
            drop(file);
        }
        self.locked = false;

        if self.path.exists() {
            fs::remove_file(&self.path).map_err(|e| {
                DaemonError::PidFile(format!("cannot remove {}: {}", self.path.display(), e))
            })?;
            debug!(path = %self.path.display(), "PID file removed");
        }

        Ok(())
    }

    /// Returns the path to this PID file.
    #[allow(dead_code)]
    pub fn path(&self) -> &Path {
        &self.path
    }
}
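
A usage sketch for this lifecycle (illustrative; `DEFAULT_PID_FILE` is the
constant the tests below reference):

    let mut pid_file = PidFile::new(DEFAULT_PID_FILE);
    pid_file.acquire()?;   // fails with AlreadyRunning(pid) if another holds it
    // ... run the daemon ...
    pid_file.release()?;   // or rely on Drop to clean up on an early exit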

impl Drop for PidFile {
    fn drop(&mut self) {
        if self.locked {
            if let Err(e) = self.release() {
                warn!(error = %e, "Failed to clean up PID file on drop");
            }
        }
    }
}

/// Checks if a process with the given PID is running.
fn is_process_running(pid: i32) -> bool {
    // kill(pid, 0) checks if process exists without sending a signal
    nix::sys::signal::kill(Pid::from_raw(pid), None).is_ok()
}
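
One caveat with this probe: kill(2) returns EPERM when the target process
exists but belongs to another user, so `is_ok()` can under-report. A stricter
variant (a sketch, not what this module does) would treat EPERM as "running":

    fn is_process_running_strict(pid: i32) -> bool {
        match nix::sys::signal::kill(Pid::from_raw(pid), None) {
            Ok(()) => true,
            Err(nix::errno::Errno::EPERM) => true, // exists, but not ours
            Err(_) => false,                       // e.g. ESRCH: no such process
        }
    }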

/// Drops privileges to the specified user and group.
///
/// This should be called after binding privileged ports but before
/// entering the main event loop.
pub fn drop_privileges(user: Option<&str>, group: Option<&str>) -> Result<(), DaemonError> {
    // Look up group first (need to do this while still root)
    let target_gid = if let Some(group_name) = group {
        Some(lookup_group(group_name)?)
    } else if let Some(user_name) = user {
        // If no group is specified but a user is, use the user's primary group
        Some(lookup_user_primary_gid(user_name)?)
    } else {
        None
    };

    // Look up user
    let target_uid = if let Some(user_name) = user {
        Some(lookup_user(user_name)?)
    } else {
        None
    };

    // Drop privileges: set GID first, then UID
    // (Setting UID first would prevent us from setting GID)
    if let Some(gid) = target_gid {
        unistd::setgid(gid).map_err(DaemonError::PrivilegeDrop)?;
        // Also set supplementary groups to just this one
        unistd::setgroups(&[gid]).map_err(DaemonError::PrivilegeDrop)?;
        info!(gid = gid.as_raw(), "Dropped group privileges");
    }

    if let Some(uid) = target_uid {
        unistd::setuid(uid).map_err(DaemonError::PrivilegeDrop)?;
        info!(uid = uid.as_raw(), "Dropped user privileges");
    }

    Ok(())
}
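
The ordering matters in the caller as well. A sketch of the intended sequence
(the `telemt` account name and the `bind_listener`/`run_event_loop` helpers
are hypothetical placeholders):

    let listener = bind_listener("0.0.0.0:443")?;      // needs root for ports < 1024
    drop_privileges(Some("telemt"), Some("telemt"))?;  // irreversible from here on
    run_event_loop(listener)?;                         // now running unprivileged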

/// Looks up a user by name and returns their UID.
fn lookup_user(name: &str) -> Result<Uid, DaemonError> {
    // Use libc getpwnam
    let c_name =
        std::ffi::CString::new(name).map_err(|_| DaemonError::UserNotFound(name.to_string()))?;

    unsafe {
        let pwd = libc::getpwnam(c_name.as_ptr());
        if pwd.is_null() {
            Err(DaemonError::UserNotFound(name.to_string()))
        } else {
            Ok(Uid::from_raw((*pwd).pw_uid))
        }
    }
}

/// Looks up a user's primary GID by username.
fn lookup_user_primary_gid(name: &str) -> Result<Gid, DaemonError> {
    let c_name =
        std::ffi::CString::new(name).map_err(|_| DaemonError::UserNotFound(name.to_string()))?;

    unsafe {
        let pwd = libc::getpwnam(c_name.as_ptr());
        if pwd.is_null() {
            Err(DaemonError::UserNotFound(name.to_string()))
        } else {
            Ok(Gid::from_raw((*pwd).pw_gid))
        }
    }
}

/// Looks up a group by name and returns its GID.
fn lookup_group(name: &str) -> Result<Gid, DaemonError> {
    let c_name =
        std::ffi::CString::new(name).map_err(|_| DaemonError::GroupNotFound(name.to_string()))?;

    unsafe {
        let grp = libc::getgrnam(c_name.as_ptr());
        if grp.is_null() {
            Err(DaemonError::GroupNotFound(name.to_string()))
        } else {
            Ok(Gid::from_raw((*grp).gr_gid))
        }
    }
}

/// Reads PID from a PID file.
#[allow(dead_code)]
pub fn read_pid_file<P: AsRef<Path>>(path: P) -> Result<i32, DaemonError> {
    let path = path.as_ref();
    let mut contents = String::new();
    File::open(path)
        .and_then(|mut f| f.read_to_string(&mut contents))
        .map_err(|e| DaemonError::PidFile(format!("cannot read {}: {}", path.display(), e)))?;

    contents
        .trim()
        .parse()
        .map_err(|_| DaemonError::PidFile(format!("invalid PID in {}", path.display())))
}

/// Sends a signal to the process specified in a PID file.
#[allow(dead_code)]
pub fn signal_pid_file<P: AsRef<Path>>(
    path: P,
    signal: nix::sys::signal::Signal,
) -> Result<(), DaemonError> {
    let pid = read_pid_file(&path)?;

    if !is_process_running(pid) {
        return Err(DaemonError::PidFile(format!(
            "process {} from {} is not running",
            pid,
            path.as_ref().display()
        )));
    }

    nix::sys::signal::kill(Pid::from_raw(pid), signal)
        .map_err(|e| DaemonError::PidFile(format!("cannot signal process {}: {}", pid, e)))?;

    Ok(())
}
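
This is the building block for `stop`/`reload`-style subcommands. A sketch
(the subcommand wiring itself is hypothetical; the signal constants are from
nix):

    use nix::sys::signal::Signal;

    // "stop": ask the daemon to shut down gracefully
    signal_pid_file(DEFAULT_PID_FILE, Signal::SIGTERM)?;

    // "reload": conventionally SIGHUP, assuming the daemon handles it
    signal_pid_file(DEFAULT_PID_FILE, Signal::SIGHUP)?;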

/// Status of the daemon, as derived from a PID file.
#[allow(dead_code)]
#[derive(Debug, Clone, PartialEq, Eq)]
pub enum DaemonStatus {
    /// Daemon is running with the given PID.
    Running(i32),
    /// PID file exists but process is not running.
    Stale(i32),
    /// No PID file exists.
    NotRunning,
}

/// Checks the daemon status from a PID file.
#[allow(dead_code)]
pub fn check_status<P: AsRef<Path>>(path: P) -> DaemonStatus {
    let path = path.as_ref();

    if !path.exists() {
        return DaemonStatus::NotRunning;
    }

    match read_pid_file(path) {
        Ok(pid) => {
            if is_process_running(pid) {
                DaemonStatus::Running(pid)
            } else {
                DaemonStatus::Stale(pid)
            }
        }
        Err(_) => DaemonStatus::NotRunning,
    }
}
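
A `status` subcommand then reduces to a match (sketch):

    match check_status(DEFAULT_PID_FILE) {
        DaemonStatus::Running(pid) => println!("running (pid {})", pid),
        DaemonStatus::Stale(pid) => println!("not running (stale PID file, pid {})", pid),
        DaemonStatus::NotRunning => println!("not running"),
    }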

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_daemon_options_default() {
        let opts = DaemonOptions::default();
        assert!(!opts.daemonize);
        assert!(!opts.should_daemonize());
        assert_eq!(opts.pid_file_path(), Path::new(DEFAULT_PID_FILE));
    }

    #[test]
    fn test_daemon_options_foreground_overrides() {
        let opts = DaemonOptions {
            daemonize: true,
            foreground: true,
            ..Default::default()
        };
        assert!(!opts.should_daemonize());
    }

    #[test]
    fn test_check_status_not_running() {
        let path = "/tmp/telemt_test_nonexistent.pid";
        assert_eq!(check_status(path), DaemonStatus::NotRunning);
    }

    #[test]
    fn test_pid_file_basic() {
        let path = "/tmp/telemt_test_pidfile.pid";
        let _ = fs::remove_file(path);

        let mut pf = PidFile::new(path);
        assert!(pf.check_running().unwrap().is_none());

        pf.acquire().unwrap();
        assert!(Path::new(path).exists());

        // Read it back
        let pid = read_pid_file(path).unwrap();
        assert_eq!(pid, std::process::id() as i32);

        pf.release().unwrap();
        assert!(!Path::new(path).exists());
    }
}

src/error.rs
@@ -12,28 +12,15 @@ use thiserror::Error;
#[derive(Debug)]
pub enum StreamError {
    /// Partial read: got fewer bytes than expected
    PartialRead {
        expected: usize,
        got: usize,
    },
    PartialRead { expected: usize, got: usize },
    /// Partial write: wrote fewer bytes than expected
    PartialWrite {
        expected: usize,
        written: usize,
    },
    PartialWrite { expected: usize, written: usize },
    /// Stream is in poisoned state and cannot be used
    Poisoned {
        reason: String,
    },
    Poisoned { reason: String },
    /// Buffer overflow: attempted to buffer more than allowed
    BufferOverflow {
        limit: usize,
        attempted: usize,
    },
    BufferOverflow { limit: usize, attempted: usize },
    /// Invalid frame format
    InvalidFrame {
        details: String,
    },
    InvalidFrame { details: String },
    /// Unexpected end of stream
    UnexpectedEof,
    /// Underlying I/O error

@@ -47,13 +34,21 @@ impl fmt::Display for StreamError {
                write!(f, "partial read: expected {} bytes, got {}", expected, got)
            }
            Self::PartialWrite { expected, written } => {
                write!(f, "partial write: expected {} bytes, wrote {}", expected, written)
                write!(
                    f,
                    "partial write: expected {} bytes, wrote {}",
                    expected, written
                )
            }
            Self::Poisoned { reason } => {
                write!(f, "stream poisoned: {}", reason)
            }
            Self::BufferOverflow { limit, attempted } => {
                write!(f, "buffer overflow: limit {}, attempted {}", limit, attempted)
                write!(
                    f,
                    "buffer overflow: limit {}, attempted {}",
                    limit, attempted
                )
            }
            Self::InvalidFrame { details } => {
                write!(f, "invalid frame: {}", details)

@@ -90,9 +85,7 @@ impl From<StreamError> for std::io::Error {
            StreamError::UnexpectedEof => {
                std::io::Error::new(std::io::ErrorKind::UnexpectedEof, err)
            }
            StreamError::Poisoned { .. } => {
                std::io::Error::other(err)
            }
            StreamError::Poisoned { .. } => std::io::Error::other(err),
            StreamError::BufferOverflow { .. } => {
                std::io::Error::new(std::io::ErrorKind::OutOfMemory, err)
            }

@@ -112,7 +105,7 @@ impl From<StreamError> for std::io::Error {
pub trait Recoverable {
    /// Check if error is recoverable (can retry operation)
    fn is_recoverable(&self) -> bool;

    /// Check if connection can continue after this error
    fn can_continue(&self) -> bool;
}

@@ -123,19 +116,22 @@ impl Recoverable for StreamError {
            Self::PartialRead { .. } | Self::PartialWrite { .. } => true,
            Self::Io(e) => matches!(
                e.kind(),
                std::io::ErrorKind::WouldBlock
                    | std::io::ErrorKind::Interrupted
                    | std::io::ErrorKind::TimedOut
            ),
            Self::Poisoned { .. }
                | Self::BufferOverflow { .. }
                | Self::InvalidFrame { .. }
                | Self::UnexpectedEof => false,
        }
    }

    fn can_continue(&self) -> bool {
        !matches!(self, Self::Poisoned { .. } | Self::UnexpectedEof | Self::BufferOverflow { .. })
        !matches!(
            self,
            Self::Poisoned { .. } | Self::UnexpectedEof | Self::BufferOverflow { .. }
        )
    }
}

@@ -143,19 +139,19 @@ impl Recoverable for std::io::Error {
    fn is_recoverable(&self) -> bool {
        matches!(
            self.kind(),
            std::io::ErrorKind::WouldBlock
                | std::io::ErrorKind::Interrupted
                | std::io::ErrorKind::TimedOut
        )
    }

    fn can_continue(&self) -> bool {
        !matches!(
            self.kind(),
            std::io::ErrorKind::BrokenPipe
                | std::io::ErrorKind::ConnectionReset
                | std::io::ErrorKind::ConnectionAborted
                | std::io::ErrorKind::NotConnected
        )
    }
}
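
Since both StreamError and std::io::Error implement Recoverable, a caller can
retry transient failures uniformly. A brief sketch with the `Recoverable`
trait in scope (the retry loop itself is illustrative, not part of this
crate; real code would wait for readiness instead of spinning):

    fn read_with_retry<R: std::io::Read>(r: &mut R, buf: &mut [u8]) -> std::io::Result<usize> {
        loop {
            match r.read(buf) {
                Ok(n) => return Ok(n),
                // WouldBlock / Interrupted / TimedOut are retryable per
                // Recoverable::is_recoverable above
                Err(e) if e.is_recoverable() => continue,
                Err(e) => return Err(e),
            }
        }
    }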

@@ -165,96 +161,91 @@ impl Recoverable for std::io::Error {
#[derive(Error, Debug)]
pub enum ProxyError {
    // ============= Crypto Errors =============
    #[error("Crypto error: {0}")]
    Crypto(String),

    #[error("Invalid key length: expected {expected}, got {got}")]
    InvalidKeyLength { expected: usize, got: usize },

    // ============= Stream Errors =============
    #[error("Stream error: {0}")]
    Stream(#[from] StreamError),

    // ============= Protocol Errors =============
    #[error("Invalid handshake: {0}")]
    InvalidHandshake(String),

    #[error("Invalid protocol tag: {0:02x?}")]
    InvalidProtoTag([u8; 4]),

    #[error("Invalid TLS record: type={record_type}, version={version:02x?}")]
    InvalidTlsRecord { record_type: u8, version: [u8; 2] },

    #[error("Replay attack detected from {addr}")]
    ReplayAttack { addr: SocketAddr },

    #[error("Time skew detected: client={client_time}, server={server_time}")]
    TimeSkew { client_time: u32, server_time: u32 },

    #[error("Invalid message length: {len} (min={min}, max={max})")]
    InvalidMessageLength { len: usize, min: usize, max: usize },

    #[error("Checksum mismatch: expected={expected:08x}, got={got:08x}")]
    ChecksumMismatch { expected: u32, got: u32 },

    #[error("Sequence number mismatch: expected={expected}, got={got}")]
    SeqNoMismatch { expected: i32, got: i32 },

    #[error("TLS handshake failed: {reason}")]
    TlsHandshakeFailed { reason: String },

    #[error("Telegram handshake timeout")]
    TgHandshakeTimeout,

    // ============= Network Errors =============
    #[error("Connection timeout to {addr}")]
    ConnectionTimeout { addr: String },

    #[error("Connection refused by {addr}")]
    ConnectionRefused { addr: String },

    #[error("IO error: {0}")]
    Io(#[from] std::io::Error),

    // ============= Proxy Protocol Errors =============
    #[error("Invalid proxy protocol header")]
    InvalidProxyProtocol,

    #[error("Unknown TLS SNI")]
    UnknownTlsSni,

    #[error("Proxy error: {0}")]
    Proxy(String),

    // ============= Config Errors =============
    #[error("Config error: {0}")]
    Config(String),

    #[error("Invalid secret for user {user}: {reason}")]
    InvalidSecret { user: String, reason: String },

    // ============= User Errors =============
    #[error("User {user} expired")]
    UserExpired { user: String },

    #[error("User {user} exceeded connection limit")]
    ConnectionLimitExceeded { user: String },

    #[error("User {user} exceeded data quota")]
    DataQuotaExceeded { user: String },

    #[error("Unknown user")]
    UnknownUser,

    #[error("Rate limited")]
    RateLimited,

    // ============= General Errors =============
    #[error("Internal error: {0}")]
    Internal(String),
}

@@ -269,7 +260,7 @@ impl Recoverable for ProxyError {
            _ => false,
        }
    }

    fn can_continue(&self) -> bool {
        match self {
            Self::Stream(e) => e.can_continue(),

@@ -301,17 +292,19 @@ impl<T, R, W> HandshakeResult<T, R, W> {
    pub fn is_success(&self) -> bool {
        matches!(self, HandshakeResult::Success(_))
    }

    /// Check if bad client
    pub fn is_bad_client(&self) -> bool {
        matches!(self, HandshakeResult::BadClient { .. })
    }

    /// Map the success value
    pub fn map<U, F: FnOnce(T) -> U>(self, f: F) -> HandshakeResult<U, R, W> {
        match self {
            HandshakeResult::Success(v) => HandshakeResult::Success(f(v)),
            HandshakeResult::BadClient { reader, writer } => HandshakeResult::BadClient { reader, writer },
            HandshakeResult::BadClient { reader, writer } => {
                HandshakeResult::BadClient { reader, writer }
            }
            HandshakeResult::Error(e) => HandshakeResult::Error(e),
        }
    }

@@ -338,76 +331,104 @@ impl<T, R, W> From<StreamError> for HandshakeResult<T, R, W> {
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_stream_error_display() {
        let err = StreamError::PartialRead { expected: 100, got: 50 };
        let err = StreamError::PartialRead {
            expected: 100,
            got: 50,
        };
        assert!(err.to_string().contains("100"));
        assert!(err.to_string().contains("50"));

        let err = StreamError::Poisoned { reason: "test".into() };
        let err = StreamError::Poisoned {
            reason: "test".into(),
        };
        assert!(err.to_string().contains("test"));
    }

    #[test]
    fn test_stream_error_recoverable() {
        assert!(StreamError::PartialRead { expected: 10, got: 5 }.is_recoverable());
        assert!(StreamError::PartialWrite { expected: 10, written: 5 }.is_recoverable());
        assert!(
            StreamError::PartialRead {
                expected: 10,
                got: 5
            }
            .is_recoverable()
        );
        assert!(
            StreamError::PartialWrite {
                expected: 10,
                written: 5
            }
            .is_recoverable()
        );
        assert!(!StreamError::Poisoned { reason: "x".into() }.is_recoverable());
        assert!(!StreamError::UnexpectedEof.is_recoverable());
    }

    #[test]
    fn test_stream_error_can_continue() {
        assert!(!StreamError::Poisoned { reason: "x".into() }.can_continue());
        assert!(!StreamError::UnexpectedEof.can_continue());
        assert!(StreamError::PartialRead { expected: 10, got: 5 }.can_continue());
        assert!(
            StreamError::PartialRead {
                expected: 10,
                got: 5
            }
            .can_continue()
        );
    }

    #[test]
    fn test_stream_error_to_io_error() {
        let stream_err = StreamError::UnexpectedEof;
        let io_err: std::io::Error = stream_err.into();
        assert_eq!(io_err.kind(), std::io::ErrorKind::UnexpectedEof);
    }

    #[test]
    fn test_handshake_result() {
        let success: HandshakeResult<i32, (), ()> = HandshakeResult::Success(42);
        assert!(success.is_success());
        assert!(!success.is_bad_client());

        let bad: HandshakeResult<i32, (), ()> = HandshakeResult::BadClient { reader: (), writer: () };
        let bad: HandshakeResult<i32, (), ()> = HandshakeResult::BadClient {
            reader: (),
            writer: (),
        };
        assert!(!bad.is_success());
        assert!(bad.is_bad_client());
    }

    #[test]
    fn test_handshake_result_map() {
        let success: HandshakeResult<i32, (), ()> = HandshakeResult::Success(42);
        let mapped = success.map(|x| x * 2);

        match mapped {
            HandshakeResult::Success(v) => assert_eq!(v, 84),
            _ => panic!("Expected success"),
        }
    }

    #[test]
    fn test_proxy_error_recoverable() {
        let err = ProxyError::RateLimited;
        assert!(err.is_recoverable());

        let err = ProxyError::InvalidHandshake("bad".into());
        assert!(!err.is_recoverable());
    }

    #[test]
    fn test_error_display() {
        let err = ProxyError::ConnectionTimeout { addr: "1.2.3.4:443".into() };
        let err = ProxyError::ConnectionTimeout {
            addr: "1.2.3.4:443".into(),
        };
        assert!(err.to_string().contains("1.2.3.4:443"));

        let err = ProxyError::InvalidProxyProtocol;
        assert!(err.to_string().contains("proxy protocol"));
    }
}
src/ip_tracker.rs

@@ -1,252 +1,478 @@
// src/ip_tracker.rs
// IP address tracking and limiting for users
// IP address tracking and per-user unique IP limiting.

#![allow(dead_code)]

use std::collections::{HashMap, HashSet};
use std::collections::HashMap;
use std::net::IpAddr;
use std::sync::Arc;
use tokio::sync::RwLock;
use std::sync::Mutex;
use std::sync::atomic::{AtomicU64, Ordering};
use std::time::{Duration, Instant};

use tokio::sync::{Mutex as AsyncMutex, RwLock};

use crate::config::UserMaxUniqueIpsMode;

/// Tracker of unique IP addresses for each MTProxy user.
///
/// Provides a thread-safe mechanism for:
/// - tracking each user's active IP addresses
/// - limiting the number of unique IPs per user
/// - automatic cleanup when clients disconnect
#[derive(Debug, Clone)]
pub struct UserIpTracker {
    /// Mapping: user name -> set of active IP addresses
    active_ips: Arc<RwLock<HashMap<String, HashSet<IpAddr>>>>,

    /// Mapping: user name -> maximum allowed number of unique IPs
    active_ips: Arc<RwLock<HashMap<String, HashMap<IpAddr, usize>>>>,
    recent_ips: Arc<RwLock<HashMap<String, HashMap<IpAddr, Instant>>>>,
    max_ips: Arc<RwLock<HashMap<String, usize>>>,
    default_max_ips: Arc<RwLock<usize>>,
    limit_mode: Arc<RwLock<UserMaxUniqueIpsMode>>,
    limit_window: Arc<RwLock<Duration>>,
    last_compact_epoch_secs: Arc<AtomicU64>,
    cleanup_queue: Arc<Mutex<Vec<(String, IpAddr)>>>,
    cleanup_drain_lock: Arc<AsyncMutex<()>>,
}

#[derive(Debug, Clone, Copy)]
pub struct UserIpTrackerMemoryStats {
    pub active_users: usize,
    pub recent_users: usize,
    pub active_entries: usize,
    pub recent_entries: usize,
    pub cleanup_queue_len: usize,
}

impl UserIpTracker {
    /// Create a new, empty tracker.
    pub fn new() -> Self {
        Self {
            active_ips: Arc::new(RwLock::new(HashMap::new())),
            recent_ips: Arc::new(RwLock::new(HashMap::new())),
            max_ips: Arc::new(RwLock::new(HashMap::new())),
            default_max_ips: Arc::new(RwLock::new(0)),
            limit_mode: Arc::new(RwLock::new(UserMaxUniqueIpsMode::ActiveWindow)),
            limit_window: Arc::new(RwLock::new(Duration::from_secs(30))),
            last_compact_epoch_secs: Arc::new(AtomicU64::new(0)),
            cleanup_queue: Arc::new(Mutex::new(Vec::new())),
            cleanup_drain_lock: Arc::new(AsyncMutex::new(())),
        }
    }

    /// Set the unique-IP limit for a specific user.
    ///
    /// # Arguments
    /// * `username` - user name
    /// * `max_ips` - maximum number of simultaneously active IP addresses
    pub fn enqueue_cleanup(&self, user: String, ip: IpAddr) {
        match self.cleanup_queue.lock() {
            Ok(mut queue) => queue.push((user, ip)),
            Err(poisoned) => {
                let mut queue = poisoned.into_inner();
                queue.push((user.clone(), ip));
                self.cleanup_queue.clear_poison();
                tracing::warn!(
                    "UserIpTracker cleanup_queue lock poisoned; recovered and enqueued IP cleanup for {} ({})",
                    user,
                    ip
                );
            }
        }
    }

    #[cfg(test)]
    pub(crate) fn cleanup_queue_len_for_tests(&self) -> usize {
        self.cleanup_queue
            .lock()
            .unwrap_or_else(|poisoned| poisoned.into_inner())
            .len()
    }

    #[cfg(test)]
    pub(crate) fn cleanup_queue_mutex_for_tests(&self) -> Arc<Mutex<Vec<(String, IpAddr)>>> {
        Arc::clone(&self.cleanup_queue)
    }

    pub(crate) async fn drain_cleanup_queue(&self) {
        // Serialize queue draining and active-IP mutation so check-and-add cannot
        // observe stale active entries that are already queued for removal.
        let _drain_guard = self.cleanup_drain_lock.lock().await;
        let to_remove = {
            match self.cleanup_queue.lock() {
                Ok(mut queue) => {
                    if queue.is_empty() {
                        return;
                    }
                    std::mem::take(&mut *queue)
                }
                Err(poisoned) => {
                    let mut queue = poisoned.into_inner();
                    if queue.is_empty() {
                        self.cleanup_queue.clear_poison();
                        return;
                    }
                    let drained = std::mem::take(&mut *queue);
                    self.cleanup_queue.clear_poison();
                    drained
                }
            }
        };

        let mut active_ips = self.active_ips.write().await;
        for (user, ip) in to_remove {
            if let Some(user_ips) = active_ips.get_mut(&user) {
                if let Some(count) = user_ips.get_mut(&ip) {
                    if *count > 1 {
                        *count -= 1;
                    } else {
                        user_ips.remove(&ip);
                    }
                }
                if user_ips.is_empty() {
                    active_ips.remove(&user);
                }
            }
        }
    }
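
    // The queue exists so that synchronous code paths (for example Drop
    // implementations, which cannot await the async RwLock) can still schedule
    // IP cleanup. A sketch of the intended calling pattern follows; the
    // connection-guard type is hypothetical, not part of this file:
    //
    //     struct ConnGuard {
    //         tracker: UserIpTracker, // Clone is derived above
    //         user: String,
    //         ip: IpAddr,
    //     }
    //
    //     impl Drop for ConnGuard {
    //         fn drop(&mut self) {
    //             // Cheap and non-blocking; the entry is applied later by
    //             // drain_cleanup_queue() from an async context.
    //             self.tracker.enqueue_cleanup(self.user.clone(), self.ip);
    //         }
    //     }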

    fn now_epoch_secs() -> u64 {
        std::time::SystemTime::now()
            .duration_since(std::time::UNIX_EPOCH)
            .unwrap_or_default()
            .as_secs()
    }

    async fn maybe_compact_empty_users(&self) {
        const COMPACT_INTERVAL_SECS: u64 = 60;
        let now_epoch_secs = Self::now_epoch_secs();
        let last_compact_epoch_secs = self.last_compact_epoch_secs.load(Ordering::Relaxed);
        if now_epoch_secs.saturating_sub(last_compact_epoch_secs) < COMPACT_INTERVAL_SECS {
            return;
        }
        if self
            .last_compact_epoch_secs
            .compare_exchange(
                last_compact_epoch_secs,
                now_epoch_secs,
                Ordering::AcqRel,
                Ordering::Relaxed,
            )
            .is_err()
        {
            return;
        }

        let mut active_ips = self.active_ips.write().await;
        let mut recent_ips = self.recent_ips.write().await;
        let window = *self.limit_window.read().await;
        let now = Instant::now();

        for user_recent in recent_ips.values_mut() {
            Self::prune_recent(user_recent, now, window);
        }

        let mut users =
            Vec::<String>::with_capacity(active_ips.len().saturating_add(recent_ips.len()));
        users.extend(active_ips.keys().cloned());
        for user in recent_ips.keys() {
            if !active_ips.contains_key(user) {
                users.push(user.clone());
            }
        }

        for user in users {
            let active_empty = active_ips
                .get(&user)
                .map(|ips| ips.is_empty())
                .unwrap_or(true);
            let recent_empty = recent_ips
                .get(&user)
                .map(|ips| ips.is_empty())
                .unwrap_or(true);
            if active_empty && recent_empty {
                active_ips.remove(&user);
                recent_ips.remove(&user);
            }
        }
    }

    pub async fn memory_stats(&self) -> UserIpTrackerMemoryStats {
        let cleanup_queue_len = self
            .cleanup_queue
            .lock()
            .unwrap_or_else(|poisoned| poisoned.into_inner())
            .len();
        let active_ips = self.active_ips.read().await;
        let recent_ips = self.recent_ips.read().await;
        let active_entries = active_ips.values().map(HashMap::len).sum();
        let recent_entries = recent_ips.values().map(HashMap::len).sum();

        UserIpTrackerMemoryStats {
            active_users: active_ips.len(),
            recent_users: recent_ips.len(),
            active_entries,
            recent_entries,
            cleanup_queue_len,
        }
    }

    pub async fn set_limit_policy(&self, mode: UserMaxUniqueIpsMode, window_secs: u64) {
        {
            let mut current_mode = self.limit_mode.write().await;
            *current_mode = mode;
        }
        let mut current_window = self.limit_window.write().await;
        *current_window = Duration::from_secs(window_secs.max(1));
    }

    pub async fn set_user_limit(&self, username: &str, max_ips: usize) {
        let mut limits = self.max_ips.write().await;
        limits.insert(username.to_string(), max_ips);
    }

    /// Load limits from the configuration.
    ///
    /// # Arguments
    /// * `limits` - map of limits from config.toml
    pub async fn load_limits(&self, limits: &HashMap<String, usize>) {
        let mut max_ips = self.max_ips.write().await;
        for (user, limit) in limits {
            max_ips.insert(user.clone(), *limit);
        }
    pub async fn remove_user_limit(&self, username: &str) {
        let mut limits = self.max_ips.write().await;
        limits.remove(username);
    }

    pub async fn load_limits(&self, default_limit: usize, limits: &HashMap<String, usize>) {
        let mut default_max_ips = self.default_max_ips.write().await;
        *default_max_ips = default_limit;
        drop(default_max_ips);
        let mut max_ips = self.max_ips.write().await;
        max_ips.clone_from(limits);
    }

    fn prune_recent(user_recent: &mut HashMap<IpAddr, Instant>, now: Instant, window: Duration) {
        if user_recent.is_empty() {
            return;
        }
        user_recent.retain(|_, seen_at| now.duration_since(*seen_at) <= window);
    }

    /// Check whether the user may connect from the given IP address,
    /// and add the IP to the active set if the check passes.
    ///
    /// # Arguments
    /// * `username` - user name
    /// * `ip` - the client's IP address
    ///
    /// # Returns
    /// * `Ok(())` - connection allowed, IP added to the active set
    /// * `Err(String)` - connection rejected, with the reason
    pub async fn check_and_add(&self, username: &str, ip: IpAddr) -> Result<(), String> {
        // Fetch the user's limit
        let max_ips = self.max_ips.read().await;
        let limit = match max_ips.get(username) {
            Some(limit) => *limit,
            None => {
                // No limit configured - allow unlimited access
                drop(max_ips);
                let mut active_ips = self.active_ips.write().await;
                let user_ips = active_ips
                    .entry(username.to_string())
                    .or_insert_with(HashSet::new);
                user_ips.insert(ip);
                return Ok(());
            }
        self.drain_cleanup_queue().await;
        self.maybe_compact_empty_users().await;
        let default_max_ips = *self.default_max_ips.read().await;
        let limit = {
            let max_ips = self.max_ips.read().await;
            max_ips
                .get(username)
                .copied()
                .filter(|limit| *limit > 0)
                .or((default_max_ips > 0).then_some(default_max_ips))
        };
        drop(max_ips);
        let mode = *self.limit_mode.read().await;
        let window = *self.limit_window.read().await;
        let now = Instant::now();

        // Check and update the active IPs
        let mut active_ips = self.active_ips.write().await;
        let user_ips = active_ips
        let user_active = active_ips
            .entry(username.to_string())
            .or_insert_with(HashSet::new);
            .or_insert_with(HashMap::new);

        // If the IP is already present, this is a reconnection - allow it
        if user_ips.contains(&ip) {
        let mut recent_ips = self.recent_ips.write().await;
        let user_recent = recent_ips
            .entry(username.to_string())
            .or_insert_with(HashMap::new);
        Self::prune_recent(user_recent, now, window);

        if let Some(count) = user_active.get_mut(&ip) {
            *count = count.saturating_add(1);
            user_recent.insert(ip, now);
            return Ok(());
        }

        // Check whether the limit has been exceeded
        if user_ips.len() >= limit {
            return Err(format!(
                "IP limit reached for user '{}': {}/{} unique IPs already connected",
                username,
                user_ips.len(),
                limit
            ));
        if let Some(limit) = limit {
            let active_limit_reached = user_active.len() >= limit;
            let recent_limit_reached = user_recent.len() >= limit;
            let deny = match mode {
                UserMaxUniqueIpsMode::ActiveWindow => active_limit_reached,
                UserMaxUniqueIpsMode::TimeWindow => recent_limit_reached,
                UserMaxUniqueIpsMode::Combined => active_limit_reached || recent_limit_reached,
            };

            if deny {
                return Err(format!(
                    "IP limit reached for user '{}': active={}/{} recent={}/{} mode={:?}",
                    username,
                    user_active.len(),
                    limit,
                    user_recent.len(),
                    limit,
                    mode
                ));
            }
        }

        // Limit not exceeded - record the new IP
        user_ips.insert(ip);
        user_active.insert(ip, 1);
        user_recent.insert(ip, now);
        Ok(())
    }
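
    // The connection-handling flow this supports, as a sketch (the
    // surrounding accept/handshake code is hypothetical):
    //
    //     // On accept, after the user is authenticated:
    //     if let Err(reason) = tracker.check_and_add(&user, peer_ip).await {
    //         // Deny: too many unique IPs for the configured mode/window
    //         return Err(reason);
    //     }
    //     // ... serve the connection ...
    //     // On disconnect (async path), decrement the per-IP session count:
    //     tracker.remove_ip(&user, peer_ip).await;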

    /// Remove an IP address from the active set when a client disconnects.
    ///
    /// # Arguments
    /// * `username` - user name
    /// * `ip` - IP address of the disconnected client
    pub async fn remove_ip(&self, username: &str, ip: IpAddr) {
        self.maybe_compact_empty_users().await;
        let mut active_ips = self.active_ips.write().await;

        if let Some(user_ips) = active_ips.get_mut(username) {
            user_ips.remove(&ip);

            // If the user has no active IPs left, drop the entry
            // to save memory
            if let Some(count) = user_ips.get_mut(&ip) {
                if *count > 1 {
                    *count -= 1;
                } else {
                    user_ips.remove(&ip);
                }
            }
            if user_ips.is_empty() {
                active_ips.remove(username);
            }
        }
    }

    /// Get the current number of active IP addresses for a user.
    ///
    /// # Arguments
    /// * `username` - user name
    ///
    /// # Returns
    /// The number of unique active IP addresses.
    pub async fn get_active_ip_count(&self, username: &str) -> usize {
        let active_ips = self.active_ips.read().await;
        active_ips
            .get(username)
            .map(|ips| ips.len())
            .unwrap_or(0)
    pub async fn get_recent_counts_for_users(&self, users: &[String]) -> HashMap<String, usize> {
        self.drain_cleanup_queue().await;
        let window = *self.limit_window.read().await;
        let now = Instant::now();
        let recent_ips = self.recent_ips.read().await;

        let mut counts = HashMap::with_capacity(users.len());
        for user in users {
            let count = if let Some(user_recent) = recent_ips.get(user) {
                user_recent
                    .values()
                    .filter(|seen_at| now.duration_since(**seen_at) <= window)
                    .count()
            } else {
                0
            };
            counts.insert(user.clone(), count);
        }
        counts
    }

    pub async fn get_active_ips_for_users(&self, users: &[String]) -> HashMap<String, Vec<IpAddr>> {
        self.drain_cleanup_queue().await;
        let active_ips = self.active_ips.read().await;
        let mut out = HashMap::with_capacity(users.len());
        for user in users {
            let mut ips = active_ips
                .get(user)
                .map(|per_ip| per_ip.keys().copied().collect::<Vec<_>>())
                .unwrap_or_else(Vec::new);
            ips.sort();
            out.insert(user.clone(), ips);
        }
        out
    }

    pub async fn get_recent_ips_for_users(&self, users: &[String]) -> HashMap<String, Vec<IpAddr>> {
        self.drain_cleanup_queue().await;
        let window = *self.limit_window.read().await;
        let now = Instant::now();
        let recent_ips = self.recent_ips.read().await;

        let mut out = HashMap::with_capacity(users.len());
        for user in users {
            let mut ips = if let Some(user_recent) = recent_ips.get(user) {
                user_recent
                    .iter()
                    .filter(|(_, seen_at)| now.duration_since(**seen_at) <= window)
                    .map(|(ip, _)| *ip)
                    .collect::<Vec<_>>()
            } else {
                Vec::new()
            };
            ips.sort();
            out.insert(user.clone(), ips);
        }
        out
    }

    pub async fn get_active_ip_count(&self, username: &str) -> usize {
        self.drain_cleanup_queue().await;
        let active_ips = self.active_ips.read().await;
        active_ips.get(username).map(|ips| ips.len()).unwrap_or(0)
    }

    /// Get the list of all active IP addresses for a user.
    ///
    /// # Arguments
    /// * `username` - user name
    ///
    /// # Returns
    /// A vector of active IP addresses.
    pub async fn get_active_ips(&self, username: &str) -> Vec<IpAddr> {
        self.drain_cleanup_queue().await;
        let active_ips = self.active_ips.read().await;
        active_ips
            .get(username)
            .map(|ips| ips.iter().copied().collect())
            .map(|ips| ips.keys().copied().collect())
            .unwrap_or_else(Vec::new)
    }

    /// Get statistics for all users.
    ///
    /// # Returns
    /// A vector of tuples: (user_name, active_ip_count, limit).
    pub async fn get_stats(&self) -> Vec<(String, usize, usize)> {
        self.drain_cleanup_queue().await;
        let active_ips = self.active_ips.read().await;
        let max_ips = self.max_ips.read().await;
        let default_max_ips = *self.default_max_ips.read().await;

        let mut stats = Vec::new();

        // Collect statistics for users with active connections
        for (username, user_ips) in active_ips.iter() {
            let limit = max_ips.get(username).copied().unwrap_or(0);
            let limit = max_ips
                .get(username)
                .copied()
                .filter(|limit| *limit > 0)
                .or((default_max_ips > 0).then_some(default_max_ips))
                .unwrap_or(0);
            stats.push((username.clone(), user_ips.len(), limit));
        }

        stats.sort_by(|a, b| a.0.cmp(&b.0)); // sort by user name

        stats.sort_by(|a, b| a.0.cmp(&b.0));
        stats
    }

    /// Clear all active IPs for a user (when needed).
    ///
    /// # Arguments
    /// * `username` - user name
    pub async fn clear_user_ips(&self, username: &str) {
        let mut active_ips = self.active_ips.write().await;
        active_ips.remove(username);
        drop(active_ips);

        let mut recent_ips = self.recent_ips.write().await;
        recent_ips.remove(username);
    }

    /// Clear all statistics (use with care!).
    pub async fn clear_all(&self) {
        let mut active_ips = self.active_ips.write().await;
        active_ips.clear();
        drop(active_ips);

        let mut recent_ips = self.recent_ips.write().await;
        recent_ips.clear();
    }

    /// Check whether a user is connected from the given IP.
    ///
    /// # Arguments
    /// * `username` - user name
    /// * `ip` - IP address to check
    ///
    /// # Returns
    /// `true` if the IP is active, `false` otherwise.
    pub async fn is_ip_active(&self, username: &str, ip: IpAddr) -> bool {
        self.drain_cleanup_queue().await;
        let active_ips = self.active_ips.read().await;
        active_ips
            .get(username)
            .map(|ips| ips.contains(&ip))
            .map(|ips| ips.contains_key(&ip))
            .unwrap_or(false)
    }

    /// Get the limit for a user.
    ///
    /// # Arguments
    /// * `username` - user name
    ///
    /// # Returns
    /// The IP-address limit, or None if no limit is set.
    pub async fn get_user_limit(&self, username: &str) -> Option<usize> {
        let default_max_ips = *self.default_max_ips.read().await;
        let max_ips = self.max_ips.read().await;
        max_ips.get(username).copied()
        max_ips
            .get(username)
            .copied()
            .filter(|limit| *limit > 0)
            .or((default_max_ips > 0).then_some(default_max_ips))
    }

    /// Format the statistics as human-readable text.
    ///
    /// # Returns
    /// A string with statistics for logs or monitoring.
    pub async fn format_stats(&self) -> String {
        let stats = self.get_stats().await;

        if stats.is_empty() {
            return String::from("No active users");
        }

        let mut output = String::from("User IP Statistics:\n");
        output.push_str("==================\n");

        for (username, active_count, limit) in stats {
            output.push_str(&format!(
                "User: {:<20} Active IPs: {}/{}\n",
                username,
                active_count,
                if limit > 0 { limit.to_string() } else { "unlimited".to_string() }
                if limit > 0 {
                    limit.to_string()
                } else {
                    "unlimited".to_string()
                }
            ));

            let ips = self.get_active_ips(&username).await;
            for ip in ips {
                output.push_str(&format!("  └─ {}\n", ip));
                output.push_str(&format!("  - {}\n", ip));
            }
        }

        output
    }
}
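
Given the format strings above, the output of format_stats looks roughly like
this (user names and addresses illustrative):

    User IP Statistics:
    ==================
    User: alice                Active IPs: 2/5
      - 192.168.1.10
      - 192.168.1.11
    User: bob                  Active IPs: 1/unlimited
      - 10.0.0.7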

@@ -257,14 +483,11 @@ impl Default for UserIpTracker {
    }
}

// ============================================================================
// TESTS
// ============================================================================

#[cfg(test)]
mod tests {
    use super::*;
    use std::net::{IpAddr, Ipv4Addr, Ipv6Addr};
    use std::sync::atomic::Ordering;

    fn test_ipv4(oct1: u8, oct2: u8, oct3: u8, oct4: u8) -> IpAddr {
        IpAddr::V4(Ipv4Addr::new(oct1, oct2, oct3, oct4))

@@ -283,17 +506,33 @@ mod tests {
        let ip2 = test_ipv4(192, 168, 1, 2);
        let ip3 = test_ipv4(192, 168, 1, 3);

        // The first two IPs should be accepted
        assert!(tracker.check_and_add("test_user", ip1).await.is_ok());
        assert!(tracker.check_and_add("test_user", ip2).await.is_ok());

        // The third IP should be rejected
        assert!(tracker.check_and_add("test_user", ip3).await.is_err());

        // Verify the counter
        assert_eq!(tracker.get_active_ip_count("test_user").await, 2);
    }

    #[tokio::test]
    async fn test_active_window_rejects_new_ip_and_keeps_existing_session() {
        let tracker = UserIpTracker::new();
        tracker.set_user_limit("test_user", 1).await;
        tracker
            .set_limit_policy(UserMaxUniqueIpsMode::ActiveWindow, 30)
            .await;

        let ip1 = test_ipv4(10, 10, 10, 1);
        let ip2 = test_ipv4(10, 10, 10, 2);

        assert!(tracker.check_and_add("test_user", ip1).await.is_ok());
        assert!(tracker.is_ip_active("test_user", ip1).await);
        assert!(tracker.check_and_add("test_user", ip2).await.is_err());

        // Existing session remains active; only new unique IP is denied.
        assert!(tracker.is_ip_active("test_user", ip1).await);
        assert_eq!(tracker.get_active_ip_count("test_user").await, 1);
    }

    #[tokio::test]
    async fn test_reconnection_from_same_ip() {
        let tracker = UserIpTracker::new();

@@ -301,16 +540,29 @@ mod tests {

        let ip1 = test_ipv4(192, 168, 1, 1);

        // First connection
        assert!(tracker.check_and_add("test_user", ip1).await.is_ok());

        // A repeat connection from the same IP should pass
        assert!(tracker.check_and_add("test_user", ip1).await.is_ok());

        // The counter must not grow
        assert_eq!(tracker.get_active_ip_count("test_user").await, 1);
    }

    #[tokio::test]
    async fn test_same_ip_disconnect_keeps_active_while_other_session_alive() {
        let tracker = UserIpTracker::new();
        tracker.set_user_limit("test_user", 2).await;

        let ip1 = test_ipv4(192, 168, 1, 1);

        assert!(tracker.check_and_add("test_user", ip1).await.is_ok());
        assert!(tracker.check_and_add("test_user", ip1).await.is_ok());
        assert_eq!(tracker.get_active_ip_count("test_user").await, 1);

        tracker.remove_ip("test_user", ip1).await;
        assert_eq!(tracker.get_active_ip_count("test_user").await, 1);

        tracker.remove_ip("test_user", ip1).await;
        assert_eq!(tracker.get_active_ip_count("test_user").await, 0);
    }

    #[tokio::test]
    async fn test_ip_removal() {
        let tracker = UserIpTracker::new();

@@ -320,36 +572,28 @@ mod tests {
        let ip2 = test_ipv4(192, 168, 1, 2);
        let ip3 = test_ipv4(192, 168, 1, 3);

        // Add two IPs
        assert!(tracker.check_and_add("test_user", ip1).await.is_ok());
        assert!(tracker.check_and_add("test_user", ip2).await.is_ok());

        // The third must not pass
        assert!(tracker.check_and_add("test_user", ip3).await.is_err());

        // Remove the first IP
        tracker.remove_ip("test_user", ip1).await;

        // Now the third should pass
        assert!(tracker.check_and_add("test_user", ip3).await.is_ok());

        assert_eq!(tracker.get_active_ip_count("test_user").await, 2);
    }

    #[tokio::test]
    async fn test_no_limit() {
        let tracker = UserIpTracker::new();
        // No limit is set for test_user

        let ip1 = test_ipv4(192, 168, 1, 1);
        let ip2 = test_ipv4(192, 168, 1, 2);
        let ip3 = test_ipv4(192, 168, 1, 3);

        // Without a limit, all IPs should pass
        assert!(tracker.check_and_add("test_user", ip1).await.is_ok());
        assert!(tracker.check_and_add("test_user", ip2).await.is_ok());
        assert!(tracker.check_and_add("test_user", ip3).await.is_ok());

        assert_eq!(tracker.get_active_ip_count("test_user").await, 3);
    }

@@ -362,11 +606,9 @@ mod tests {
        let ip1 = test_ipv4(192, 168, 1, 1);
        let ip2 = test_ipv4(192, 168, 1, 2);

        // user1 may use 2 IPs
        assert!(tracker.check_and_add("user1", ip1).await.is_ok());
        assert!(tracker.check_and_add("user1", ip2).await.is_ok());

        // user2 may use only 1 IP
        assert!(tracker.check_and_add("user2", ip1).await.is_ok());
        assert!(tracker.check_and_add("user2", ip2).await.is_err());
    }

@@ -379,10 +621,9 @@ mod tests {
        let ipv4 = test_ipv4(192, 168, 1, 1);
        let ipv6 = test_ipv6();

        // Both address families must work
        assert!(tracker.check_and_add("test_user", ipv4).await.is_ok());
        assert!(tracker.check_and_add("test_user", ipv6).await.is_ok());

        assert_eq!(tracker.get_active_ip_count("test_user").await, 2);
    }

@@ -417,8 +658,7 @@ mod tests {

        let stats = tracker.get_stats().await;
        assert_eq!(stats.len(), 2);

        // Both users must be present in the statistics
        assert!(stats.iter().any(|(name, _, _)| name == "user1"));
        assert!(stats.iter().any(|(name, _, _)| name == "user2"));
    }

@@ -427,10 +667,10 @@ mod tests {
    async fn test_clear_user_ips() {
        let tracker = UserIpTracker::new();
        let ip1 = test_ipv4(192, 168, 1, 1);

        tracker.check_and_add("test_user", ip1).await.unwrap();
        assert_eq!(tracker.get_active_ip_count("test_user").await, 1);

        tracker.clear_user_ips("test_user").await;
        assert_eq!(tracker.get_active_ip_count("test_user").await, 0);
    }

@@ -440,9 +680,9 @@ mod tests {
        let tracker = UserIpTracker::new();
        let ip1 = test_ipv4(192, 168, 1, 1);
        let ip2 = test_ipv4(192, 168, 1, 2);

        tracker.check_and_add("test_user", ip1).await.unwrap();

        assert!(tracker.is_ip_active("test_user", ip1).await);
        assert!(!tracker.is_ip_active("test_user", ip2).await);
    }

@@ -450,15 +690,165 @@ mod tests {
    #[tokio::test]
    async fn test_load_limits_from_config() {
        let tracker = UserIpTracker::new();

        let mut config_limits = HashMap::new();
        config_limits.insert("user1".to_string(), 5);
        config_limits.insert("user2".to_string(), 3);

        tracker.load_limits(&config_limits).await;

        tracker.load_limits(0, &config_limits).await;

        assert_eq!(tracker.get_user_limit("user1").await, Some(5));
        assert_eq!(tracker.get_user_limit("user2").await, Some(3));
        assert_eq!(tracker.get_user_limit("user3").await, None);
    }

    #[tokio::test]
    async fn test_load_limits_replaces_previous_map() {
        let tracker = UserIpTracker::new();

        let mut first = HashMap::new();
        first.insert("user1".to_string(), 2);
        first.insert("user2".to_string(), 3);
        tracker.load_limits(0, &first).await;

        let mut second = HashMap::new();
        second.insert("user2".to_string(), 5);
        tracker.load_limits(0, &second).await;

        assert_eq!(tracker.get_user_limit("user1").await, None);
        assert_eq!(tracker.get_user_limit("user2").await, Some(5));
    }

    #[tokio::test]
    async fn test_global_each_limit_applies_without_user_override() {
        let tracker = UserIpTracker::new();
        tracker.load_limits(2, &HashMap::new()).await;

        let ip1 = test_ipv4(172, 16, 0, 1);
        let ip2 = test_ipv4(172, 16, 0, 2);
        let ip3 = test_ipv4(172, 16, 0, 3);

        assert!(tracker.check_and_add("test_user", ip1).await.is_ok());
        assert!(tracker.check_and_add("test_user", ip2).await.is_ok());
        assert!(tracker.check_and_add("test_user", ip3).await.is_err());
        assert_eq!(tracker.get_user_limit("test_user").await, Some(2));
    }

    #[tokio::test]
    async fn test_user_override_wins_over_global_each_limit() {
        let tracker = UserIpTracker::new();
        let mut limits = HashMap::new();
        limits.insert("test_user".to_string(), 1);
        tracker.load_limits(3, &limits).await;

        let ip1 = test_ipv4(172, 17, 0, 1);
        let ip2 = test_ipv4(172, 17, 0, 2);

        assert!(tracker.check_and_add("test_user", ip1).await.is_ok());
        assert!(tracker.check_and_add("test_user", ip2).await.is_err());
        assert_eq!(tracker.get_user_limit("test_user").await, Some(1));
    }

    #[tokio::test]
    async fn test_time_window_mode_blocks_recent_ip_churn() {
        let tracker = UserIpTracker::new();
        tracker.set_user_limit("test_user", 1).await;
        tracker
            .set_limit_policy(UserMaxUniqueIpsMode::TimeWindow, 30)
            .await;

        let ip1 = test_ipv4(10, 0, 0, 1);
        let ip2 = test_ipv4(10, 0, 0, 2);

        assert!(tracker.check_and_add("test_user", ip1).await.is_ok());
        tracker.remove_ip("test_user", ip1).await;
        assert!(tracker.check_and_add("test_user", ip2).await.is_err());
    }

    #[tokio::test]
    async fn test_combined_mode_enforces_active_and_recent_limits() {
        let tracker = UserIpTracker::new();
        tracker.set_user_limit("test_user", 1).await;
        tracker
            .set_limit_policy(UserMaxUniqueIpsMode::Combined, 30)
            .await;

        let ip1 = test_ipv4(10, 0, 1, 1);
        let ip2 = test_ipv4(10, 0, 1, 2);

        assert!(tracker.check_and_add("test_user", ip1).await.is_ok());
        assert!(tracker.check_and_add("test_user", ip2).await.is_err());

        tracker.remove_ip("test_user", ip1).await;
        assert!(tracker.check_and_add("test_user", ip2).await.is_err());
    }

    #[tokio::test]
    async fn test_time_window_expires() {
        let tracker = UserIpTracker::new();
        tracker.set_user_limit("test_user", 1).await;
        tracker
            .set_limit_policy(UserMaxUniqueIpsMode::TimeWindow, 1)
            .await;

        let ip1 = test_ipv4(10, 1, 0, 1);
        let ip2 = test_ipv4(10, 1, 0, 2);

        assert!(tracker.check_and_add("test_user", ip1).await.is_ok());
        tracker.remove_ip("test_user", ip1).await;
        assert!(tracker.check_and_add("test_user", ip2).await.is_err());

        tokio::time::sleep(Duration::from_millis(1100)).await;
        assert!(tracker.check_and_add("test_user", ip2).await.is_ok());
    }

    #[tokio::test]
    async fn test_memory_stats_reports_queue_and_entry_counts() {
        let tracker = UserIpTracker::new();
        tracker.set_user_limit("test_user", 4).await;
        let ip1 = test_ipv4(10, 2, 0, 1);
        let ip2 = test_ipv4(10, 2, 0, 2);

        tracker.check_and_add("test_user", ip1).await.unwrap();
        tracker.check_and_add("test_user", ip2).await.unwrap();
        tracker.enqueue_cleanup("test_user".to_string(), ip1);

        let snapshot = tracker.memory_stats().await;
        assert_eq!(snapshot.active_users, 1);
        assert_eq!(snapshot.recent_users, 1);
        assert_eq!(snapshot.active_entries, 2);
        assert_eq!(snapshot.recent_entries, 2);
        assert_eq!(snapshot.cleanup_queue_len, 1);
    }

    #[tokio::test]
    async fn test_compact_prunes_stale_recent_entries() {
        let tracker = UserIpTracker::new();
        tracker
            .set_limit_policy(UserMaxUniqueIpsMode::TimeWindow, 1)
            .await;

        let stale_user = "stale-user".to_string();
        let stale_ip = test_ipv4(10, 3, 0, 1);
        {
            let mut recent_ips = tracker.recent_ips.write().await;
            recent_ips
                .entry(stale_user.clone())
                .or_insert_with(HashMap::new)
                .insert(stale_ip, Instant::now() - Duration::from_secs(5));
        }

        tracker.last_compact_epoch_secs.store(0, Ordering::Relaxed);
        tracker
            .check_and_add("trigger-user", test_ipv4(10, 3, 0, 2))
            .await
            .unwrap();

        let recent_ips = tracker.recent_ips.read().await;
        let stale_exists = recent_ips
            .get(&stale_user)
            .map(|ips| ips.contains_key(&stale_ip))
            .unwrap_or(false);
        assert!(!stale_exists);
    }
}

@@ -0,0 +1,343 @@
//! Logging configuration for telemt.
//!
//! Supports multiple log destinations:
//! - stderr (default, works with systemd journald)
//! - syslog (Unix only, for traditional init systems)
//! - file (with optional rotation)

#![allow(dead_code)] // Infrastructure module - used via CLI flags

use std::path::Path;

use tracing_subscriber::layer::SubscriberExt;
use tracing_subscriber::util::SubscriberInitExt;
use tracing_subscriber::{EnvFilter, fmt, reload};

/// Log destination configuration.
#[derive(Debug, Clone, Default)]
pub enum LogDestination {
    /// Log to stderr (default, captured by systemd journald).
    #[default]
    Stderr,
    /// Log to syslog (Unix only).
    #[cfg(unix)]
    Syslog,
    /// Log to a file with optional rotation.
    File {
        path: String,
        /// Rotate daily if true.
        rotate_daily: bool,
    },
}

/// Logging options parsed from CLI/config.
#[derive(Debug, Clone, Default)]
pub struct LoggingOptions {
    /// Where to send logs.
    pub destination: LogDestination,
    /// Disable ANSI colors.
    pub disable_colors: bool,
}

/// Guard that must be held to keep file logging active.
/// When dropped, flushes and closes log files.
pub struct LoggingGuard {
    _guard: Option<tracing_appender::non_blocking::WorkerGuard>,
}

impl LoggingGuard {
    fn new(guard: Option<tracing_appender::non_blocking::WorkerGuard>) -> Self {
        Self { _guard: guard }
    }

    /// Creates a no-op guard for stderr/syslog logging.
    pub fn noop() -> Self {
        Self { _guard: None }
    }
}
|
||||
|
||||
/// Initialize the tracing subscriber with the specified options.
|
||||
///
|
||||
/// Returns a reload handle for dynamic log level changes and a guard
|
||||
/// that must be kept alive for file logging.
|
||||
pub fn init_logging(
|
||||
opts: &LoggingOptions,
|
||||
initial_filter: &str,
|
||||
) -> (
|
||||
reload::Handle<EnvFilter, impl tracing::Subscriber + Send + Sync>,
|
||||
LoggingGuard,
|
||||
) {
|
||||
let (filter_layer, filter_handle) = reload::Layer::new(EnvFilter::new(initial_filter));
|
||||
|
||||
match &opts.destination {
|
||||
LogDestination::Stderr => {
|
||||
let fmt_layer = fmt::Layer::default()
|
||||
.with_ansi(!opts.disable_colors)
|
||||
.with_target(true);
|
||||
|
||||
tracing_subscriber::registry()
|
||||
.with(filter_layer)
|
||||
.with(fmt_layer)
|
||||
.init();
|
||||
|
||||
(filter_handle, LoggingGuard::noop())
|
||||
}
|
||||
|
||||
#[cfg(unix)]
|
||||
LogDestination::Syslog => {
|
||||
// Use a custom fmt layer that writes to syslog
|
||||
let fmt_layer = fmt::Layer::default()
|
||||
.with_ansi(false)
|
||||
.with_target(false)
|
||||
.with_level(false)
|
||||
.without_time()
|
||||
.with_writer(SyslogMakeWriter::new());
|
||||
|
||||
tracing_subscriber::registry()
|
||||
.with(filter_layer)
|
||||
.with(fmt_layer)
|
||||
.init();
|
||||
|
||||
(filter_handle, LoggingGuard::noop())
|
||||
}
|
||||
|
||||
LogDestination::File { path, rotate_daily } => {
|
||||
let (non_blocking, guard) = if *rotate_daily {
|
||||
// Extract directory and filename prefix
|
||||
let path = Path::new(path);
|
||||
let dir = path.parent().unwrap_or(Path::new("/var/log"));
|
||||
let prefix = path
|
||||
.file_name()
|
||||
.and_then(|s| s.to_str())
|
||||
.unwrap_or("telemt");
|
||||
|
||||
let file_appender = tracing_appender::rolling::daily(dir, prefix);
|
||||
tracing_appender::non_blocking(file_appender)
|
||||
} else {
|
||||
let file = std::fs::OpenOptions::new()
|
||||
.create(true)
|
||||
.append(true)
|
||||
.open(path)
|
||||
.expect("Failed to open log file");
|
||||
tracing_appender::non_blocking(file)
|
||||
};
|
||||
|
||||
let fmt_layer = fmt::Layer::default()
|
||||
.with_ansi(false)
|
||||
.with_target(true)
|
||||
.with_writer(non_blocking);
|
||||
|
||||
tracing_subscriber::registry()
|
||||
.with(filter_layer)
|
||||
.with(fmt_layer)
|
||||
.init();
|
||||
|
||||
(filter_handle, LoggingGuard::new(Some(guard)))
|
||||
}
|
||||
}
|
||||
}
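
// Usage sketch (illustrative; `args` and the "info" filter are assumptions,
// not taken from this file): wire logging up once at startup, keep the guard
// alive for the life of the process, and use the reload handle to change
// verbosity at runtime.
//
//     let args: Vec<String> = std::env::args().skip(1).collect();
//     let opts = LoggingOptions {
//         destination: parse_log_destination(&args),
//         disable_colors: false,
//     };
//     let (reload_handle, _guard) = init_logging(&opts, "info");
//     // e.g. from a SIGHUP handler:
//     let _ = reload_handle.reload(EnvFilter::new("debug"));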

/// Syslog writer for tracing.
#[cfg(unix)]
#[derive(Clone, Copy)]
struct SyslogMakeWriter;

#[cfg(unix)]
#[derive(Clone, Copy)]
struct SyslogWriter {
    priority: libc::c_int,
}

#[cfg(unix)]
impl SyslogMakeWriter {
    fn new() -> Self {
        // Open syslog connection on first use
        static INIT: std::sync::Once = std::sync::Once::new();
        INIT.call_once(|| {
            unsafe {
                // Open syslog with ident "telemt", LOG_PID, LOG_DAEMON facility
                let ident = b"telemt\0".as_ptr() as *const libc::c_char;
                libc::openlog(ident, libc::LOG_PID | libc::LOG_NDELAY, libc::LOG_DAEMON);
            }
        });
        Self
    }
}

#[cfg(unix)]
fn syslog_priority_for_level(level: &tracing::Level) -> libc::c_int {
    match *level {
        tracing::Level::ERROR => libc::LOG_ERR,
        tracing::Level::WARN => libc::LOG_WARNING,
        tracing::Level::INFO => libc::LOG_INFO,
        tracing::Level::DEBUG => libc::LOG_DEBUG,
        tracing::Level::TRACE => libc::LOG_DEBUG,
    }
}

#[cfg(unix)]
impl std::io::Write for SyslogWriter {
    fn write(&mut self, buf: &[u8]) -> std::io::Result<usize> {
        // Convert to C string, stripping newlines
        let msg = String::from_utf8_lossy(buf);
        let msg = msg.trim_end();

        if msg.is_empty() {
            return Ok(buf.len());
        }

        // Write to syslog
        let c_msg = std::ffi::CString::new(msg.as_bytes())
            .unwrap_or_else(|_| std::ffi::CString::new("(invalid utf8)").unwrap());

        unsafe {
            libc::syslog(
                self.priority,
                b"%s\0".as_ptr() as *const libc::c_char,
                c_msg.as_ptr(),
            );
        }

        Ok(buf.len())
    }

    fn flush(&mut self) -> std::io::Result<()> {
        Ok(())
    }
}

#[cfg(unix)]
impl<'a> tracing_subscriber::fmt::MakeWriter<'a> for SyslogMakeWriter {
    type Writer = SyslogWriter;

    fn make_writer(&'a self) -> Self::Writer {
        SyslogWriter {
            priority: libc::LOG_INFO,
        }
    }

    fn make_writer_for(&'a self, meta: &tracing::Metadata<'_>) -> Self::Writer {
        SyslogWriter {
            priority: syslog_priority_for_level(meta.level()),
        }
    }
}
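
// Note: tracing calls `make_writer_for` once per event, so each record gets a
// syslog priority derived from its level (e.g. `warn!` maps to LOG_WARNING),
// while the plain `make_writer` path defaults to LOG_INFO.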

/// Parse log destination from CLI arguments.
pub fn parse_log_destination(args: &[String]) -> LogDestination {
    let mut i = 0;
    while i < args.len() {
        match args[i].as_str() {
            #[cfg(unix)]
            "--syslog" => {
                return LogDestination::Syslog;
            }
            "--log-file" => {
                i += 1;
                if i < args.len() {
                    return LogDestination::File {
                        path: args[i].clone(),
                        rotate_daily: false,
                    };
                }
            }
            s if s.starts_with("--log-file=") => {
                return LogDestination::File {
                    path: s.trim_start_matches("--log-file=").to_string(),
                    rotate_daily: false,
                };
            }
            "--log-file-daily" => {
                i += 1;
                if i < args.len() {
                    return LogDestination::File {
                        path: args[i].clone(),
                        rotate_daily: true,
                    };
                }
            }
            s if s.starts_with("--log-file-daily=") => {
                return LogDestination::File {
                    path: s.trim_start_matches("--log-file-daily=").to_string(),
                    rotate_daily: true,
                };
            }
            _ => {}
        }
        i += 1;
    }
    LogDestination::Stderr
}
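
// Scan order matters: the first recognized logging flag wins, so for example
// `--syslog --log-file=/tmp/t.log` selects syslog. A minimal check:
//
//     let args = vec!["--log-file=/var/log/telemt.log".to_string()];
//     assert!(matches!(
//         parse_log_destination(&args),
//         LogDestination::File { rotate_daily: false, .. }
//     ));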

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_parse_log_destination_default() {
        let args: Vec<String> = vec![];
        assert!(matches!(
            parse_log_destination(&args),
            LogDestination::Stderr
        ));
    }

    #[test]
    fn test_parse_log_destination_file() {
        let args = vec!["--log-file".to_string(), "/var/log/telemt.log".to_string()];
        match parse_log_destination(&args) {
            LogDestination::File { path, rotate_daily } => {
                assert_eq!(path, "/var/log/telemt.log");
                assert!(!rotate_daily);
            }
            _ => panic!("Expected File destination"),
        }
    }

    #[test]
    fn test_parse_log_destination_file_daily() {
        let args = vec!["--log-file-daily=/var/log/telemt".to_string()];
        match parse_log_destination(&args) {
            LogDestination::File { path, rotate_daily } => {
                assert_eq!(path, "/var/log/telemt");
                assert!(rotate_daily);
            }
            _ => panic!("Expected File destination"),
        }
    }

    #[cfg(unix)]
    #[test]
    fn test_parse_log_destination_syslog() {
        let args = vec!["--syslog".to_string()];
        assert!(matches!(
            parse_log_destination(&args),
            LogDestination::Syslog
        ));
    }

    #[cfg(unix)]
    #[test]
    fn test_syslog_priority_for_level_mapping() {
        assert_eq!(
            syslog_priority_for_level(&tracing::Level::ERROR),
            libc::LOG_ERR
        );
        assert_eq!(
            syslog_priority_for_level(&tracing::Level::WARN),
            libc::LOG_WARNING
        );
        assert_eq!(
            syslog_priority_for_level(&tracing::Level::INFO),
            libc::LOG_INFO
        );
        assert_eq!(
            syslog_priority_for_level(&tracing::Level::DEBUG),
            libc::LOG_DEBUG
        );
        assert_eq!(
            syslog_priority_for_level(&tracing::Level::TRACE),
            libc::LOG_DEBUG
        );
    }
}
@@ -0,0 +1,170 @@
use std::sync::Arc;
use std::time::{Duration, Instant};

use tokio::sync::watch;
use tracing::{info, warn};

use crate::config::ProxyConfig;
use crate::proxy::route_mode::{RelayRouteMode, RouteRuntimeController};
use crate::transport::middle_proxy::MePool;

const STARTUP_FALLBACK_AFTER: Duration = Duration::from_secs(80);
const RUNTIME_FALLBACK_AFTER: Duration = Duration::from_secs(6);
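
// Grace windows for strict (non-fast) fallback: before the ME pool has ever
// been observed ready, the longer startup window applies; once it has been
// ready at least once, the short runtime window is used instead.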

pub(crate) async fn configure_admission_gate(
    config: &Arc<ProxyConfig>,
    me_pool: Option<Arc<MePool>>,
    route_runtime: Arc<RouteRuntimeController>,
    admission_tx: &watch::Sender<bool>,
    config_rx: watch::Receiver<Arc<ProxyConfig>>,
) {
    if config.general.use_middle_proxy {
        if let Some(pool) = me_pool.as_ref() {
            let initial_ready = pool.admission_ready_conditional_cast().await;
            let mut fallback_enabled = config.general.me2dc_fallback;
            let mut fast_fallback_enabled = fallback_enabled && config.general.me2dc_fast;
            let (initial_gate_open, initial_route_mode, initial_fallback_reason) =
                if initial_ready {
                    (true, RelayRouteMode::Middle, None)
                } else if fast_fallback_enabled {
                    (
                        true,
                        RelayRouteMode::Direct,
                        Some("fast_not_ready_fallback"),
                    )
                } else {
                    (false, RelayRouteMode::Middle, None)
                };
            admission_tx.send_replace(initial_gate_open);
            let _ = route_runtime.set_mode(initial_route_mode);
            if initial_ready {
                info!("Conditional-admission gate: open / ME pool READY");
            } else if let Some(reason) = initial_fallback_reason {
                warn!(
                    fallback_reason = reason,
                    "Conditional-admission gate opened in ME fast fallback mode"
                );
            } else {
                warn!("Conditional-admission gate: closed / ME pool is NOT ready");
            }

            let pool_for_gate = pool.clone();
            let admission_tx_gate = admission_tx.clone();
            let route_runtime_gate = route_runtime.clone();
            let mut config_rx_gate = config_rx.clone();
            let mut admission_poll_ms = config.general.me_admission_poll_ms.max(1);
            tokio::spawn(async move {
                let mut gate_open = initial_gate_open;
                let mut route_mode = initial_route_mode;
                let mut ready_observed = initial_ready;
                let mut not_ready_since = if initial_ready {
                    None
                } else {
                    Some(Instant::now())
                };
                loop {
                    tokio::select! {
                        changed = config_rx_gate.changed() => {
                            if changed.is_err() {
                                break;
                            }
                            let cfg = config_rx_gate.borrow_and_update().clone();
                            admission_poll_ms = cfg.general.me_admission_poll_ms.max(1);
                            fallback_enabled = cfg.general.me2dc_fallback;
                            fast_fallback_enabled = cfg.general.me2dc_fallback && cfg.general.me2dc_fast;
                            continue;
                        }
                        _ = tokio::time::sleep(Duration::from_millis(admission_poll_ms)) => {}
                    }
                    let ready = pool_for_gate.admission_ready_conditional_cast().await;
                    let now = Instant::now();
                    let (next_gate_open, next_route_mode, next_fallback_reason) = if ready {
                        ready_observed = true;
                        not_ready_since = None;
                        (true, RelayRouteMode::Middle, None)
                    } else if fast_fallback_enabled {
                        (
                            true,
                            RelayRouteMode::Direct,
                            Some("fast_not_ready_fallback"),
                        )
                    } else {
                        let not_ready_started_at = *not_ready_since.get_or_insert(now);
                        let not_ready_for = now.saturating_duration_since(not_ready_started_at);
                        let fallback_after = if ready_observed {
                            RUNTIME_FALLBACK_AFTER
                        } else {
                            STARTUP_FALLBACK_AFTER
                        };
                        if fallback_enabled && not_ready_for > fallback_after {
                            (true, RelayRouteMode::Direct, Some("strict_grace_fallback"))
                        } else {
                            (false, RelayRouteMode::Middle, None)
                        }
                    };
                    let next_fallback_active = next_fallback_reason.is_some();

                    if next_route_mode != route_mode {
                        route_mode = next_route_mode;
                        if let Some(snapshot) = route_runtime_gate.set_mode(route_mode) {
                            if matches!(route_mode, RelayRouteMode::Middle) {
                                info!(
                                    target_mode = route_mode.as_str(),
                                    cutover_generation = snapshot.generation,
                                    "Middle-End routing restored for new sessions"
                                );
                            } else {
                                let fallback_reason = next_fallback_reason.unwrap_or("unknown");
                                if fallback_reason == "strict_grace_fallback" {
                                    let fallback_after = if ready_observed {
                                        RUNTIME_FALLBACK_AFTER
                                    } else {
                                        STARTUP_FALLBACK_AFTER
                                    };
                                    warn!(
                                        target_mode = route_mode.as_str(),
                                        cutover_generation = snapshot.generation,
                                        grace_secs = fallback_after.as_secs(),
                                        fallback_reason,
                                        "ME pool stayed not-ready beyond grace; routing new sessions via Direct-DC"
                                    );
                                } else {
                                    warn!(
                                        target_mode = route_mode.as_str(),
                                        cutover_generation = snapshot.generation,
                                        fallback_reason,
                                        "ME pool not-ready; routing new sessions via Direct-DC (fast mode)"
                                    );
                                }
                            }
                        }
                    }

                    if next_gate_open != gate_open {
                        gate_open = next_gate_open;
                        admission_tx_gate.send_replace(gate_open);
                        if gate_open {
                            if next_fallback_active {
                                warn!(
                                    fallback_reason = next_fallback_reason.unwrap_or("unknown"),
                                    "Conditional-admission gate opened in ME fallback mode"
                                );
                            } else {
                                info!("Conditional-admission gate opened / ME pool READY");
                            }
                        } else {
                            warn!("Conditional-admission gate closed / ME pool is NOT ready");
                        }
                    }
                }
            });
        } else {
            admission_tx.send_replace(false);
            let _ = route_runtime.set_mode(RelayRouteMode::Direct);
            warn!("Conditional-admission gate: closed / ME pool is UNAVAILABLE");
        }
    } else {
        admission_tx.send_replace(true);
        let _ = route_runtime.set_mode(RelayRouteMode::Direct);
    }
}
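
// Consumer-side sketch (shape assumed from the accept loops that pair with
// `admission_tx`): readers sample the watch channel and drop connections
// while the gate is closed.
//
//     if !*admission_rx.borrow() {
//         drop(stream); // gate closed: ME pool not ready, no fallback active
//     }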
@@ -0,0 +1,232 @@
#![allow(clippy::too_many_arguments)]

use std::sync::Arc;
use std::time::Instant;

use tokio::sync::RwLock;
use tracing::info;

use crate::config::ProxyConfig;
use crate::crypto::SecureRandom;
use crate::network::probe::NetworkDecision;
use crate::startup::{
    COMPONENT_DC_CONNECTIVITY_PING, COMPONENT_ME_CONNECTIVITY_PING, COMPONENT_RUNTIME_READY,
    StartupTracker,
};
use crate::transport::UpstreamManager;
use crate::transport::middle_proxy::{
    MePingFamily, MePingSample, MePool, format_me_route, format_sample_line, run_me_ping,
};

pub(crate) async fn run_startup_connectivity(
    config: &Arc<ProxyConfig>,
    me_pool: &Option<Arc<MePool>>,
    rng: Arc<SecureRandom>,
    startup_tracker: &Arc<StartupTracker>,
    upstream_manager: Arc<UpstreamManager>,
    prefer_ipv6: bool,
    decision: &NetworkDecision,
    process_started_at: Instant,
    api_me_pool: Arc<RwLock<Option<Arc<MePool>>>>,
) {
    if me_pool.is_some() {
        startup_tracker
            .start_component(
                COMPONENT_ME_CONNECTIVITY_PING,
                Some("run startup ME connectivity check".to_string()),
            )
            .await;
    } else {
        startup_tracker
            .skip_component(
                COMPONENT_ME_CONNECTIVITY_PING,
                Some("ME pool is not available".to_string()),
            )
            .await;
    }
    if let Some(pool) = me_pool {
        let me_results = run_me_ping(pool, &rng).await;

        let v4_ok = me_results.iter().any(|r| {
            matches!(r.family, MePingFamily::V4)
                && r.samples
                    .iter()
                    .any(|s| s.error.is_none() && s.handshake_ms.is_some())
        });
        let v6_ok = me_results.iter().any(|r| {
            matches!(r.family, MePingFamily::V6)
                && r.samples
                    .iter()
                    .any(|s| s.error.is_none() && s.handshake_ms.is_some())
        });

        info!("================= Telegram ME Connectivity =================");
        if v4_ok && v6_ok {
            info!(" IPv4 and IPv6 available");
        } else if v4_ok {
            info!(" IPv4 only / IPv6 unavailable");
        } else if v6_ok {
            info!(" IPv6 only / IPv4 unavailable");
        } else {
            info!(" No ME connectivity");
        }
        let me_route =
            format_me_route(&config.upstreams, &me_results, prefer_ipv6, v4_ok, v6_ok).await;
        info!(" via {}", me_route);
        info!("============================================================");

        use std::collections::BTreeMap;
        let mut grouped: BTreeMap<i32, Vec<MePingSample>> = BTreeMap::new();
        for report in me_results {
            for s in report.samples {
                grouped.entry(s.dc).or_default().push(s);
            }
        }

        let family_order = if prefer_ipv6 {
            vec![MePingFamily::V6, MePingFamily::V4]
        } else {
            vec![MePingFamily::V4, MePingFamily::V6]
        };

        for (dc, samples) in grouped {
            for family in &family_order {
                let fam_samples: Vec<&MePingSample> = samples
                    .iter()
                    .filter(|s| &s.family == family)
                    .collect();
                if fam_samples.is_empty() {
                    continue;
                }

                let fam_label = match family {
                    MePingFamily::V4 => "IPv4",
                    MePingFamily::V6 => "IPv6",
                };
                info!(" DC{} [{}]", dc, fam_label);
                for sample in fam_samples {
                    let line = format_sample_line(sample);
                    info!("{}", line);
                }
            }
        }
        info!("============================================================");
        startup_tracker
            .complete_component(
                COMPONENT_ME_CONNECTIVITY_PING,
                Some("startup ME connectivity check completed".to_string()),
            )
            .await;
    }

    info!("================= Telegram DC Connectivity =================");
    startup_tracker
        .start_component(
            COMPONENT_DC_CONNECTIVITY_PING,
            Some("run startup DC connectivity check".to_string()),
        )
        .await;

    let ping_results = upstream_manager
        .ping_all_dcs(
            prefer_ipv6,
            &config.dc_overrides,
            decision.ipv4_dc,
            decision.ipv6_dc,
        )
        .await;

    for upstream_result in &ping_results {
        let v6_works = upstream_result
            .v6_results
            .iter()
            .any(|r| r.rtt_ms.is_some());
        let v4_works = upstream_result
            .v4_results
            .iter()
            .any(|r| r.rtt_ms.is_some());

        if upstream_result.both_available {
            if prefer_ipv6 {
                info!(" IPv6 in use / IPv4 is fallback");
            } else {
                info!(" IPv4 in use / IPv6 is fallback");
            }
        } else if v6_works && !v4_works {
            info!(" IPv6 only / IPv4 unavailable");
        } else if v4_works && !v6_works {
            info!(" IPv4 only / IPv6 unavailable");
        } else if !v6_works && !v4_works {
            info!(" No DC connectivity");
        }

        info!(" via {}", upstream_result.upstream_name);
        info!("============================================================");

        if v6_works {
            for dc in &upstream_result.v6_results {
                let addr_str = format!("{}:{}", dc.dc_addr.ip(), dc.dc_addr.port());
                match &dc.rtt_ms {
                    Some(rtt) => {
                        info!(" DC{} [IPv6] {} - {:.0} ms", dc.dc_idx, addr_str, rtt);
                    }
                    None => {
                        let err = dc.error.as_deref().unwrap_or("fail");
                        info!(" DC{} [IPv6] {} - FAIL ({})", dc.dc_idx, addr_str, err);
                    }
                }
            }

            info!("============================================================");
        }

        if v4_works {
            for dc in &upstream_result.v4_results {
                let addr_str = format!("{}:{}", dc.dc_addr.ip(), dc.dc_addr.port());
                match &dc.rtt_ms {
                    Some(rtt) => {
                        info!(
                            " DC{} [IPv4] {}\t\t\t\t{:.0} ms",
                            dc.dc_idx, addr_str, rtt
                        );
                    }
                    None => {
                        let err = dc.error.as_deref().unwrap_or("fail");
                        info!(
                            " DC{} [IPv4] {}\t\t\t\tFAIL ({})",
                            dc.dc_idx, addr_str, err
                        );
                    }
                }
            }

            info!("============================================================");
        }
    }
    startup_tracker
        .complete_component(
            COMPONENT_DC_CONNECTIVITY_PING,
            Some("startup DC connectivity check completed".to_string()),
        )
        .await;

    let initialized_secs = process_started_at.elapsed().as_secs();
    let second_suffix = if initialized_secs == 1 { "" } else { "s" };
    startup_tracker
        .start_component(
            COMPONENT_RUNTIME_READY,
            Some("finalize startup runtime state".to_string()),
        )
        .await;
    info!("===================== Telegram Startup =====================");
    info!(
        " DC/ME Initialized in {} second{}",
        initialized_secs, second_suffix
    );
    info!("============================================================");

    if let Some(pool) = me_pool {
        pool.set_runtime_ready(true);
    }
    *api_me_pool.write().await = me_pool.clone();
}
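
// Illustrative banner shape only (addresses, names and timings are
// placeholders, and column spacing is approximate):
//
//     ================= Telegram DC Connectivity =================
//      IPv4 in use / IPv6 is fallback
//      via <upstream_name>
//     ============================================================
//      DC1 [IPv4] <ip>:443    42 ms
//      DC2 [IPv4] <ip>:443    FAIL (timeout)
//     ============================================================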
@@ -0,0 +1,548 @@
#![allow(clippy::items_after_test_module)]

use std::path::PathBuf;
use std::time::Duration;

use tokio::sync::watch;
use tracing::{debug, error, info, warn};

use crate::cli;
use crate::config::ProxyConfig;
use crate::logging::LogDestination;
use crate::transport::UpstreamManager;
use crate::transport::middle_proxy::{
    ProxyConfigData, fetch_proxy_config_with_raw_via_upstream, load_proxy_config_cache,
    save_proxy_config_cache,
};

pub(crate) fn resolve_runtime_config_path(
    config_path_cli: &str,
    startup_cwd: &std::path::Path,
    config_path_explicit: bool,
) -> PathBuf {
    if config_path_explicit {
        let raw = PathBuf::from(config_path_cli);
        let absolute = if raw.is_absolute() {
            raw
        } else {
            startup_cwd.join(raw)
        };
        return absolute.canonicalize().unwrap_or(absolute);
    }

    let etc_telemt = std::path::Path::new("/etc/telemt");
    let candidates = [
        startup_cwd.join("config.toml"),
        startup_cwd.join("telemt.toml"),
        etc_telemt.join("telemt.toml"),
        etc_telemt.join("config.toml"),
    ];
    for candidate in candidates {
        if candidate.is_file() {
            return candidate.canonicalize().unwrap_or(candidate);
        }
    }

    startup_cwd.join("config.toml")
}

/// Parsed CLI arguments.
pub(crate) struct CliArgs {
    pub config_path: String,
    pub config_path_explicit: bool,
    pub data_path: Option<PathBuf>,
    pub silent: bool,
    pub log_level: Option<String>,
    pub log_destination: LogDestination,
}

pub(crate) fn parse_cli() -> CliArgs {
    let mut config_path = "config.toml".to_string();
    let mut config_path_explicit = false;
    let mut data_path: Option<PathBuf> = None;
    let mut silent = false;
    let mut log_level: Option<String> = None;

    let args: Vec<String> = std::env::args().skip(1).collect();

    // Parse log destination
    let log_destination = crate::logging::parse_log_destination(&args);

    // Check for --init first (handled before tokio)
    if let Some(init_opts) = cli::parse_init_args(&args) {
        if let Err(e) = cli::run_init(init_opts) {
            eprintln!("[telemt] Init failed: {}", e);
            std::process::exit(1);
        }
        std::process::exit(0);
    }

    let mut i = 0;
    while i < args.len() {
        match args[i].as_str() {
            "--data-path" => {
                i += 1;
                if i < args.len() {
                    data_path = Some(PathBuf::from(args[i].clone()));
                } else {
                    eprintln!("Missing value for --data-path");
                    std::process::exit(1);
                }
            }
            s if s.starts_with("--data-path=") => {
                data_path = Some(PathBuf::from(
                    s.trim_start_matches("--data-path=").to_string(),
                ));
            }
            "--working-dir" => {
                i += 1;
                if i < args.len() {
                    data_path = Some(PathBuf::from(args[i].clone()));
                } else {
                    eprintln!("Missing value for --working-dir");
                    std::process::exit(1);
                }
            }
            s if s.starts_with("--working-dir=") => {
                data_path = Some(PathBuf::from(
                    s.trim_start_matches("--working-dir=").to_string(),
                ));
            }
            "--silent" | "-s" => {
                silent = true;
            }
            "--log-level" => {
                i += 1;
                if i < args.len() {
                    log_level = Some(args[i].clone());
                }
            }
            s if s.starts_with("--log-level=") => {
                log_level = Some(s.trim_start_matches("--log-level=").to_string());
            }
            "--help" | "-h" => {
                print_help();
                std::process::exit(0);
            }
            "--version" | "-V" => {
                println!("telemt {}", env!("CARGO_PKG_VERSION"));
                std::process::exit(0);
            }
            // Skip daemon-related flags (already parsed)
            "--daemon" | "-d" | "--foreground" | "-f" => {}
            // Skip logging flags (already parsed by parse_log_destination);
            // without these arms the flags would fall through to the
            // unknown-option branch and a space-separated value would be
            // mistaken for the config path.
            #[cfg(unix)]
            "--syslog" => {}
            s if s.starts_with("--log-file") => {
                if !s.contains('=') {
                    i += 1; // skip value (covers --log-file and --log-file-daily)
                }
            }
            s if s.starts_with("--pid-file") => {
                if !s.contains('=') {
                    i += 1; // skip value
                }
            }
            s if s.starts_with("--run-as-user") => {
                if !s.contains('=') {
                    i += 1;
                }
            }
            s if s.starts_with("--run-as-group") => {
                if !s.contains('=') {
                    i += 1;
                }
            }
            s if !s.starts_with('-') => {
                if !matches!(s, "run" | "start" | "stop" | "reload" | "status") {
                    config_path = s.to_string();
                    config_path_explicit = true;
                }
            }
            other => {
                eprintln!("Unknown option: {}", other);
            }
        }
        i += 1;
    }

    CliArgs {
        config_path,
        config_path_explicit,
        data_path,
        silent,
        log_level,
        log_destination,
    }
}
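
// Example invocations using the flags parsed above:
//
//     telemt config.toml
//     telemt --log-level=debug --log-file /var/log/telemt.log /etc/telemt/telemt.toml
//     telemt --data-path=/var/lib/telemt --silent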

fn print_help() {
    eprintln!("Usage: telemt [COMMAND] [OPTIONS] [config.toml]");
    eprintln!();
    eprintln!("Commands:");
    eprintln!("  run                       Run in foreground (default if no command given)");
    #[cfg(unix)]
    {
        eprintln!("  start                     Start as background daemon");
        eprintln!("  stop                      Stop a running daemon");
        eprintln!("  reload                    Reload configuration (send SIGHUP)");
        eprintln!("  status                    Check if daemon is running");
    }
    eprintln!();
    eprintln!("Options:");
    eprintln!(
        "  --data-path <DIR>         Set data directory (absolute path; overrides config value)"
    );
    eprintln!("  --working-dir <DIR>       Alias for --data-path");
    eprintln!("  --silent, -s              Suppress info logs");
    eprintln!("  --log-level <LEVEL>       debug|verbose|normal|silent");
    eprintln!("  --help, -h                Show this help");
    eprintln!("  --version, -V             Show version");
    eprintln!();
    eprintln!("Logging options:");
    eprintln!("  --log-file <PATH>         Log to file (default: stderr)");
    eprintln!("  --log-file-daily <PATH>   Log to file with daily rotation");
    #[cfg(unix)]
    eprintln!("  --syslog                  Log to syslog (Unix only)");
    eprintln!();
    #[cfg(unix)]
    {
        eprintln!("Daemon options (Unix only):");
        eprintln!("  --daemon, -d              Fork to background (daemonize)");
        eprintln!("  --foreground, -f          Explicit foreground mode (for systemd)");
        eprintln!("  --pid-file <PATH>         PID file path (default: /var/run/telemt.pid)");
        eprintln!("  --run-as-user <USER>      Drop privileges to this user after binding");
        eprintln!("  --run-as-group <GROUP>    Drop privileges to this group after binding");
        eprintln!("  --working-dir <DIR>       Working directory for daemon mode");
        eprintln!();
    }
    eprintln!("Setup (fire-and-forget):");
    eprintln!("  --init                    Generate config, install systemd service, start");
    eprintln!("  --port <PORT>             Listen port (default: 443)");
    eprintln!("  --domain <DOMAIN>         TLS domain for masking (default: www.google.com)");
    eprintln!("  --secret <HEX>            32-char hex secret (auto-generated if omitted)");
    eprintln!("  --user <NAME>             Username (default: user)");
    eprintln!("  --config-dir <DIR>        Config directory (default: /etc/telemt)");
    eprintln!("  --no-start                Don't start the service after install");
    #[cfg(unix)]
    {
        eprintln!();
        eprintln!("Examples:");
        eprintln!("  telemt config.toml                    Run in foreground");
        eprintln!("  telemt start config.toml              Start as daemon");
        eprintln!("  telemt start --pid-file /tmp/t.pid    Start with custom PID file");
        eprintln!("  telemt stop                           Stop daemon");
        eprintln!("  telemt reload                         Reload configuration");
        eprintln!("  telemt status                         Check daemon status");
    }
}

#[cfg(test)]
mod tests {
    use super::resolve_runtime_config_path;

    #[test]
    fn resolve_runtime_config_path_anchors_relative_to_startup_cwd() {
        let nonce = std::time::SystemTime::now()
            .duration_since(std::time::UNIX_EPOCH)
            .unwrap()
            .as_nanos();
        let startup_cwd = std::env::temp_dir().join(format!("telemt_cfg_path_{nonce}"));
        std::fs::create_dir_all(&startup_cwd).unwrap();
        let target = startup_cwd.join("config.toml");
        std::fs::write(&target, " ").unwrap();

        let resolved = resolve_runtime_config_path("config.toml", &startup_cwd, true);
        assert_eq!(resolved, target.canonicalize().unwrap());

        let _ = std::fs::remove_file(&target);
        let _ = std::fs::remove_dir(&startup_cwd);
    }

    #[test]
    fn resolve_runtime_config_path_keeps_absolute_for_missing_file() {
        let nonce = std::time::SystemTime::now()
            .duration_since(std::time::UNIX_EPOCH)
            .unwrap()
            .as_nanos();
        let startup_cwd = std::env::temp_dir().join(format!("telemt_cfg_path_missing_{nonce}"));
        std::fs::create_dir_all(&startup_cwd).unwrap();

        let resolved = resolve_runtime_config_path("missing.toml", &startup_cwd, true);
        assert_eq!(resolved, startup_cwd.join("missing.toml"));

        let _ = std::fs::remove_dir(&startup_cwd);
    }

    #[test]
    fn resolve_runtime_config_path_uses_startup_candidates_when_not_explicit() {
        let nonce = std::time::SystemTime::now()
            .duration_since(std::time::UNIX_EPOCH)
            .unwrap()
            .as_nanos();
        let startup_cwd =
            std::env::temp_dir().join(format!("telemt_cfg_startup_candidates_{nonce}"));
        std::fs::create_dir_all(&startup_cwd).unwrap();
        let telemt = startup_cwd.join("telemt.toml");
        std::fs::write(&telemt, " ").unwrap();

        let resolved = resolve_runtime_config_path("config.toml", &startup_cwd, false);
        assert_eq!(resolved, telemt.canonicalize().unwrap());

        let _ = std::fs::remove_file(&telemt);
        let _ = std::fs::remove_dir(&startup_cwd);
    }

    #[test]
    fn resolve_runtime_config_path_defaults_to_startup_config_when_none_found() {
        let nonce = std::time::SystemTime::now()
            .duration_since(std::time::UNIX_EPOCH)
            .unwrap()
            .as_nanos();
        let startup_cwd = std::env::temp_dir().join(format!("telemt_cfg_startup_default_{nonce}"));
        std::fs::create_dir_all(&startup_cwd).unwrap();

        let resolved = resolve_runtime_config_path("config.toml", &startup_cwd, false);
        assert_eq!(resolved, startup_cwd.join("config.toml"));

        let _ = std::fs::remove_dir(&startup_cwd);
    }
}

pub(crate) fn print_proxy_links(host: &str, port: u16, config: &ProxyConfig) {
    info!(target: "telemt::links", "--- Proxy Links ({}) ---", host);
    for user_name in config
        .general
        .links
        .show
        .resolve_users(&config.access.users)
    {
        if let Some(secret) = config.access.users.get(user_name) {
            info!(target: "telemt::links", "User: {}", user_name);
            if config.general.modes.classic {
                info!(
                    target: "telemt::links",
                    " Classic: tg://proxy?server={}&port={}&secret={}",
                    host, port, secret
                );
            }
            if config.general.modes.secure {
                info!(
                    target: "telemt::links",
                    " DD: tg://proxy?server={}&port={}&secret=dd{}",
                    host, port, secret
                );
            }
            if config.general.modes.tls {
                let mut domains = Vec::with_capacity(1 + config.censorship.tls_domains.len());
                domains.push(config.censorship.tls_domain.clone());
                for d in &config.censorship.tls_domains {
                    if !domains.contains(d) {
                        domains.push(d.clone());
                    }
                }

                for domain in domains {
                    let domain_hex = hex::encode(&domain);
                    info!(
                        target: "telemt::links",
                        " EE-TLS: tg://proxy?server={}&port={}&secret=ee{}{}",
                        host, port, secret, domain_hex
                    );
                }
            }
        } else {
            warn!(target: "telemt::links", "User '{}' in show_link not found", user_name);
        }
    }
    info!(target: "telemt::links", "------------------------");
}
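
// Resulting link shapes, with placeholder values (`<secret>` is the 32-char
// hex user secret, `<hex(domain)>` is the hex-encoded TLS domain):
//
//     Classic: tg://proxy?server=203.0.113.1&port=443&secret=<secret>
//     DD:      tg://proxy?server=203.0.113.1&port=443&secret=dd<secret>
//     EE-TLS:  tg://proxy?server=203.0.113.1&port=443&secret=ee<secret><hex(domain)>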

pub(crate) async fn write_beobachten_snapshot(path: &str, payload: &str) -> std::io::Result<()> {
    if let Some(parent) = std::path::Path::new(path).parent()
        && !parent.as_os_str().is_empty()
    {
        tokio::fs::create_dir_all(parent).await?;
    }
    tokio::fs::write(path, payload).await
}

pub(crate) fn unit_label(value: u64, singular: &'static str, plural: &'static str) -> &'static str {
    if value == 1 { singular } else { plural }
}

pub(crate) fn format_uptime(total_secs: u64) -> String {
    const SECS_PER_MINUTE: u64 = 60;
    const SECS_PER_HOUR: u64 = 60 * SECS_PER_MINUTE;
    const SECS_PER_DAY: u64 = 24 * SECS_PER_HOUR;
    const SECS_PER_MONTH: u64 = 30 * SECS_PER_DAY;
    const SECS_PER_YEAR: u64 = 12 * SECS_PER_MONTH;

    let mut remaining = total_secs;
    let years = remaining / SECS_PER_YEAR;
    remaining %= SECS_PER_YEAR;
    let months = remaining / SECS_PER_MONTH;
    remaining %= SECS_PER_MONTH;
    let days = remaining / SECS_PER_DAY;
    remaining %= SECS_PER_DAY;
    let hours = remaining / SECS_PER_HOUR;
    remaining %= SECS_PER_HOUR;
    let minutes = remaining / SECS_PER_MINUTE;
    let seconds = remaining % SECS_PER_MINUTE;

    let mut parts = Vec::new();
    // `>=` so that exact multiples (e.g. exactly one hour) still print the unit.
    if total_secs >= SECS_PER_YEAR {
        parts.push(format!("{} {}", years, unit_label(years, "year", "years")));
    }
    if total_secs >= SECS_PER_MONTH {
        parts.push(format!(
            "{} {}",
            months,
            unit_label(months, "month", "months")
        ));
    }
    if total_secs >= SECS_PER_DAY {
        parts.push(format!("{} {}", days, unit_label(days, "day", "days")));
    }
    if total_secs >= SECS_PER_HOUR {
        parts.push(format!("{} {}", hours, unit_label(hours, "hour", "hours")));
    }
    if total_secs >= SECS_PER_MINUTE {
        parts.push(format!(
            "{} {}",
            minutes,
            unit_label(minutes, "minute", "minutes")
        ));
    }
    parts.push(format!(
        "{} {}",
        seconds,
        unit_label(seconds, "second", "seconds")
    ));

    format!("{} / {} seconds", parts.join(", "), total_secs)
}
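
// For example, format_uptime(3_723) yields
// "1 hour, 2 minutes, 3 seconds / 3723 seconds".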

#[allow(dead_code)]
pub(crate) async fn wait_until_admission_open(admission_rx: &mut watch::Receiver<bool>) -> bool {
    loop {
        if *admission_rx.borrow() {
            return true;
        }
        if admission_rx.changed().await.is_err() {
            return *admission_rx.borrow();
        }
    }
}
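
// Usage sketch: park a task until the gate opens; `false` is returned only
// when the sender side has gone away while the gate was still closed.
//
//     let mut rx = admission_rx.clone();
//     if wait_until_admission_open(&mut rx).await {
//         // gate open: proceed with the connection
//     }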

pub(crate) fn is_expected_handshake_eof(err: &crate::error::ProxyError) -> bool {
    err.to_string().contains("expected 64 bytes, got 0")
}

pub(crate) async fn load_startup_proxy_config_snapshot(
    url: &str,
    cache_path: Option<&str>,
    me2dc_fallback: bool,
    label: &'static str,
    upstream: Option<std::sync::Arc<UpstreamManager>>,
) -> Option<ProxyConfigData> {
    loop {
        match fetch_proxy_config_with_raw_via_upstream(url, upstream.clone()).await {
            Ok((cfg, raw)) => {
                if !cfg.map.is_empty() {
                    if let Some(path) = cache_path
                        && let Err(e) = save_proxy_config_cache(path, &raw).await
                    {
                        warn!(error = %e, path, snapshot = label, "Failed to store startup proxy-config cache");
                    }
                    return Some(cfg);
                }

                warn!(
                    snapshot = label,
                    url, "Startup proxy-config is empty; trying disk cache"
                );
                if let Some(path) = cache_path {
                    match load_proxy_config_cache(path).await {
                        Ok(cached) if !cached.map.is_empty() => {
                            info!(
                                snapshot = label,
                                path,
                                proxy_for_lines = cached.proxy_for_lines,
                                "Loaded startup proxy-config from disk cache"
                            );
                            return Some(cached);
                        }
                        Ok(_) => {
                            warn!(
                                snapshot = label,
                                path, "Startup proxy-config cache is empty; ignoring cache file"
                            );
                        }
                        Err(cache_err) => {
                            debug!(
                                snapshot = label,
                                path,
                                error = %cache_err,
                                "Startup proxy-config cache unavailable"
                            );
                        }
                    }
                }

                if me2dc_fallback {
                    error!(
                        snapshot = label,
                        "Startup proxy-config unavailable and no saved config found; falling back to direct mode"
                    );
                    return None;
                }

                warn!(
                    snapshot = label,
                    retry_in_secs = 2,
                    "Startup proxy-config unavailable and no saved config found; retrying because me2dc_fallback=false"
                );
                tokio::time::sleep(Duration::from_secs(2)).await;
            }
            Err(fetch_err) => {
                if let Some(path) = cache_path {
                    match load_proxy_config_cache(path).await {
                        Ok(cached) if !cached.map.is_empty() => {
                            info!(
                                snapshot = label,
                                path,
                                proxy_for_lines = cached.proxy_for_lines,
                                "Loaded startup proxy-config from disk cache"
                            );
                            return Some(cached);
                        }
                        Ok(_) => {
                            warn!(
                                snapshot = label,
                                path, "Startup proxy-config cache is empty; ignoring cache file"
                            );
                        }
                        Err(cache_err) => {
                            debug!(
                                snapshot = label,
                                path,
                                error = %cache_err,
                                "Startup proxy-config cache unavailable"
                            );
                        }
                    }
                }

                if me2dc_fallback {
                    error!(
                        snapshot = label,
                        error = %fetch_err,
                        "Startup proxy-config unavailable and no cached data; falling back to direct mode"
                    );
                    return None;
                }

                warn!(
                    snapshot = label,
                    error = %fetch_err,
                    retry_in_secs = 2,
                    "Startup proxy-config unavailable; retrying because me2dc_fallback=false"
                );
                tokio::time::sleep(Duration::from_secs(2)).await;
            }
        }
    }
}
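
// Outcome summary: a non-empty fetched config is cached (best effort) and
// returned; an empty or failed fetch falls back to the disk cache; with no
// usable cache, me2dc_fallback=true yields None (direct mode), otherwise the
// fetch is retried every 2 seconds until something usable appears.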
@@ -0,0 +1,546 @@
use std::error::Error;
use std::net::{IpAddr, SocketAddr};
use std::sync::Arc;
use std::time::Duration;

use tokio::net::TcpListener;
#[cfg(unix)]
use tokio::net::UnixListener;
use tokio::sync::{Semaphore, watch};
use tracing::{debug, error, info, warn};

use crate::config::ProxyConfig;
use crate::crypto::SecureRandom;
use crate::ip_tracker::UserIpTracker;
use crate::proxy::ClientHandler;
use crate::proxy::route_mode::{ROUTE_SWITCH_ERROR_MSG, RouteRuntimeController};
use crate::proxy::shared_state::ProxySharedState;
use crate::startup::{COMPONENT_LISTENERS_BIND, StartupTracker};
use crate::stats::beobachten::BeobachtenStore;
use crate::stats::{ReplayChecker, Stats};
use crate::stream::BufferPool;
use crate::tls_front::TlsFrontCache;
use crate::transport::middle_proxy::MePool;
use crate::transport::{ListenOptions, UpstreamManager, create_listener, find_listener_processes};

use super::helpers::{is_expected_handshake_eof, print_proxy_links};

pub(crate) struct BoundListeners {
    pub(crate) listeners: Vec<(TcpListener, bool)>,
    pub(crate) has_unix_listener: bool,
}

#[allow(clippy::too_many_arguments)]
pub(crate) async fn bind_listeners(
    config: &Arc<ProxyConfig>,
    decision_ipv4_dc: bool,
    decision_ipv6_dc: bool,
    detected_ip_v4: Option<IpAddr>,
    detected_ip_v6: Option<IpAddr>,
    startup_tracker: &Arc<StartupTracker>,
    config_rx: watch::Receiver<Arc<ProxyConfig>>,
    admission_rx: watch::Receiver<bool>,
    stats: Arc<Stats>,
    upstream_manager: Arc<UpstreamManager>,
    replay_checker: Arc<ReplayChecker>,
    buffer_pool: Arc<BufferPool>,
    rng: Arc<SecureRandom>,
    me_pool: Option<Arc<MePool>>,
    route_runtime: Arc<RouteRuntimeController>,
    tls_cache: Option<Arc<TlsFrontCache>>,
    ip_tracker: Arc<UserIpTracker>,
    beobachten: Arc<BeobachtenStore>,
    shared: Arc<ProxySharedState>,
    max_connections: Arc<Semaphore>,
) -> Result<BoundListeners, Box<dyn Error>> {
    startup_tracker
        .start_component(
            COMPONENT_LISTENERS_BIND,
            Some("bind TCP/Unix listeners".to_string()),
        )
        .await;
    let mut listeners = Vec::new();

    for listener_conf in &config.server.listeners {
        let addr = SocketAddr::new(listener_conf.ip, config.server.port);
        if addr.is_ipv4() && !decision_ipv4_dc {
            warn!(%addr, "Skipping IPv4 listener: IPv4 disabled by [network]");
            continue;
        }
        if addr.is_ipv6() && !decision_ipv6_dc {
            warn!(%addr, "Skipping IPv6 listener: IPv6 disabled by [network]");
            continue;
        }
        let options = ListenOptions {
            reuse_port: listener_conf.reuse_allow,
            ipv6_only: listener_conf.ip.is_ipv6(),
            backlog: config.server.listen_backlog,
            ..Default::default()
        };

        match create_listener(addr, &options) {
            Ok(socket) => {
                let listener = TcpListener::from_std(socket.into())?;
                info!("Listening on {}", addr);
                let listener_proxy_protocol = listener_conf
                    .proxy_protocol
                    .unwrap_or(config.server.proxy_protocol);

                let public_host = if let Some(ref announce) = listener_conf.announce {
                    announce.clone()
                } else if listener_conf.ip.is_unspecified() {
                    if listener_conf.ip.is_ipv4() {
                        detected_ip_v4
                            .map(|ip| ip.to_string())
                            .unwrap_or_else(|| listener_conf.ip.to_string())
                    } else {
                        detected_ip_v6
                            .map(|ip| ip.to_string())
                            .unwrap_or_else(|| listener_conf.ip.to_string())
                    }
                } else {
                    listener_conf.ip.to_string()
                };

                if config.general.links.public_host.is_none()
                    && !config.general.links.show.is_empty()
                {
                    let link_port = config
                        .general
                        .links
                        .public_port
                        .unwrap_or(config.server.port);
                    print_proxy_links(&public_host, link_port, config);
                }

                listeners.push((listener, listener_proxy_protocol));
            }
            Err(e) => {
                if e.kind() == std::io::ErrorKind::AddrInUse {
                    let owners = find_listener_processes(addr);
                    if owners.is_empty() {
                        error!(
                            %addr,
                            "Failed to bind: address already in use (owner process unresolved)"
                        );
                    } else {
                        for owner in owners {
                            error!(
                                %addr,
                                pid = owner.pid,
                                process = %owner.process,
                                "Failed to bind: address already in use"
                            );
                        }
                    }

                    if !listener_conf.reuse_allow {
                        error!(
                            %addr,
                            "reuse_allow=false; set [[server.listeners]].reuse_allow=true to allow multi-instance listening"
                        );
                    }
                } else {
                    error!("Failed to bind to {}: {}", addr, e);
                }
            }
        }
    }

    if !config.general.links.show.is_empty()
        && (config.general.links.public_host.is_some() || listeners.is_empty())
    {
        let (host, port) = if let Some(ref h) = config.general.links.public_host {
            (
                h.clone(),
                config
                    .general
                    .links
                    .public_port
                    .unwrap_or(config.server.port),
            )
        } else {
            let ip = detected_ip_v4.or(detected_ip_v6).map(|ip| ip.to_string());
            if ip.is_none() {
                warn!(
                    "show_link is configured but public IP could not be detected. Set public_host in config."
                );
            }
            (
                ip.unwrap_or_else(|| "UNKNOWN".to_string()),
                config
                    .general
                    .links
                    .public_port
                    .unwrap_or(config.server.port),
            )
        };

        print_proxy_links(&host, port, config);
    }

    let mut has_unix_listener = false;
    #[cfg(unix)]
    if let Some(ref unix_path) = config.server.listen_unix_sock {
        let _ = tokio::fs::remove_file(unix_path).await;

        let unix_listener = UnixListener::bind(unix_path)?;

        if let Some(ref perm_str) = config.server.listen_unix_sock_perm {
            match u32::from_str_radix(perm_str.trim_start_matches('0'), 8) {
                Ok(mode) => {
                    use std::os::unix::fs::PermissionsExt;
                    let perms = std::fs::Permissions::from_mode(mode);
                    if let Err(e) = std::fs::set_permissions(unix_path, perms) {
                        error!(
                            "Failed to set unix socket permissions to {}: {}",
                            perm_str, e
                        );
                    } else {
                        info!("Listening on unix:{} (mode {})", unix_path, perm_str);
                    }
                }
                Err(e) => {
                    warn!(
                        "Invalid listen_unix_sock_perm '{}': {}. Ignoring.",
                        perm_str, e
                    );
                    info!("Listening on unix:{}", unix_path);
                }
            }
        } else {
            info!("Listening on unix:{}", unix_path);
        }

        has_unix_listener = true;

        let mut config_rx_unix: watch::Receiver<Arc<ProxyConfig>> = config_rx.clone();
        let admission_rx_unix = admission_rx.clone();
        let stats = stats.clone();
        let upstream_manager = upstream_manager.clone();
        let replay_checker = replay_checker.clone();
        let buffer_pool = buffer_pool.clone();
        let rng = rng.clone();
        let me_pool = me_pool.clone();
        let route_runtime = route_runtime.clone();
        let tls_cache = tls_cache.clone();
        let ip_tracker = ip_tracker.clone();
        let beobachten = beobachten.clone();
        let shared = shared.clone();
        let max_connections_unix = max_connections.clone();

        tokio::spawn(async move {
            let unix_conn_counter = Arc::new(std::sync::atomic::AtomicU64::new(1));

            loop {
                match unix_listener.accept().await {
                    Ok((stream, _)) => {
                        if !*admission_rx_unix.borrow() {
                            drop(stream);
                            continue;
                        }
                        let accept_permit_timeout_ms =
                            config_rx_unix.borrow().server.accept_permit_timeout_ms;
                        let permit = if accept_permit_timeout_ms == 0 {
                            match max_connections_unix.clone().acquire_owned().await {
                                Ok(permit) => permit,
                                Err(_) => {
                                    error!("Connection limiter is closed");
                                    break;
                                }
                            }
                        } else {
                            match tokio::time::timeout(
                                Duration::from_millis(accept_permit_timeout_ms),
                                max_connections_unix.clone().acquire_owned(),
                            )
                            .await
                            {
                                Ok(Ok(permit)) => permit,
                                Ok(Err(_)) => {
                                    error!("Connection limiter is closed");
                                    break;
                                }
                                Err(_) => {
                                    stats.increment_accept_permit_timeout_total();
                                    debug!(
                                        timeout_ms = accept_permit_timeout_ms,
                                        "Dropping accepted unix connection: permit wait timeout"
                                    );
                                    drop(stream);
                                    continue;
                                }
                            }
                        };
                        let conn_id =
                            unix_conn_counter.fetch_add(1, std::sync::atomic::Ordering::Relaxed);
                        let fake_peer =
                            SocketAddr::from(([127, 0, 0, 1], (conn_id % 65535) as u16));

                        let config = config_rx_unix.borrow_and_update().clone();
                        let stats = stats.clone();
                        let upstream_manager = upstream_manager.clone();
                        let replay_checker = replay_checker.clone();
                        let buffer_pool = buffer_pool.clone();
                        let rng = rng.clone();
                        let me_pool = me_pool.clone();
                        let route_runtime = route_runtime.clone();
                        let tls_cache = tls_cache.clone();
                        let ip_tracker = ip_tracker.clone();
                        let beobachten = beobachten.clone();
                        let shared = shared.clone();
                        let proxy_protocol_enabled = config.server.proxy_protocol;

                        tokio::spawn(async move {
                            let _permit = permit;
                            if let Err(e) = crate::proxy::client::handle_client_stream_with_shared(
                                stream,
                                fake_peer,
                                config,
                                stats,
                                upstream_manager,
                                replay_checker,
                                buffer_pool,
                                rng,
                                me_pool,
                                route_runtime,
                                tls_cache,
                                ip_tracker,
                                beobachten,
                                shared,
                                proxy_protocol_enabled,
                            )
                            .await
                            {
                                debug!(error = %e, "Unix socket connection error");
                            }
                        });
                    }
                    Err(e) => {
                        error!("Unix socket accept error: {}", e);
                        tokio::time::sleep(Duration::from_millis(100)).await;
                    }
                }
            }
        });
    }

    startup_tracker
        .complete_component(
            COMPONENT_LISTENERS_BIND,
            Some(format!(
                "listeners configured tcp={} unix={}",
                listeners.len(),
                has_unix_listener
            )),
        )
        .await;

    Ok(BoundListeners {
        listeners,
        has_unix_listener,
    })
}
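
// Note on connection limiting: the owned semaphore permit is moved into each
// per-connection task (`let _permit = permit;`), so a slot is released only
// when the handler task finishes, however the connection ends.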

#[allow(clippy::too_many_arguments)]
pub(crate) fn spawn_tcp_accept_loops(
    listeners: Vec<(TcpListener, bool)>,
    config_rx: watch::Receiver<Arc<ProxyConfig>>,
    admission_rx: watch::Receiver<bool>,
    stats: Arc<Stats>,
    upstream_manager: Arc<UpstreamManager>,
    replay_checker: Arc<ReplayChecker>,
    buffer_pool: Arc<BufferPool>,
    rng: Arc<SecureRandom>,
    me_pool: Option<Arc<MePool>>,
    route_runtime: Arc<RouteRuntimeController>,
    tls_cache: Option<Arc<TlsFrontCache>>,
    ip_tracker: Arc<UserIpTracker>,
    beobachten: Arc<BeobachtenStore>,
    shared: Arc<ProxySharedState>,
    max_connections: Arc<Semaphore>,
) {
    for (listener, listener_proxy_protocol) in listeners {
        let mut config_rx: watch::Receiver<Arc<ProxyConfig>> = config_rx.clone();
        let admission_rx_tcp = admission_rx.clone();
        let stats = stats.clone();
        let upstream_manager = upstream_manager.clone();
        let replay_checker = replay_checker.clone();
        let buffer_pool = buffer_pool.clone();
        let rng = rng.clone();
        let me_pool = me_pool.clone();
        let route_runtime = route_runtime.clone();
        let tls_cache = tls_cache.clone();
        let ip_tracker = ip_tracker.clone();
        let beobachten = beobachten.clone();
        let shared = shared.clone();
        let max_connections_tcp = max_connections.clone();

        tokio::spawn(async move {
            loop {
                match listener.accept().await {
                    Ok((stream, peer_addr)) => {
                        if !*admission_rx_tcp.borrow() {
                            debug!(peer = %peer_addr, "Admission gate closed, dropping connection");
                            drop(stream);
                            continue;
                        }
                        let accept_permit_timeout_ms =
                            config_rx.borrow().server.accept_permit_timeout_ms;
                        let permit = if accept_permit_timeout_ms == 0 {
                            match max_connections_tcp.clone().acquire_owned().await {
                                Ok(permit) => permit,
                                Err(_) => {
                                    error!("Connection limiter is closed");
                                    break;
                                }
                            }
                        } else {
                            match tokio::time::timeout(
                                Duration::from_millis(accept_permit_timeout_ms),
                                max_connections_tcp.clone().acquire_owned(),
                            )
                            .await
                            {
                                Ok(Ok(permit)) => permit,
                                Ok(Err(_)) => {
                                    error!("Connection limiter is closed");
                                    break;
                                }
                                Err(_) => {
                                    stats.increment_accept_permit_timeout_total();
                                    debug!(
                                        peer = %peer_addr,
                                        timeout_ms = accept_permit_timeout_ms,
                                        "Dropping accepted connection: permit wait timeout"
                                    );
                                    drop(stream);
                                    continue;
                                }
                            }
                        };
                        let config = config_rx.borrow_and_update().clone();
                        let stats = stats.clone();
                        let upstream_manager = upstream_manager.clone();
                        let replay_checker = replay_checker.clone();
                        let buffer_pool = buffer_pool.clone();
                        let rng = rng.clone();
                        let me_pool = me_pool.clone();
                        let route_runtime = route_runtime.clone();
                        let tls_cache = tls_cache.clone();
                        let ip_tracker = ip_tracker.clone();
                        let beobachten = beobachten.clone();
                        let shared = shared.clone();
                        let proxy_protocol_enabled = listener_proxy_protocol;
                        let real_peer_report = Arc::new(std::sync::Mutex::new(None));
                        let real_peer_report_for_handler = real_peer_report.clone();

                        tokio::spawn(async move {
                            let _permit = permit;
                            if let Err(e) = ClientHandler::new_with_shared(
                                stream,
                                peer_addr,
                                config,
                                stats,
                                upstream_manager,
                                replay_checker,
                                buffer_pool,
                                rng,
                                me_pool,
                                route_runtime,
                                tls_cache,
                                ip_tracker,
                                beobachten,
                                shared,
                                proxy_protocol_enabled,
                                real_peer_report_for_handler,
                            )
                            .run()
                            .await
                            {
                                let real_peer = match real_peer_report.lock() {
                                    Ok(guard) => *guard,
                                    Err(_) => None,
                                };
                                let peer_closed = matches!(
                                    &e,
                                    crate::error::ProxyError::Io(ioe)
                                        if matches!(
                                            ioe.kind(),
                                            std::io::ErrorKind::ConnectionReset
                                                | std::io::ErrorKind::ConnectionAborted
                                                | std::io::ErrorKind::BrokenPipe
                                                | std::io::ErrorKind::NotConnected
                                        )
                                ) || matches!(
                                    &e,
                                    crate::error::ProxyError::Stream(
                                        crate::error::StreamError::Io(ioe)
                                    )
                                        if matches!(
                                            ioe.kind(),
                                            std::io::ErrorKind::ConnectionReset
                                                | std::io::ErrorKind::ConnectionAborted
                                                | std::io::ErrorKind::BrokenPipe
                                                | std::io::ErrorKind::NotConnected
                                        )
                                );

                                let me_closed = matches!(
                                    &e,
                                    crate::error::ProxyError::Proxy(msg) if msg == "ME connection lost"
                                );
                                let route_switched = matches!(
                                    &e,
                                    crate::error::ProxyError::Proxy(msg) if msg == ROUTE_SWITCH_ERROR_MSG
                                );

                                match (peer_closed, me_closed) {
                                    (true, _) => {
                                        if let Some(real_peer) = real_peer {
                                            debug!(peer = %peer_addr, real_peer = %real_peer, error = %e, "Connection closed by client");
                                        } else {
                                            debug!(peer = %peer_addr, error = %e, "Connection closed by client");
                                        }
                                    }
                                    (_, true) => {
                                        if let Some(real_peer) = real_peer {
                                            warn!(peer = %peer_addr, real_peer = %real_peer, error = %e, "Connection closed: Middle-End dropped session");
                                        } else {
                                            warn!(peer = %peer_addr, error = %e, "Connection closed: Middle-End dropped session");
                                        }
                                    }
                                    _ if route_switched => {
                                        if let Some(real_peer) = real_peer {
                                            info!(peer = %peer_addr, real_peer = %real_peer, error = %e, "Connection closed by controlled route cutover");
                                        } else {
                                            info!(peer = %peer_addr, error = %e, "Connection closed by controlled route cutover");
                                        }
                                    }
                                    _ if is_expected_handshake_eof(&e) => {
                                        if let Some(real_peer) = real_peer {
                                            info!(peer = %peer_addr, real_peer = %real_peer, error = %e, "Connection closed during initial handshake");
                                        } else {
                                            info!(peer = %peer_addr, error = %e, "Connection closed during initial handshake");
                                        }
                                    }
                                    _ => {
                                        if let Some(real_peer) = real_peer {
                                            warn!(peer = %peer_addr, real_peer = %real_peer, error = %e, "Connection closed with error");
                                        } else {
                                            warn!(peer = %peer_addr, error = %e, "Connection closed with error");
                                        }
                                    }
                                }
                            }
                        });
                    }
                    Err(e) => {
                        error!("Accept error: {}", e);
                        tokio::time::sleep(Duration::from_millis(100)).await;
                    }
                }
            }
        });
    }
}
|
||||
|
|
@ -0,0 +1,646 @@
#![allow(clippy::too_many_arguments)]

use std::sync::Arc;
use std::time::Duration;

use tokio::sync::RwLock;
use tracing::{error, info, warn};

use crate::config::ProxyConfig;
use crate::crypto::SecureRandom;
use crate::network::probe::{NetworkDecision, NetworkProbe};
use crate::startup::{
    COMPONENT_ME_POOL_CONSTRUCT, COMPONENT_ME_POOL_INIT_STAGE1, COMPONENT_ME_PROXY_CONFIG_V4,
    COMPONENT_ME_PROXY_CONFIG_V6, COMPONENT_ME_SECRET_FETCH, StartupMeStatus, StartupTracker,
};
use crate::stats::Stats;
use crate::transport::UpstreamManager;
use crate::transport::middle_proxy::MePool;

use super::helpers::load_startup_proxy_config_snapshot;

pub(crate) async fn initialize_me_pool(
    use_middle_proxy: bool,
    config: &ProxyConfig,
    decision: &NetworkDecision,
    probe: &NetworkProbe,
    startup_tracker: &Arc<StartupTracker>,
    upstream_manager: Arc<UpstreamManager>,
    rng: Arc<SecureRandom>,
    stats: Arc<Stats>,
    api_me_pool: Arc<RwLock<Option<Arc<MePool>>>>,
) -> Option<Arc<MePool>> {
    if !use_middle_proxy {
        return None;
    }

    info!("=== Middle Proxy Mode ===");
    let me_nat_probe = config.general.middle_proxy_nat_probe && config.network.stun_use;
    if config.general.middle_proxy_nat_probe && !config.network.stun_use {
        info!("Middle-proxy STUN probing disabled by network.stun_use=false");
    }

    let me2dc_fallback = config.general.me2dc_fallback;
    let me_init_retry_attempts = config.general.me_init_retry_attempts;
    let me_init_warn_after_attempts: u32 = 3;

    // Global ad_tag (pool default). Used when a user has no per-user tag in access.user_ad_tags.
    let proxy_tag = config
        .general
        .ad_tag
        .as_ref()
        .map(|tag| hex::decode(tag).expect("general.ad_tag must be validated before startup"));

    // =============================================================
    // CRITICAL: Download the Telegram proxy-secret (NOT the user secret!)
    //
    // C MTProxy uses TWO separate secrets:
    //   -S flag   = 16-byte user secret for client obfuscation
    //   --aes-pwd = 32-512 byte binary file for ME RPC auth
    //
    // proxy-secret is from: https://core.telegram.org/getProxySecret
    // =============================================================
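    // Illustrative sketch of the distinction (hypothetical values, not from
    // this codebase):
    //   user secret (-S):         16 bytes, hex, e.g. "00112233445566778899aabbccddeeff"
    //   proxy-secret (--aes-pwd): opaque binary blob, fetched once and cached, e.g.
    //     let bytes = std::fs::read("proxy-secret")?;
    //     assert!((32..=512).contains(&bytes.len()));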
    let proxy_secret_path = config.general.proxy_secret_path.as_deref();
    let pool_size = config.general.middle_proxy_pool_size.max(1);
    let proxy_secret = loop {
        match crate::transport::middle_proxy::fetch_proxy_secret_with_upstream(
            proxy_secret_path,
            config.general.proxy_secret_len_max,
            Some(upstream_manager.clone()),
        )
        .await
        {
            Ok(proxy_secret) => break Some(proxy_secret),
            Err(e) => {
                startup_tracker.set_me_last_error(Some(e.to_string())).await;
                if me2dc_fallback {
                    error!(
                        error = %e,
                        "ME startup failed: proxy-secret is unavailable and no saved secret found; falling back to direct mode"
                    );
                    break None;
                }

                warn!(
                    error = %e,
                    retry_in_secs = 2,
                    "ME startup failed: proxy-secret is unavailable and no saved secret found; retrying because me2dc_fallback=false"
                );
                tokio::time::sleep(Duration::from_secs(2)).await;
            }
        }
    };
    match proxy_secret {
        Some(proxy_secret) => {
            startup_tracker
                .complete_component(
                    COMPONENT_ME_SECRET_FETCH,
                    Some("proxy-secret loaded".to_string()),
                )
                .await;
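            // key_sig below is the first 4 bytes of the secret, read little-endian,
            // logged as a stable fingerprint without revealing the secret itself.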
            info!(
                secret_len = proxy_secret.len(),
                key_sig = format_args!(
                    "0x{:08x}",
                    if proxy_secret.len() >= 4 {
                        u32::from_le_bytes([
                            proxy_secret[0],
                            proxy_secret[1],
                            proxy_secret[2],
                            proxy_secret[3],
                        ])
                    } else {
                        0
                    }
                ),
                "Proxy-secret loaded"
            );

            startup_tracker
                .start_component(
                    COMPONENT_ME_PROXY_CONFIG_V4,
                    Some("load startup proxy-config v4".to_string()),
                )
                .await;
            startup_tracker
                .set_me_status(StartupMeStatus::Initializing, COMPONENT_ME_PROXY_CONFIG_V4)
                .await;
            let cfg_v4 = load_startup_proxy_config_snapshot(
                "https://core.telegram.org/getProxyConfig",
                config.general.proxy_config_v4_cache_path.as_deref(),
                me2dc_fallback,
                "getProxyConfig",
                Some(upstream_manager.clone()),
            )
            .await;
            if cfg_v4.is_some() {
                startup_tracker
                    .complete_component(
                        COMPONENT_ME_PROXY_CONFIG_V4,
                        Some("proxy-config v4 loaded".to_string()),
                    )
                    .await;
            } else {
                startup_tracker
                    .fail_component(
                        COMPONENT_ME_PROXY_CONFIG_V4,
                        Some("proxy-config v4 unavailable".to_string()),
                    )
                    .await;
            }
            startup_tracker
                .start_component(
                    COMPONENT_ME_PROXY_CONFIG_V6,
                    Some("load startup proxy-config v6".to_string()),
                )
                .await;
            startup_tracker
                .set_me_status(StartupMeStatus::Initializing, COMPONENT_ME_PROXY_CONFIG_V6)
                .await;
            let cfg_v6 = load_startup_proxy_config_snapshot(
                "https://core.telegram.org/getProxyConfigV6",
                config.general.proxy_config_v6_cache_path.as_deref(),
                me2dc_fallback,
                "getProxyConfigV6",
                Some(upstream_manager.clone()),
            )
            .await;
            if cfg_v6.is_some() {
                startup_tracker
                    .complete_component(
                        COMPONENT_ME_PROXY_CONFIG_V6,
                        Some("proxy-config v6 loaded".to_string()),
                    )
                    .await;
            } else {
                startup_tracker
                    .fail_component(
                        COMPONENT_ME_PROXY_CONFIG_V6,
                        Some("proxy-config v6 unavailable".to_string()),
                    )
                    .await;
            }

            if let (Some(cfg_v4), Some(cfg_v6)) = (cfg_v4, cfg_v6) {
                startup_tracker
                    .start_component(
                        COMPONENT_ME_POOL_CONSTRUCT,
                        Some("construct ME pool".to_string()),
                    )
                    .await;
                startup_tracker
                    .set_me_status(StartupMeStatus::Initializing, COMPONENT_ME_POOL_CONSTRUCT)
                    .await;
                let pool = MePool::new(
                    proxy_tag.clone(),
                    proxy_secret,
                    config.general.middle_proxy_nat_ip,
                    me_nat_probe,
                    None,
                    config.network.stun_servers.clone(),
                    config.general.stun_nat_probe_concurrency,
                    probe.detected_ipv6,
                    config.timeouts.me_one_retry,
                    config.timeouts.me_one_timeout_ms,
                    cfg_v4.map.clone(),
                    cfg_v6.map.clone(),
                    cfg_v4.default_dc.or(cfg_v6.default_dc),
                    decision.clone(),
                    Some(upstream_manager.clone()),
                    rng.clone(),
                    stats.clone(),
                    config.general.me_keepalive_enabled,
                    config.general.me_keepalive_interval_secs,
                    config.general.me_keepalive_jitter_secs,
                    config.general.me_keepalive_payload_random,
                    config.general.rpc_proxy_req_every,
                    config.general.me_warmup_stagger_enabled,
                    config.general.me_warmup_step_delay_ms,
                    config.general.me_warmup_step_jitter_ms,
                    config.general.me_reconnect_max_concurrent_per_dc,
                    config.general.me_reconnect_backoff_base_ms,
                    config.general.me_reconnect_backoff_cap_ms,
                    config.general.me_reconnect_fast_retry_count,
                    config.general.me_single_endpoint_shadow_writers,
                    config.general.me_single_endpoint_outage_mode_enabled,
                    config.general.me_single_endpoint_outage_disable_quarantine,
                    config.general.me_single_endpoint_outage_backoff_min_ms,
                    config.general.me_single_endpoint_outage_backoff_max_ms,
                    config.general.me_single_endpoint_shadow_rotate_every_secs,
                    config.general.me_floor_mode,
                    config.general.me_adaptive_floor_idle_secs,
                    config.general.me_adaptive_floor_min_writers_single_endpoint,
                    config.general.me_adaptive_floor_min_writers_multi_endpoint,
                    config.general.me_adaptive_floor_recover_grace_secs,
                    config.general.me_adaptive_floor_writers_per_core_total,
                    config.general.me_adaptive_floor_cpu_cores_override,
                    config
                        .general
                        .me_adaptive_floor_max_extra_writers_single_per_core,
                    config
                        .general
                        .me_adaptive_floor_max_extra_writers_multi_per_core,
                    config.general.me_adaptive_floor_max_active_writers_per_core,
                    config.general.me_adaptive_floor_max_warm_writers_per_core,
                    config.general.me_adaptive_floor_max_active_writers_global,
                    config.general.me_adaptive_floor_max_warm_writers_global,
                    config.general.hardswap,
                    config.general.me_pool_drain_ttl_secs,
                    config.general.me_instadrain,
                    config.general.me_pool_drain_threshold,
                    config.general.me_pool_drain_soft_evict_enabled,
                    config.general.me_pool_drain_soft_evict_grace_secs,
                    config.general.me_pool_drain_soft_evict_per_writer,
                    config.general.me_pool_drain_soft_evict_budget_per_core,
                    config.general.me_pool_drain_soft_evict_cooldown_ms,
                    config.general.effective_me_pool_force_close_secs(),
                    config.general.me_pool_min_fresh_ratio,
                    config.general.me_hardswap_warmup_delay_min_ms,
                    config.general.me_hardswap_warmup_delay_max_ms,
                    config.general.me_hardswap_warmup_extra_passes,
                    config.general.me_hardswap_warmup_pass_backoff_base_ms,
                    config.general.me_bind_stale_mode,
                    config.general.me_bind_stale_ttl_secs,
                    config.general.me_secret_atomic_snapshot,
                    config.general.me_deterministic_writer_sort,
                    config.general.me_writer_pick_mode,
                    config.general.me_writer_pick_sample_size,
                    config.general.me_socks_kdf_policy,
                    config.general.me_writer_cmd_channel_capacity,
                    config.general.me_route_channel_capacity,
                    config.general.me_route_backpressure_base_timeout_ms,
                    config.general.me_route_backpressure_high_timeout_ms,
                    config.general.me_route_backpressure_high_watermark_pct,
                    config.general.me_reader_route_data_wait_ms,
                    config.general.me_health_interval_ms_unhealthy,
                    config.general.me_health_interval_ms_healthy,
                    config.general.me_warn_rate_limit_ms,
                    config.general.me_route_no_writer_mode,
                    config.general.me_route_no_writer_wait_ms,
                    config.general.me_route_hybrid_max_wait_ms,
                    config.general.me_route_blocking_send_timeout_ms,
                    config.general.me_route_inline_recovery_attempts,
                    config.general.me_route_inline_recovery_wait_ms,
                );
                startup_tracker
                    .complete_component(
                        COMPONENT_ME_POOL_CONSTRUCT,
                        Some("ME pool object created".to_string()),
                    )
                    .await;
                *api_me_pool.write().await = Some(pool.clone());
                startup_tracker
                    .start_component(
                        COMPONENT_ME_POOL_INIT_STAGE1,
                        Some("initialize ME pool writers".to_string()),
                    )
                    .await;
                startup_tracker
                    .set_me_status(StartupMeStatus::Initializing, COMPONENT_ME_POOL_INIT_STAGE1)
                    .await;

                if me2dc_fallback {
                    let pool_bg = pool.clone();
                    let rng_bg = rng.clone();
                    let startup_tracker_bg = startup_tracker.clone();
                    let retry_limit = if me_init_retry_attempts == 0 {
                        String::from("unlimited")
                    } else {
                        me_init_retry_attempts.to_string()
                    };
                    std::thread::spawn(move || {
                        let runtime = match tokio::runtime::Builder::new_current_thread()
                            .enable_all()
                            .build()
                        {
                            Ok(runtime) => runtime,
                            Err(error) => {
                                error!(error = %error, "Failed to build background runtime for ME initialization");
                                return;
                            }
                        };
                        runtime.block_on(async move {
                            let mut init_attempt: u32 = 0;
                            loop {
                                init_attempt = init_attempt.saturating_add(1);
                                startup_tracker_bg.set_me_init_attempt(init_attempt).await;
                                match pool_bg.init(pool_size, &rng_bg).await {
                                    Ok(()) => {
                                        startup_tracker_bg.set_me_last_error(None).await;
                                        startup_tracker_bg
                                            .complete_component(
                                                COMPONENT_ME_POOL_INIT_STAGE1,
                                                Some("ME pool initialized".to_string()),
                                            )
                                            .await;
                                        startup_tracker_bg
                                            .set_me_status(StartupMeStatus::Ready, "ready")
                                            .await;
                                        info!(
                                            attempt = init_attempt,
                                            "Middle-End pool initialized successfully"
                                        );

                                        // ── Supervised background tasks ──────────────────
                                        // Each task runs inside a nested tokio::spawn so
                                        // that a panic is caught via JoinHandle and the
                                        // outer loop restarts the task automatically.
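                                        // Generic shape of the pattern (a sketch, assuming
                                        // some `run_task()` future):
                                        //   loop {
                                        //       match tokio::spawn(run_task()).await {
                                        //           Ok(()) => warn!("task exited, restarting"),
                                        //           Err(_panic) => sleep(1s), then restart,
                                        //       }
                                        //   }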
                                        let pool_health = pool_bg.clone();
                                        let rng_health = rng_bg.clone();
                                        let min_conns = pool_size;
                                        tokio::spawn(async move {
                                            loop {
                                                let p = pool_health.clone();
                                                let r = rng_health.clone();
                                                let res = tokio::spawn(async move {
                                                    crate::transport::middle_proxy::me_health_monitor(
                                                        p, r, min_conns,
                                                    )
                                                    .await;
                                                })
                                                .await;
                                                match res {
                                                    Ok(()) => warn!("me_health_monitor exited unexpectedly, restarting"),
                                                    Err(e) => {
                                                        error!(error = %e, "me_health_monitor panicked, restarting in 1s");
                                                        tokio::time::sleep(Duration::from_secs(1)).await;
                                                    }
                                                }
                                            }
                                        });
                                        let pool_drain_enforcer = pool_bg.clone();
                                        tokio::spawn(async move {
                                            loop {
                                                let p = pool_drain_enforcer.clone();
                                                let res = tokio::spawn(async move {
                                                    crate::transport::middle_proxy::me_drain_timeout_enforcer(p).await;
                                                })
                                                .await;
                                                match res {
                                                    Ok(()) => warn!("me_drain_timeout_enforcer exited unexpectedly, restarting"),
                                                    Err(e) => {
                                                        error!(error = %e, "me_drain_timeout_enforcer panicked, restarting in 1s");
                                                        tokio::time::sleep(Duration::from_secs(1)).await;
                                                    }
                                                }
                                            }
                                        });
                                        let pool_watchdog = pool_bg.clone();
                                        tokio::spawn(async move {
                                            loop {
                                                let p = pool_watchdog.clone();
                                                let res = tokio::spawn(async move {
                                                    crate::transport::middle_proxy::me_zombie_writer_watchdog(p).await;
                                                })
                                                .await;
                                                match res {
                                                    Ok(()) => warn!("me_zombie_writer_watchdog exited unexpectedly, restarting"),
                                                    Err(e) => {
                                                        error!(error = %e, "me_zombie_writer_watchdog panicked, restarting in 1s");
                                                        tokio::time::sleep(Duration::from_secs(1)).await;
                                                    }
                                                }
                                            }
                                        });
                                        // CRITICAL: keep the current-thread runtime
                                        // alive. Without this, block_on() returns,
                                        // the Runtime is dropped, and ALL spawned
                                        // background tasks (health monitor, drain
                                        // enforcer, zombie watchdog) are silently
                                        // cancelled — causing the draining-writer
                                        // leak that brought us here.
                                        std::future::pending::<()>().await;
                                        unreachable!();
                                    }
                                    Err(e) => {
                                        startup_tracker_bg.set_me_last_error(Some(e.to_string())).await;
                                        if init_attempt >= me_init_warn_after_attempts {
                                            warn!(
                                                error = %e,
                                                attempt = init_attempt,
                                                retry_limit = %retry_limit,
                                                retry_in_secs = 2,
                                                "ME pool is not ready yet; retrying background initialization"
                                            );
                                        } else {
                                            info!(
                                                error = %e,
                                                attempt = init_attempt,
                                                retry_limit = %retry_limit,
                                                retry_in_secs = 2,
                                                "ME pool startup warmup: retrying background initialization"
                                            );
                                        }
                                        pool_bg.reset_stun_state();
                                        tokio::time::sleep(Duration::from_secs(2)).await;
                                    }
                                }
                            }
                        });
                    });
                    startup_tracker
                        .set_me_status(StartupMeStatus::Initializing, "background_init")
                        .await;
                    info!(
                        startup_grace_secs = 80,
                        "ME pool initialization continues in background; startup continues with conditional Direct fallback"
                    );
                    Some(pool)
                } else {
                    let mut init_attempt: u32 = 0;
                    loop {
                        init_attempt = init_attempt.saturating_add(1);
                        startup_tracker.set_me_init_attempt(init_attempt).await;
                        match pool.init(pool_size, &rng).await {
                            Ok(()) => {
                                startup_tracker.set_me_last_error(None).await;
                                startup_tracker
                                    .complete_component(
                                        COMPONENT_ME_POOL_INIT_STAGE1,
                                        Some("ME pool initialized".to_string()),
                                    )
                                    .await;
                                startup_tracker
                                    .set_me_status(StartupMeStatus::Ready, "ready")
                                    .await;
                                info!(
                                    attempt = init_attempt,
                                    "Middle-End pool initialized successfully"
                                );

                                // ── Supervised background tasks ──────────────────
                                let pool_clone = pool.clone();
                                let rng_clone = rng.clone();
                                let min_conns = pool_size;
                                tokio::spawn(async move {
                                    loop {
                                        let p = pool_clone.clone();
                                        let r = rng_clone.clone();
                                        let res = tokio::spawn(async move {
                                            crate::transport::middle_proxy::me_health_monitor(
                                                p, r, min_conns,
                                            )
                                            .await;
                                        })
                                        .await;
                                        match res {
                                            Ok(()) => warn!(
                                                "me_health_monitor exited unexpectedly, restarting"
                                            ),
                                            Err(e) => {
                                                error!(error = %e, "me_health_monitor panicked, restarting in 1s");
                                                tokio::time::sleep(Duration::from_secs(1)).await;
                                            }
                                        }
                                    }
                                });
                                let pool_drain_enforcer = pool.clone();
                                tokio::spawn(async move {
                                    loop {
                                        let p = pool_drain_enforcer.clone();
                                        let res = tokio::spawn(async move {
                                            crate::transport::middle_proxy::me_drain_timeout_enforcer(p).await;
                                        })
                                        .await;
                                        match res {
                                            Ok(()) => warn!(
                                                "me_drain_timeout_enforcer exited unexpectedly, restarting"
                                            ),
                                            Err(e) => {
                                                error!(error = %e, "me_drain_timeout_enforcer panicked, restarting in 1s");
                                                tokio::time::sleep(Duration::from_secs(1)).await;
                                            }
                                        }
                                    }
                                });
                                let pool_watchdog = pool.clone();
                                tokio::spawn(async move {
                                    loop {
                                        let p = pool_watchdog.clone();
                                        let res = tokio::spawn(async move {
                                            crate::transport::middle_proxy::me_zombie_writer_watchdog(p).await;
                                        })
                                        .await;
                                        match res {
                                            Ok(()) => warn!(
                                                "me_zombie_writer_watchdog exited unexpectedly, restarting"
                                            ),
                                            Err(e) => {
                                                error!(error = %e, "me_zombie_writer_watchdog panicked, restarting in 1s");
                                                tokio::time::sleep(Duration::from_secs(1)).await;
                                            }
                                        }
                                    }
                                });

                                break Some(pool);
                            }
                            Err(e) => {
                                startup_tracker.set_me_last_error(Some(e.to_string())).await;
                                let retries_limited = me_init_retry_attempts > 0;
                                if retries_limited && init_attempt >= me_init_retry_attempts {
                                    startup_tracker
                                        .fail_component(
                                            COMPONENT_ME_POOL_INIT_STAGE1,
                                            Some("ME init retry budget exhausted".to_string()),
                                        )
                                        .await;
                                    startup_tracker
                                        .set_me_status(StartupMeStatus::Failed, "failed")
                                        .await;
                                    error!(
                                        error = %e,
                                        attempt = init_attempt,
                                        retry_limit = me_init_retry_attempts,
                                        "ME pool init retries exhausted; startup cannot continue in middle-proxy mode"
                                    );
                                    break None;
                                }

                                let retry_limit = if me_init_retry_attempts == 0 {
                                    String::from("unlimited")
                                } else {
                                    me_init_retry_attempts.to_string()
                                };
                                if init_attempt >= me_init_warn_after_attempts {
                                    warn!(
                                        error = %e,
                                        attempt = init_attempt,
                                        retry_limit = retry_limit,
                                        me2dc_fallback = me2dc_fallback,
                                        retry_in_secs = 2,
                                        "ME pool is not ready yet; retrying startup initialization"
                                    );
                                } else {
                                    info!(
                                        error = %e,
                                        attempt = init_attempt,
                                        retry_limit = retry_limit,
                                        me2dc_fallback = me2dc_fallback,
                                        retry_in_secs = 2,
                                        "ME pool startup warmup: retrying initialization"
                                    );
                                }
                                pool.reset_stun_state();
                                tokio::time::sleep(Duration::from_secs(2)).await;
                            }
                        }
                    }
                }
            } else {
                startup_tracker
                    .skip_component(
                        COMPONENT_ME_POOL_CONSTRUCT,
                        Some("ME configs are incomplete".to_string()),
                    )
                    .await;
                startup_tracker
                    .fail_component(
                        COMPONENT_ME_POOL_INIT_STAGE1,
                        Some("ME configs are incomplete".to_string()),
                    )
                    .await;
                startup_tracker
                    .set_me_status(StartupMeStatus::Failed, "failed")
                    .await;
                None
            }
        }
        None => {
            startup_tracker
                .fail_component(
                    COMPONENT_ME_SECRET_FETCH,
                    Some("proxy-secret unavailable".to_string()),
                )
                .await;
            startup_tracker
                .skip_component(
                    COMPONENT_ME_PROXY_CONFIG_V4,
                    Some("proxy-secret unavailable".to_string()),
                )
                .await;
            startup_tracker
                .skip_component(
                    COMPONENT_ME_PROXY_CONFIG_V6,
                    Some("proxy-secret unavailable".to_string()),
                )
                .await;
            startup_tracker
                .skip_component(
                    COMPONENT_ME_POOL_CONSTRUCT,
                    Some("proxy-secret unavailable".to_string()),
                )
                .await;
            startup_tracker
                .fail_component(
                    COMPONENT_ME_POOL_INIT_STAGE1,
                    Some("proxy-secret unavailable".to_string()),
                )
                .await;
            startup_tracker
                .set_me_status(StartupMeStatus::Failed, "failed")
                .await;
            None
        }
    }
}
@ -0,0 +1,816 @@
//! telemt — Telegram MTProto Proxy

#![allow(unused_assignments)]

// Runtime orchestration modules.
// - helpers: CLI and shared startup/runtime helper routines.
// - tls_bootstrap: TLS front cache bootstrap and refresh tasks.
// - me_startup: Middle-End secret/config fetch and pool initialization.
// - connectivity: startup ME/DC connectivity diagnostics.
// - runtime_tasks: hot-reload and background task orchestration.
// - admission: conditional-cast gate and route mode switching.
// - listeners: TCP/Unix listener bind and accept-loop orchestration.
// - shutdown: graceful shutdown sequence and uptime logging.
mod admission;
mod connectivity;
mod helpers;
mod listeners;
mod me_startup;
mod runtime_tasks;
mod shutdown;
mod tls_bootstrap;

use std::net::{IpAddr, SocketAddr};
use std::sync::Arc;
use std::time::{Duration, Instant, SystemTime, UNIX_EPOCH};
use tokio::sync::{RwLock, Semaphore, watch};
use tracing::{error, info, warn};
use tracing_subscriber::{EnvFilter, fmt, prelude::*, reload};

use crate::api;
use crate::config::{LogLevel, ProxyConfig};
use crate::conntrack_control;
use crate::crypto::SecureRandom;
use crate::ip_tracker::UserIpTracker;
use crate::network::probe::{decide_network_capabilities, log_probe_result, run_probe};
use crate::proxy::route_mode::{RelayRouteMode, RouteRuntimeController};
use crate::proxy::shared_state::ProxySharedState;
use crate::startup::{
    COMPONENT_API_BOOTSTRAP, COMPONENT_CONFIG_LOAD, COMPONENT_ME_POOL_CONSTRUCT,
    COMPONENT_ME_POOL_INIT_STAGE1, COMPONENT_ME_PROXY_CONFIG_V4, COMPONENT_ME_PROXY_CONFIG_V6,
    COMPONENT_ME_SECRET_FETCH, COMPONENT_NETWORK_PROBE, COMPONENT_TRACING_INIT, StartupMeStatus,
    StartupTracker,
};
use crate::stats::beobachten::BeobachtenStore;
use crate::stats::telemetry::TelemetryPolicy;
use crate::stats::{ReplayChecker, Stats};
use crate::stream::BufferPool;
use crate::transport::UpstreamManager;
use crate::transport::middle_proxy::MePool;
use helpers::{parse_cli, resolve_runtime_config_path};

#[cfg(unix)]
use crate::daemon::{DaemonOptions, PidFile, drop_privileges};

/// Runs the full telemt runtime startup pipeline and blocks until shutdown.
///
/// On Unix, daemon options should be handled before calling this function
/// (daemonization must happen before the tokio runtime starts).
#[cfg(unix)]
pub async fn run_with_daemon(
    daemon_opts: DaemonOptions,
) -> std::result::Result<(), Box<dyn std::error::Error>> {
    run_inner(daemon_opts).await
}

/// Runs the full telemt runtime startup pipeline and blocks until shutdown.
///
/// This is the main entry point for non-daemon mode or when called as a library.
#[allow(dead_code)]
pub async fn run() -> std::result::Result<(), Box<dyn std::error::Error>> {
    #[cfg(unix)]
    {
        // Parse the CLI to get daemon options even in the simple run() path
        let args: Vec<String> = std::env::args().skip(1).collect();
        let daemon_opts = crate::cli::parse_daemon_args(&args);
        run_inner(daemon_opts).await
    }
    #[cfg(not(unix))]
    {
        run_inner().await
    }
}

#[cfg(unix)]
async fn run_inner(
    daemon_opts: DaemonOptions,
) -> std::result::Result<(), Box<dyn std::error::Error>> {
    // Acquire the PID file if daemonizing or if explicitly requested.
    // Keep it alive until shutdown (the underscore prefix marks it as intentionally held for RAII cleanup).
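    // RAII sketch (assumed; the real cleanup lives in crate::daemon): a PidFile
    // roughly removes its file when dropped, e.g.
    //   impl Drop for PidFile {
    //       fn drop(&mut self) { let _ = std::fs::remove_file(&self.path); }
    //   }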
    let _pid_file = if daemon_opts.daemonize || daemon_opts.pid_file.is_some() {
        let mut pf = PidFile::new(daemon_opts.pid_file_path());
        if let Err(e) = pf.acquire() {
            eprintln!("[telemt] {}", e);
            std::process::exit(1);
        }
        Some(pf)
    } else {
        None
    };

    let process_started_at = Instant::now();
    let process_started_at_epoch_secs = SystemTime::now()
        .duration_since(UNIX_EPOCH)
        .unwrap_or_default()
        .as_secs();
    let startup_tracker = Arc::new(StartupTracker::new(process_started_at_epoch_secs));
    startup_tracker
        .start_component(
            COMPONENT_CONFIG_LOAD,
            Some("load and validate config".to_string()),
        )
        .await;
    let cli_args = parse_cli();
    let config_path_cli = cli_args.config_path;
    let config_path_explicit = cli_args.config_path_explicit;
    let data_path = cli_args.data_path;
    let cli_silent = cli_args.silent;
    let cli_log_level = cli_args.log_level;
    let log_destination = cli_args.log_destination;
    let startup_cwd = match std::env::current_dir() {
        Ok(cwd) => cwd,
        Err(e) => {
            eprintln!("[telemt] Can't read current_dir: {}", e);
            std::process::exit(1);
        }
    };
    let mut config_path =
        resolve_runtime_config_path(&config_path_cli, &startup_cwd, config_path_explicit);

    let mut config = match ProxyConfig::load(&config_path) {
        Ok(c) => c,
        Err(e) => {
            if config_path.exists() {
                eprintln!("[telemt] Error: {}", e);
                std::process::exit(1);
            } else {
                let default = ProxyConfig::default();

                let serialized =
                    match toml::to_string_pretty(&default).or_else(|_| toml::to_string(&default)) {
                        Ok(value) => Some(value),
                        Err(serialize_error) => {
                            eprintln!(
                                "[telemt] Warning: failed to serialize default config: {}",
                                serialize_error
                            );
                            None
                        }
                    };

                if config_path_explicit {
                    if let Some(serialized) = serialized.as_ref() {
                        if let Err(write_error) = std::fs::write(&config_path, serialized) {
                            eprintln!(
                                "[telemt] Error: failed to create explicit config at {}: {}",
                                config_path.display(),
                                write_error
                            );
                            std::process::exit(1);
                        }
                        eprintln!(
                            "[telemt] Created default config at {}",
                            config_path.display()
                        );
                    } else {
                        eprintln!(
                            "[telemt] Warning: running with in-memory default config without writing to disk"
                        );
                    }
                } else {
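                    // Fallback order for persisting the default config:
                    //   1) /etc/telemt/telemt.toml
                    //   2) <startup cwd>/config.toml
                    //   3) keep it in memory only and warn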
                    let system_dir = std::path::Path::new("/etc/telemt");
                    let system_config_path = system_dir.join("telemt.toml");
                    let startup_config_path = startup_cwd.join("config.toml");
                    let mut persisted = false;

                    if let Some(serialized) = serialized.as_ref() {
                        match std::fs::create_dir_all(system_dir) {
                            Ok(()) => match std::fs::write(&system_config_path, serialized) {
                                Ok(()) => {
                                    config_path = system_config_path;
                                    eprintln!(
                                        "[telemt] Created default config at {}",
                                        config_path.display()
                                    );
                                    persisted = true;
                                }
                                Err(write_error) => {
                                    eprintln!(
                                        "[telemt] Warning: failed to write default config at {}: {}",
                                        system_config_path.display(),
                                        write_error
                                    );
                                }
                            },
                            Err(create_error) => {
                                eprintln!(
                                    "[telemt] Warning: failed to create {}: {}",
                                    system_dir.display(),
                                    create_error
                                );
                            }
                        }

                        if !persisted {
                            match std::fs::write(&startup_config_path, serialized) {
                                Ok(()) => {
                                    config_path = startup_config_path;
                                    eprintln!(
                                        "[telemt] Created default config at {}",
                                        config_path.display()
                                    );
                                    persisted = true;
                                }
                                Err(write_error) => {
                                    eprintln!(
                                        "[telemt] Warning: failed to write default config at {}: {}",
                                        startup_config_path.display(),
                                        write_error
                                    );
                                }
                            }
                        }
                    }

                    if !persisted {
                        eprintln!(
                            "[telemt] Warning: running with in-memory default config without writing to disk"
                        );
                    }
                }
                default
            }
        }
    };

    if let Err(e) = config.validate() {
        eprintln!("[telemt] Invalid config: {}", e);
        std::process::exit(1);
    }

    if let Some(p) = data_path {
        config.general.data_path = Some(p);
    }

    if let Some(ref data_path) = config.general.data_path {
        if !data_path.is_absolute() {
            eprintln!(
                "[telemt] data_path must be absolute: {}",
                data_path.display()
            );
            std::process::exit(1);
        }

        if data_path.exists() {
            if !data_path.is_dir() {
                eprintln!(
                    "[telemt] data_path exists but is not a directory: {}",
                    data_path.display()
                );
                std::process::exit(1);
            }
        } else if let Err(e) = std::fs::create_dir_all(data_path) {
            eprintln!(
                "[telemt] Can't create data_path {}: {}",
                data_path.display(),
                e
            );
            std::process::exit(1);
        }

        if let Err(e) = std::env::set_current_dir(data_path) {
            eprintln!(
                "[telemt] Can't use data_path {}: {}",
                data_path.display(),
                e
            );
            std::process::exit(1);
        }
    }

    if let Err(e) = crate::network::dns_overrides::install_entries(&config.network.dns_overrides) {
        eprintln!("[telemt] Invalid network.dns_overrides: {}", e);
        std::process::exit(1);
    }
    startup_tracker
        .complete_component(COMPONENT_CONFIG_LOAD, Some("config is ready".to_string()))
        .await;

    let has_rust_log = std::env::var("RUST_LOG").is_ok();
    let effective_log_level = if cli_silent {
        LogLevel::Silent
    } else if let Some(ref s) = cli_log_level {
        LogLevel::from_str_loose(s)
    } else {
        config.general.log_level.clone()
    };

    let (filter_layer, filter_handle) = reload::Layer::new(EnvFilter::new("info"));
    startup_tracker
        .start_component(
            COMPONENT_TRACING_INIT,
            Some("initialize tracing subscriber".to_string()),
        )
        .await;

    // Initialize logging based on destination
    let _logging_guard: Option<crate::logging::LoggingGuard>;
    match log_destination {
        crate::logging::LogDestination::Stderr => {
            // Default: log to stderr (works with systemd journald)
            let fmt_layer = if config.general.disable_colors {
                fmt::Layer::default().with_ansi(false)
            } else {
                fmt::Layer::default().with_ansi(true)
            };
            tracing_subscriber::registry()
                .with(filter_layer)
                .with(fmt_layer)
                .init();
            _logging_guard = None;
        }
        #[cfg(unix)]
        crate::logging::LogDestination::Syslog => {
            // Syslog: for OpenRC/FreeBSD
            let logging_opts = crate::logging::LoggingOptions {
                destination: log_destination,
                disable_colors: true,
            };
            let (_, guard) = crate::logging::init_logging(&logging_opts, "info");
            _logging_guard = Some(guard);
        }
        crate::logging::LogDestination::File { .. } => {
            // File logging with optional rotation
            let logging_opts = crate::logging::LoggingOptions {
                destination: log_destination,
                disable_colors: true,
            };
            let (_, guard) = crate::logging::init_logging(&logging_opts, "info");
            _logging_guard = Some(guard);
        }
    }

    startup_tracker
        .complete_component(
            COMPONENT_TRACING_INIT,
            Some("tracing initialized".to_string()),
        )
        .await;

    info!("Telemt MTProxy v{}", env!("CARGO_PKG_VERSION"));
    info!("Log level: {}", effective_log_level);
    if config.general.disable_colors {
        info!("Colors: disabled");
    }
    info!(
        "Modes: classic={} secure={} tls={}",
        config.general.modes.classic, config.general.modes.secure, config.general.modes.tls
    );
    if config.general.modes.classic {
        warn!("Classic mode is vulnerable to DPI detection; enable only for legacy clients");
    }
    info!("TLS domain: {}", config.censorship.tls_domain);
    if let Some(ref sock) = config.censorship.mask_unix_sock {
        info!("Mask: {} -> unix:{}", config.censorship.mask, sock);
        if !std::path::Path::new(sock).exists() {
            warn!(
                "Unix socket '{}' does not exist yet. Masking will fail until it appears.",
                sock
            );
        }
    } else {
        info!(
            "Mask: {} -> {}:{}",
            config.censorship.mask,
            config
                .censorship
                .mask_host
                .as_deref()
                .unwrap_or(&config.censorship.tls_domain),
            config.censorship.mask_port
        );
    }

    if config.censorship.tls_domain == "www.google.com" {
        warn!("Using default tls_domain. Consider setting a custom domain.");
    }

    let stats = Arc::new(Stats::new());
    stats.apply_telemetry_policy(TelemetryPolicy::from_config(&config.general.telemetry));

    let upstream_manager = Arc::new(UpstreamManager::new(
        config.upstreams.clone(),
        config.general.upstream_connect_retry_attempts,
        config.general.upstream_connect_retry_backoff_ms,
        config.general.upstream_connect_budget_ms,
        config.general.tg_connect,
        config.general.upstream_unhealthy_fail_threshold,
        config.general.upstream_connect_failfast_hard_errors,
        stats.clone(),
    ));
    let ip_tracker = Arc::new(UserIpTracker::new());
    ip_tracker
        .load_limits(
            config.access.user_max_unique_ips_global_each,
            &config.access.user_max_unique_ips,
        )
        .await;
    ip_tracker
        .set_limit_policy(
            config.access.user_max_unique_ips_mode,
            config.access.user_max_unique_ips_window_secs,
        )
        .await;
    if config.access.user_max_unique_ips_global_each > 0
        || !config.access.user_max_unique_ips.is_empty()
    {
        info!(
            global_each_limit = config.access.user_max_unique_ips_global_each,
            explicit_user_limits = config.access.user_max_unique_ips.len(),
            "User unique IP limits configured"
        );
    }
    if !config.network.dns_overrides.is_empty() {
        info!(
            "Runtime DNS overrides configured: {} entries",
            config.network.dns_overrides.len()
        );
    }

    let (api_config_tx, api_config_rx) = watch::channel(Arc::new(config.clone()));
    let (detected_ips_tx, detected_ips_rx) = watch::channel((None::<IpAddr>, None::<IpAddr>));
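    // Admission starts open in direct mode and closed in middle-proxy mode;
    // the gate is flipped later by admission::configure_admission_gate once
    // the ME pool becomes usable.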
    let initial_admission_open = !config.general.use_middle_proxy;
    let (admission_tx, admission_rx) = watch::channel(initial_admission_open);
    let initial_route_mode = if config.general.use_middle_proxy {
        RelayRouteMode::Middle
    } else {
        RelayRouteMode::Direct
    };
    let route_runtime = Arc::new(RouteRuntimeController::new(initial_route_mode));
    let api_me_pool = Arc::new(RwLock::new(None::<Arc<MePool>>));
    startup_tracker
        .start_component(
            COMPONENT_API_BOOTSTRAP,
            Some("spawn API listener task".to_string()),
        )
        .await;

    if config.server.api.enabled {
        let listen = match config.server.api.listen.parse::<SocketAddr>() {
            Ok(listen) => listen,
            Err(error) => {
                warn!(
                    error = %error,
                    listen = %config.server.api.listen,
                    "Invalid server.api.listen; API is disabled"
                );
                SocketAddr::from(([127, 0, 0, 1], 0))
            }
        };
        if listen.port() != 0 {
            let stats_api = stats.clone();
            let ip_tracker_api = ip_tracker.clone();
            let me_pool_api = api_me_pool.clone();
            let upstream_manager_api = upstream_manager.clone();
            let route_runtime_api = route_runtime.clone();
            let config_rx_api = api_config_rx.clone();
            let admission_rx_api = admission_rx.clone();
            let config_path_api = config_path.clone();
            let startup_tracker_api = startup_tracker.clone();
            let detected_ips_rx_api = detected_ips_rx.clone();
            tokio::spawn(async move {
                api::serve(
                    listen,
                    stats_api,
                    ip_tracker_api,
                    me_pool_api,
                    route_runtime_api,
                    upstream_manager_api,
                    config_rx_api,
                    admission_rx_api,
                    config_path_api,
                    detected_ips_rx_api,
                    process_started_at_epoch_secs,
                    startup_tracker_api,
                )
                .await;
            });
            startup_tracker
                .complete_component(
                    COMPONENT_API_BOOTSTRAP,
                    Some(format!("api task spawned on {}", listen)),
                )
                .await;
        } else {
            startup_tracker
                .skip_component(
                    COMPONENT_API_BOOTSTRAP,
                    Some("server.api.listen has zero port".to_string()),
                )
                .await;
        }
    } else {
        startup_tracker
            .skip_component(
                COMPONENT_API_BOOTSTRAP,
                Some("server.api.enabled is false".to_string()),
            )
            .await;
    }

    let mut tls_domains = Vec::with_capacity(1 + config.censorship.tls_domains.len());
    tls_domains.push(config.censorship.tls_domain.clone());
    for d in &config.censorship.tls_domains {
        if !tls_domains.contains(d) {
            tls_domains.push(d.clone());
        }
    }

    let tls_cache = tls_bootstrap::bootstrap_tls_front(
        &config,
        &tls_domains,
        upstream_manager.clone(),
        &startup_tracker,
    )
    .await;

    startup_tracker
        .start_component(
            COMPONENT_NETWORK_PROBE,
            Some("probe network capabilities".to_string()),
        )
        .await;
    let probe = run_probe(
        &config.network,
        &config.upstreams,
        config.general.middle_proxy_nat_probe,
        config.general.stun_nat_probe_concurrency,
    )
    .await?;
    detected_ips_tx.send_replace((
        probe.detected_ipv4.map(IpAddr::V4),
        probe.detected_ipv6.map(IpAddr::V6),
    ));
    let decision =
        decide_network_capabilities(&config.network, &probe, config.general.middle_proxy_nat_ip);
    log_probe_result(&probe, &decision);
    startup_tracker
        .complete_component(
            COMPONENT_NETWORK_PROBE,
            Some("network capabilities determined".to_string()),
        )
        .await;

    let prefer_ipv6 = decision.prefer_ipv6();
    let mut use_middle_proxy = config.general.use_middle_proxy;
    let beobachten = Arc::new(BeobachtenStore::new());
    let rng = Arc::new(SecureRandom::new());

    // Connection concurrency limit (0 = unlimited)
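    // For example: max_connections = 0 maps to Semaphore::MAX_PERMITS
    // (effectively unlimited), while max_connections = 4096 caps the proxy at
    // 4096 concurrent client handler tasks (illustrative value).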
    let max_connections_limit = if config.server.max_connections == 0 {
        Semaphore::MAX_PERMITS
    } else {
        config.server.max_connections as usize
    };
    let max_connections = Arc::new(Semaphore::new(max_connections_limit));

    let me2dc_fallback = config.general.me2dc_fallback;
    let me_init_retry_attempts = config.general.me_init_retry_attempts;
    if use_middle_proxy && !decision.ipv4_me && !decision.ipv6_me {
        if me2dc_fallback {
            warn!("No usable IP family for Middle Proxy detected; falling back to direct DC");
            use_middle_proxy = false;
        } else {
            warn!(
                "No usable IP family for Middle Proxy detected; me2dc_fallback=false, ME init retries stay active"
            );
        }
    }

    if use_middle_proxy {
        startup_tracker
            .set_me_status(StartupMeStatus::Initializing, COMPONENT_ME_SECRET_FETCH)
            .await;
        startup_tracker
            .start_component(
                COMPONENT_ME_SECRET_FETCH,
                Some("fetch proxy-secret from source/cache".to_string()),
            )
            .await;
        startup_tracker
            .set_me_retry_limit(if !me2dc_fallback || me_init_retry_attempts == 0 {
                "unlimited".to_string()
            } else {
                me_init_retry_attempts.to_string()
            })
            .await;
    } else {
        startup_tracker
            .set_me_status(StartupMeStatus::Skipped, "skipped")
            .await;
        startup_tracker
            .skip_component(
                COMPONENT_ME_SECRET_FETCH,
                Some("middle proxy mode disabled".to_string()),
            )
            .await;
        startup_tracker
            .skip_component(
                COMPONENT_ME_PROXY_CONFIG_V4,
                Some("middle proxy mode disabled".to_string()),
            )
            .await;
        startup_tracker
            .skip_component(
                COMPONENT_ME_PROXY_CONFIG_V6,
                Some("middle proxy mode disabled".to_string()),
            )
            .await;
        startup_tracker
            .skip_component(
                COMPONENT_ME_POOL_CONSTRUCT,
                Some("middle proxy mode disabled".to_string()),
            )
            .await;
        startup_tracker
            .skip_component(
                COMPONENT_ME_POOL_INIT_STAGE1,
                Some("middle proxy mode disabled".to_string()),
            )
            .await;
    }

    let me_pool: Option<Arc<MePool>> = me_startup::initialize_me_pool(
        use_middle_proxy,
        &config,
        &decision,
        &probe,
        &startup_tracker,
        upstream_manager.clone(),
        rng.clone(),
        stats.clone(),
        api_me_pool.clone(),
    )
    .await;

    // If ME failed to initialize, force direct-only mode.
    if me_pool.is_some() {
        startup_tracker.set_transport_mode("middle_proxy").await;
        startup_tracker.set_degraded(false).await;
        info!("Transport: Middle-End Proxy - all DC-over-RPC");
    } else {
        let _ = use_middle_proxy;
        use_middle_proxy = false;
        // Make the runtime config reflect direct-only mode for handlers.
        config.general.use_middle_proxy = false;
        startup_tracker.set_transport_mode("direct").await;
        startup_tracker.set_degraded(true).await;
        if me2dc_fallback {
            startup_tracker
                .set_me_status(StartupMeStatus::Failed, "fallback_to_direct")
                .await;
        } else {
            startup_tracker
                .set_me_status(StartupMeStatus::Skipped, "skipped")
                .await;
        }
        info!("Transport: Direct DC - TCP - standard DC-over-TCP");
    }

    // Freeze the config after the possible fallback decision
    let config = Arc::new(config);

    let replay_checker = Arc::new(ReplayChecker::new(
        config.access.replay_check_len,
        Duration::from_secs(config.access.replay_window_secs),
    ));

    let buffer_pool = Arc::new(BufferPool::with_config(64 * 1024, 4096));

    connectivity::run_startup_connectivity(
        &config,
        &me_pool,
        rng.clone(),
        &startup_tracker,
        upstream_manager.clone(),
        prefer_ipv6,
        &decision,
        process_started_at,
        api_me_pool.clone(),
    )
    .await;

    let runtime_watches = runtime_tasks::spawn_runtime_tasks(
        &config,
        &config_path,
        &probe,
        prefer_ipv6,
        decision.ipv4_dc,
        decision.ipv6_dc,
        &startup_tracker,
        stats.clone(),
        upstream_manager.clone(),
        replay_checker.clone(),
        me_pool.clone(),
        rng.clone(),
        ip_tracker.clone(),
        beobachten.clone(),
        api_config_tx.clone(),
        me_pool.clone(),
    )
    .await;
    let config_rx = runtime_watches.config_rx;
    let log_level_rx = runtime_watches.log_level_rx;
    let detected_ip_v4 = runtime_watches.detected_ip_v4;
    let detected_ip_v6 = runtime_watches.detected_ip_v6;

    admission::configure_admission_gate(
        &config,
        me_pool.clone(),
        route_runtime.clone(),
        &admission_tx,
        config_rx.clone(),
    )
    .await;
    let _admission_tx_hold = admission_tx;
    let shared_state = ProxySharedState::new();
    conntrack_control::spawn_conntrack_controller(
        config_rx.clone(),
        stats.clone(),
        shared_state.clone(),
    );

    let bound = listeners::bind_listeners(
        &config,
        decision.ipv4_dc,
        decision.ipv6_dc,
        detected_ip_v4,
        detected_ip_v6,
        &startup_tracker,
        config_rx.clone(),
        admission_rx.clone(),
        stats.clone(),
        upstream_manager.clone(),
        replay_checker.clone(),
        buffer_pool.clone(),
        rng.clone(),
        me_pool.clone(),
        route_runtime.clone(),
        tls_cache.clone(),
        ip_tracker.clone(),
        beobachten.clone(),
        shared_state.clone(),
        max_connections.clone(),
    )
    .await?;
    let listeners = bound.listeners;
    let has_unix_listener = bound.has_unix_listener;

    if listeners.is_empty() && !has_unix_listener {
        error!("No listeners. Exiting.");
        std::process::exit(1);
    }

    // Drop privileges after binding sockets (which may require root for ports < 1024)
    if daemon_opts.user.is_some() || daemon_opts.group.is_some() {
        if let Err(e) = drop_privileges(daemon_opts.user.as_deref(), daemon_opts.group.as_deref()) {
            error!(error = %e, "Failed to drop privileges");
            std::process::exit(1);
        }
    }

    runtime_tasks::apply_runtime_log_filter(
        has_rust_log,
        &effective_log_level,
        filter_handle,
        log_level_rx,
    )
    .await;

    runtime_tasks::spawn_metrics_if_configured(
        &config,
        &startup_tracker,
        stats.clone(),
        beobachten.clone(),
        ip_tracker.clone(),
        config_rx.clone(),
    )
    .await;

    runtime_tasks::mark_runtime_ready(&startup_tracker).await;

    // Spawn signal handlers for SIGUSR1/SIGUSR2 (non-shutdown signals)
    shutdown::spawn_signal_handlers(stats.clone(), process_started_at);

    listeners::spawn_tcp_accept_loops(
        listeners,
        config_rx.clone(),
        admission_rx.clone(),
        stats.clone(),
        upstream_manager.clone(),
        replay_checker.clone(),
        buffer_pool.clone(),
        rng.clone(),
        me_pool.clone(),
        route_runtime.clone(),
        tls_cache.clone(),
        ip_tracker.clone(),
        beobachten.clone(),
        shared_state,
        max_connections.clone(),
    );

    shutdown::wait_for_shutdown(process_started_at, me_pool, stats).await;

    Ok(())
}
@ -0,0 +1,364 @@
use std::net::IpAddr;
use std::path::Path;
use std::sync::Arc;

use tokio::sync::{mpsc, watch};
use tracing::{debug, warn};
use tracing_subscriber::EnvFilter;
use tracing_subscriber::reload;

use crate::config::hot_reload::spawn_config_watcher;
use crate::config::{LogLevel, ProxyConfig};
use crate::crypto::SecureRandom;
use crate::ip_tracker::UserIpTracker;
use crate::metrics;
use crate::network::probe::NetworkProbe;
use crate::startup::{
    COMPONENT_CONFIG_WATCHER_START, COMPONENT_METRICS_START, COMPONENT_RUNTIME_READY,
    StartupTracker,
};
use crate::stats::beobachten::BeobachtenStore;
use crate::stats::telemetry::TelemetryPolicy;
use crate::stats::{ReplayChecker, Stats};
use crate::transport::UpstreamManager;
use crate::transport::middle_proxy::{MePool, MeReinitTrigger};

use super::helpers::write_beobachten_snapshot;

pub(crate) struct RuntimeWatches {
    pub(crate) config_rx: watch::Receiver<Arc<ProxyConfig>>,
    pub(crate) log_level_rx: watch::Receiver<LogLevel>,
    pub(crate) detected_ip_v4: Option<IpAddr>,
    pub(crate) detected_ip_v6: Option<IpAddr>,
}

#[allow(clippy::too_many_arguments)]
pub(crate) async fn spawn_runtime_tasks(
    config: &Arc<ProxyConfig>,
    config_path: &Path,
    probe: &NetworkProbe,
    prefer_ipv6: bool,
    decision_ipv4_dc: bool,
    decision_ipv6_dc: bool,
    startup_tracker: &Arc<StartupTracker>,
    stats: Arc<Stats>,
    upstream_manager: Arc<UpstreamManager>,
    replay_checker: Arc<ReplayChecker>,
    me_pool: Option<Arc<MePool>>,
    rng: Arc<SecureRandom>,
    ip_tracker: Arc<UserIpTracker>,
    beobachten: Arc<BeobachtenStore>,
    api_config_tx: watch::Sender<Arc<ProxyConfig>>,
    me_pool_for_policy: Option<Arc<MePool>>,
) -> RuntimeWatches {
    let um_clone = upstream_manager.clone();
    let dc_overrides_for_health = config.dc_overrides.clone();
    tokio::spawn(async move {
        um_clone
            .run_health_checks(
                prefer_ipv6,
                decision_ipv4_dc,
                decision_ipv6_dc,
                dc_overrides_for_health,
            )
            .await;
    });

    let rc_clone = replay_checker.clone();
    tokio::spawn(async move {
        rc_clone.run_periodic_cleanup().await;
    });

    let detected_ip_v4: Option<IpAddr> = probe.detected_ipv4.map(IpAddr::V4);
    let detected_ip_v6: Option<IpAddr> = probe.detected_ipv6.map(IpAddr::V6);
    debug!(
        "Detected IPs: v4={:?} v6={:?}",
        detected_ip_v4, detected_ip_v6
    );

    startup_tracker
        .start_component(
            COMPONENT_CONFIG_WATCHER_START,
            Some("spawn config hot-reload watcher".to_string()),
        )
        .await;
    let (config_rx, log_level_rx): (watch::Receiver<Arc<ProxyConfig>>, watch::Receiver<LogLevel>) =
        spawn_config_watcher(
            config_path.to_path_buf(),
            config.clone(),
            detected_ip_v4,
            detected_ip_v6,
        );
    startup_tracker
        .complete_component(
            COMPONENT_CONFIG_WATCHER_START,
            Some("config hot-reload watcher started".to_string()),
        )
        .await;
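    // Bridge: forward hot-reload snapshots from the watcher channel into the
    // API's own watch channel so API handlers observe config updates too.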
    let mut config_rx_api_bridge = config_rx.clone();
    let api_config_tx_bridge = api_config_tx.clone();
    tokio::spawn(async move {
        loop {
            if config_rx_api_bridge.changed().await.is_err() {
                break;
            }
            let cfg = config_rx_api_bridge.borrow_and_update().clone();
            api_config_tx_bridge.send_replace(cfg);
        }
    });

    let stats_policy = stats.clone();
    let mut config_rx_policy = config_rx.clone();
    tokio::spawn(async move {
        loop {
            if config_rx_policy.changed().await.is_err() {
                break;
            }
            let cfg = config_rx_policy.borrow_and_update().clone();
            stats_policy
                .apply_telemetry_policy(TelemetryPolicy::from_config(&cfg.general.telemetry));
            if let Some(pool) = &me_pool_for_policy {
                pool.update_runtime_transport_policy(
                    cfg.general.me_socks_kdf_policy,
                    cfg.general.me_route_backpressure_base_timeout_ms,
                    cfg.general.me_route_backpressure_high_timeout_ms,
                    cfg.general.me_route_backpressure_high_watermark_pct,
                    cfg.general.me_reader_route_data_wait_ms,
                );
            }
        }
    });

    let ip_tracker_policy = ip_tracker.clone();
    let mut config_rx_ip_limits = config_rx.clone();
    tokio::spawn(async move {
        let mut prev_limits = config_rx_ip_limits
            .borrow()
            .access
            .user_max_unique_ips
            .clone();
        let mut prev_global_each = config_rx_ip_limits
            .borrow()
            .access
            .user_max_unique_ips_global_each;
        let mut prev_mode = config_rx_ip_limits.borrow().access.user_max_unique_ips_mode;
        let mut prev_window = config_rx_ip_limits
            .borrow()
            .access
            .user_max_unique_ips_window_secs;

        loop {
            if config_rx_ip_limits.changed().await.is_err() {
                break;
            }
            let cfg = config_rx_ip_limits.borrow_and_update().clone();

            if prev_limits != cfg.access.user_max_unique_ips
                || prev_global_each != cfg.access.user_max_unique_ips_global_each
            {
                ip_tracker_policy
                    .load_limits(
                        cfg.access.user_max_unique_ips_global_each,
                        &cfg.access.user_max_unique_ips,
                    )
                    .await;
                prev_limits = cfg.access.user_max_unique_ips.clone();
                prev_global_each = cfg.access.user_max_unique_ips_global_each;
            }

            if prev_mode != cfg.access.user_max_unique_ips_mode
                || prev_window != cfg.access.user_max_unique_ips_window_secs
            {
                ip_tracker_policy
                    .set_limit_policy(
                        cfg.access.user_max_unique_ips_mode,
                        cfg.access.user_max_unique_ips_window_secs,
                    )
                    .await;
                prev_mode = cfg.access.user_max_unique_ips_mode;
                prev_window = cfg.access.user_max_unique_ips_window_secs;
            }
        }
    });

    let beobachten_writer = beobachten.clone();
    let config_rx_beobachten = config_rx.clone();
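    // Periodic flusher: every beobachten_flush_secs, write a TTL-filtered
    // snapshot of the beobachten store to beobachten_file (when enabled).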
|
||||
tokio::spawn(async move {
|
||||
loop {
|
||||
let cfg = config_rx_beobachten.borrow().clone();
|
||||
let sleep_secs = cfg.general.beobachten_flush_secs.max(1);
|
||||
|
||||
if cfg.general.beobachten {
|
||||
let ttl = std::time::Duration::from_secs(
|
||||
cfg.general.beobachten_minutes.saturating_mul(60),
|
||||
);
|
||||
let path = cfg.general.beobachten_file.clone();
|
||||
let snapshot = beobachten_writer.snapshot_text(ttl);
|
||||
if let Err(e) = write_beobachten_snapshot(&path, &snapshot).await {
|
||||
warn!(error = %e, path = %path, "Failed to flush beobachten snapshot");
|
||||
}
|
||||
}
|
||||
|
||||
tokio::time::sleep(std::time::Duration::from_secs(sleep_secs)).await;
|
||||
}
|
||||
});
|
||||
|
||||
    if let Some(pool) = me_pool {
        let reinit_trigger_capacity = config.general.me_reinit_trigger_channel.max(1);
        let (reinit_tx, reinit_rx) = mpsc::channel::<MeReinitTrigger>(reinit_trigger_capacity);

        let pool_clone_sched = pool.clone();
        let rng_clone_sched = rng.clone();
        let config_rx_clone_sched = config_rx.clone();
        tokio::spawn(async move {
            crate::transport::middle_proxy::me_reinit_scheduler(
                pool_clone_sched,
                rng_clone_sched,
                config_rx_clone_sched,
                reinit_rx,
            )
            .await;
        });

        let pool_clone = pool.clone();
        let config_rx_clone = config_rx.clone();
        let reinit_tx_updater = reinit_tx.clone();
        tokio::spawn(async move {
            crate::transport::middle_proxy::me_config_updater(
                pool_clone,
                config_rx_clone,
                reinit_tx_updater,
            )
            .await;
        });

        let config_rx_clone_rot = config_rx.clone();
        let reinit_tx_rotation = reinit_tx.clone();
        tokio::spawn(async move {
            crate::transport::middle_proxy::me_rotation_task(
                config_rx_clone_rot,
                reinit_tx_rotation,
            )
            .await;
        });
    }

    RuntimeWatches {
        config_rx,
        log_level_rx,
        detected_ip_v4,
        detected_ip_v6,
    }
}
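
// Illustrative sketch (editorial addition, not in the original source): the
// watch loops above assume a sender owned by the hot-reload path. Publishing
// a new snapshot wakes every `config_rx.changed().await` in the tasks above.
// The function name here is hypothetical.
#[allow(dead_code)]
fn publish_reload_sketch(tx: &watch::Sender<Arc<ProxyConfig>>, new_cfg: ProxyConfig) {
    // send_replace swaps in the new value and notifies all receivers.
    tx.send_replace(Arc::new(new_cfg));
}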

pub(crate) async fn apply_runtime_log_filter(
    has_rust_log: bool,
    effective_log_level: &LogLevel,
    filter_handle: reload::Handle<EnvFilter, tracing_subscriber::Registry>,
    mut log_level_rx: watch::Receiver<LogLevel>,
) {
    let runtime_filter = if has_rust_log {
        EnvFilter::from_default_env()
    } else if matches!(effective_log_level, LogLevel::Silent) {
        EnvFilter::new("warn,telemt::links=info")
    } else {
        EnvFilter::new(effective_log_level.to_filter_str())
    };
    filter_handle
        .reload(runtime_filter)
        .expect("Failed to switch log filter");

    tokio::spawn(async move {
        loop {
            if log_level_rx.changed().await.is_err() {
                break;
            }
            let level = log_level_rx.borrow_and_update().clone();
            let new_filter = tracing_subscriber::EnvFilter::new(level.to_filter_str());
            if let Err(e) = filter_handle.reload(new_filter) {
                tracing::error!("config reload: failed to update log filter: {}", e);
            }
        }
    });
}
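
// Illustrative sketch (editorial addition; hypothetical helper, not in the
// original source): a reload handle like the one consumed above is typically
// created when the subscriber is first built, via `reload::Layer`.
#[allow(dead_code)]
fn init_reloadable_filter_sketch() -> reload::Handle<EnvFilter, tracing_subscriber::Registry> {
    use tracing_subscriber::{fmt, prelude::*};

    // Wrap the initial filter in a reload layer; the returned handle can
    // swap the filter at runtime, as `apply_runtime_log_filter` does above.
    let (filter, handle) = reload::Layer::new(EnvFilter::new("info"));
    tracing_subscriber::registry()
        .with(filter)
        .with(fmt::Layer::default())
        .init();
    handle
}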

pub(crate) async fn spawn_metrics_if_configured(
    config: &Arc<ProxyConfig>,
    startup_tracker: &Arc<StartupTracker>,
    stats: Arc<Stats>,
    beobachten: Arc<BeobachtenStore>,
    ip_tracker: Arc<UserIpTracker>,
    config_rx: watch::Receiver<Arc<ProxyConfig>>,
) {
    // metrics_listen takes precedence; fall back to metrics_port for backward compat.
    let metrics_target: Option<(u16, Option<String>)> =
        if let Some(ref listen) = config.server.metrics_listen {
            match listen.parse::<std::net::SocketAddr>() {
                Ok(addr) => Some((addr.port(), Some(listen.clone()))),
                Err(e) => {
                    startup_tracker
                        .skip_component(
                            COMPONENT_METRICS_START,
                            Some(format!("invalid metrics_listen \"{}\": {}", listen, e)),
                        )
                        .await;
                    None
                }
            }
        } else {
            config.server.metrics_port.map(|p| (p, None))
        };

    if let Some((port, listen)) = metrics_target {
        let fallback_label = format!("port {}", port);
        let label = listen.as_deref().unwrap_or(&fallback_label);
        startup_tracker
            .start_component(
                COMPONENT_METRICS_START,
                Some(format!("spawn metrics endpoint on {}", label)),
            )
            .await;
        let stats = stats.clone();
        let beobachten = beobachten.clone();
        let config_rx_metrics = config_rx.clone();
        let ip_tracker_metrics = ip_tracker.clone();
        let whitelist = config.server.metrics_whitelist.clone();
        let listen_backlog = config.server.listen_backlog;
        tokio::spawn(async move {
            metrics::serve(
                port,
                listen,
                listen_backlog,
                stats,
                beobachten,
                ip_tracker_metrics,
                config_rx_metrics,
                whitelist,
            )
            .await;
        });
        startup_tracker
            .complete_component(
                COMPONENT_METRICS_START,
                Some("metrics task spawned".to_string()),
            )
            .await;
    } else if config.server.metrics_listen.is_none() {
        startup_tracker
            .skip_component(
                COMPONENT_METRICS_START,
                Some("server.metrics_port is not configured".to_string()),
            )
            .await;
    }
}
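
// Note on the precedence above (illustrative values, editorial addition):
// metrics_listen = "127.0.0.1:9464" binds exactly that address, while
// metrics_port = 9464 is the legacy form using the default host. An invalid
// metrics_listen is reported via the startup tracker and metrics are skipped;
// it does not fall back to metrics_port.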

pub(crate) async fn mark_runtime_ready(startup_tracker: &Arc<StartupTracker>) {
    startup_tracker
        .complete_component(
            COMPONENT_RUNTIME_READY,
            Some("startup pipeline is fully initialized".to_string()),
        )
        .await;
    startup_tracker.mark_ready().await;
}

@@ -0,0 +1,206 @@
//! Shutdown and signal handling for telemt.
//!
//! Handles graceful shutdown on various signals:
//! - SIGINT (Ctrl+C) / SIGTERM: Graceful shutdown
//! - SIGQUIT: Graceful shutdown with stats dump
//! - SIGUSR1: Reserved for log rotation (logs acknowledgment)
//! - SIGUSR2: Dump runtime status to log
//!
//! SIGHUP is handled separately in config/hot_reload.rs for config reload.

use std::sync::Arc;
use std::time::{Duration, Instant};

#[cfg(not(unix))]
use tokio::signal;
#[cfg(unix)]
use tokio::signal::unix::{SignalKind, signal};
use tracing::{info, warn};

use crate::stats::Stats;
use crate::transport::middle_proxy::MePool;

use super::helpers::{format_uptime, unit_label};

/// Signal that triggered shutdown.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum ShutdownSignal {
    /// SIGINT (Ctrl+C)
    Interrupt,
    /// SIGTERM
    Terminate,
    /// SIGQUIT (with stats dump)
    Quit,
}

impl std::fmt::Display for ShutdownSignal {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        match self {
            ShutdownSignal::Interrupt => write!(f, "SIGINT"),
            ShutdownSignal::Terminate => write!(f, "SIGTERM"),
            ShutdownSignal::Quit => write!(f, "SIGQUIT"),
        }
    }
}

/// Waits for a shutdown signal and performs graceful shutdown.
pub(crate) async fn wait_for_shutdown(
    process_started_at: Instant,
    me_pool: Option<Arc<MePool>>,
    stats: Arc<Stats>,
) {
    let signal = wait_for_shutdown_signal().await;
    perform_shutdown(signal, process_started_at, me_pool, &stats).await;
}

/// Waits for any shutdown signal (SIGINT, SIGTERM, SIGQUIT).
#[cfg(unix)]
async fn wait_for_shutdown_signal() -> ShutdownSignal {
    let mut sigint = signal(SignalKind::interrupt()).expect("Failed to register SIGINT handler");
    let mut sigterm = signal(SignalKind::terminate()).expect("Failed to register SIGTERM handler");
    let mut sigquit = signal(SignalKind::quit()).expect("Failed to register SIGQUIT handler");

    tokio::select! {
        _ = sigint.recv() => ShutdownSignal::Interrupt,
        _ = sigterm.recv() => ShutdownSignal::Terminate,
        _ = sigquit.recv() => ShutdownSignal::Quit,
    }
}

#[cfg(not(unix))]
async fn wait_for_shutdown_signal() -> ShutdownSignal {
    signal::ctrl_c().await.expect("Failed to listen for Ctrl+C");
    ShutdownSignal::Interrupt
}

/// Performs graceful shutdown sequence.
async fn perform_shutdown(
    signal: ShutdownSignal,
    process_started_at: Instant,
    me_pool: Option<Arc<MePool>>,
    stats: &Stats,
) {
    let shutdown_started_at = Instant::now();
    info!(signal = %signal, "Received shutdown signal");

    // Dump stats if SIGQUIT
    if signal == ShutdownSignal::Quit {
        dump_stats(stats, process_started_at);
    }

    info!("Shutting down...");
    let uptime_secs = process_started_at.elapsed().as_secs();
    info!("Uptime: {}", format_uptime(uptime_secs));

    // Graceful ME pool shutdown
    if let Some(pool) = &me_pool {
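        // Best-effort: give the pool up to 2s to broadcast RPC_CLOSE_CONN to
        // the middle proxies; on timeout, log and continue shutting down.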
        match tokio::time::timeout(Duration::from_secs(2), pool.shutdown_send_close_conn_all())
            .await
        {
            Ok(total) => {
                info!(
                    close_conn_sent = total,
                    "ME shutdown: RPC_CLOSE_CONN broadcast completed"
                );
            }
            Err(_) => {
                warn!("ME shutdown: RPC_CLOSE_CONN broadcast timed out");
            }
        }
    }

    let shutdown_secs = shutdown_started_at.elapsed().as_secs();
    info!(
        "Shutdown completed successfully in {} {}.",
        shutdown_secs,
        unit_label(shutdown_secs, "second", "seconds")
    );
}

/// Dumps runtime statistics to the log.
fn dump_stats(stats: &Stats, process_started_at: Instant) {
    let uptime_secs = process_started_at.elapsed().as_secs();

    info!("=== Runtime Statistics Dump ===");
    info!("Uptime: {}", format_uptime(uptime_secs));

    // Connection stats
    info!(
        "Connections: total={}, current={} (direct={}, me={}), bad={}",
        stats.get_connects_all(),
        stats.get_current_connections_total(),
        stats.get_current_connections_direct(),
        stats.get_current_connections_me(),
        stats.get_connects_bad(),
    );

    // ME pool stats
    info!(
        "ME keepalive: sent={}, pong={}, failed={}, timeout={}",
        stats.get_me_keepalive_sent(),
        stats.get_me_keepalive_pong(),
        stats.get_me_keepalive_failed(),
        stats.get_me_keepalive_timeout(),
    );

    // Relay stats
    info!(
        "Relay idle: soft_mark={}, hard_close={}, pressure_evict={}",
        stats.get_relay_idle_soft_mark_total(),
        stats.get_relay_idle_hard_close_total(),
        stats.get_relay_pressure_evict_total(),
    );

    info!("=== End Statistics Dump ===");
}

/// Spawns a background task to handle operational signals (SIGUSR1, SIGUSR2).
///
/// These signals don't trigger shutdown but perform specific actions:
/// - SIGUSR1: Log rotation acknowledgment (for external log rotation tools)
/// - SIGUSR2: Dump runtime status to log
#[cfg(unix)]
pub(crate) fn spawn_signal_handlers(stats: Arc<Stats>, process_started_at: Instant) {
    tokio::spawn(async move {
        let mut sigusr1 =
            signal(SignalKind::user_defined1()).expect("Failed to register SIGUSR1 handler");
        let mut sigusr2 =
            signal(SignalKind::user_defined2()).expect("Failed to register SIGUSR2 handler");

        loop {
            tokio::select! {
                _ = sigusr1.recv() => {
                    handle_sigusr1();
                }
                _ = sigusr2.recv() => {
                    handle_sigusr2(&stats, process_started_at);
                }
            }
        }
    });
}

/// No-op on non-Unix platforms.
#[cfg(not(unix))]
pub(crate) fn spawn_signal_handlers(_stats: Arc<Stats>, _process_started_at: Instant) {
    // No SIGUSR1/SIGUSR2 on non-Unix
}

/// Handles SIGUSR1 - log rotation signal.
///
/// This signal is typically sent by logrotate or similar tools after
/// rotating log files. Since tracing-subscriber doesn't natively support
/// reopening files, we just acknowledge the signal. If file logging is
/// added in the future, this would reopen log file handles.
#[cfg(unix)]
fn handle_sigusr1() {
    info!("SIGUSR1 received - log rotation acknowledged");
    // Future: If using file-based logging, reopen file handles here
}

/// Handles SIGUSR2 - dump runtime status.
#[cfg(unix)]
fn handle_sigusr2(stats: &Stats, process_started_at: Instant) {
    info!("SIGUSR2 received - dumping runtime status");
    dump_stats(stats, process_started_at);
}
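
// Illustrative sketch (editorial addition; hypothetical wiring, not in the
// original source): how a caller might use these entry points together.
#[allow(dead_code)]
async fn shutdown_wiring_sketch(stats: Arc<Stats>, me_pool: Option<Arc<MePool>>) {
    let process_started_at = Instant::now();
    // Operational signals (SIGUSR1/SIGUSR2) are handled in the background...
    spawn_signal_handlers(stats.clone(), process_started_at);
    // ...while this await blocks until SIGINT/SIGTERM/SIGQUIT arrives.
    wait_for_shutdown(process_started_at, me_pool, stats).await;
}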

@@ -0,0 +1,188 @@
use std::sync::Arc;
use std::time::Duration;

use rand::RngExt;
use tracing::warn;

use crate::config::ProxyConfig;
use crate::startup::{COMPONENT_TLS_FRONT_BOOTSTRAP, StartupTracker};
use crate::tls_front::TlsFrontCache;
use crate::tls_front::fetcher::TlsFetchStrategy;
use crate::transport::UpstreamManager;

pub(crate) async fn bootstrap_tls_front(
    config: &ProxyConfig,
    tls_domains: &[String],
    upstream_manager: Arc<UpstreamManager>,
    startup_tracker: &Arc<StartupTracker>,
) -> Option<Arc<TlsFrontCache>> {
    startup_tracker
        .start_component(
            COMPONENT_TLS_FRONT_BOOTSTRAP,
            Some("initialize TLS front cache/bootstrap tasks".to_string()),
        )
        .await;

    let tls_cache: Option<Arc<TlsFrontCache>> = if config.censorship.tls_emulation {
        let cache = Arc::new(TlsFrontCache::new(
            tls_domains,
            config.censorship.fake_cert_len,
            &config.censorship.tls_front_dir,
        ));
        cache.load_from_disk().await;

        let port = config.censorship.mask_port;
        let proxy_protocol = config.censorship.mask_proxy_protocol;
        let mask_host = config
            .censorship
            .mask_host
            .clone()
            .unwrap_or_else(|| config.censorship.tls_domain.clone());
        let mask_unix_sock = config.censorship.mask_unix_sock.clone();
        let tls_fetch_scope = (!config.censorship.tls_fetch_scope.is_empty())
            .then(|| config.censorship.tls_fetch_scope.clone());
        let tls_fetch = config.censorship.tls_fetch.clone();
        let fetch_strategy = TlsFetchStrategy {
            profiles: tls_fetch.profiles,
            strict_route: tls_fetch.strict_route,
            attempt_timeout: Duration::from_millis(tls_fetch.attempt_timeout_ms.max(1)),
            total_budget: Duration::from_millis(tls_fetch.total_budget_ms.max(1)),
            grease_enabled: tls_fetch.grease_enabled,
            deterministic: tls_fetch.deterministic,
            profile_cache_ttl: Duration::from_secs(tls_fetch.profile_cache_ttl_secs),
        };
        let fetch_timeout = fetch_strategy.total_budget;

        let cache_initial = cache.clone();
        let domains_initial = tls_domains.to_vec();
        let host_initial = mask_host.clone();
        let unix_sock_initial = mask_unix_sock.clone();
        let scope_initial = tls_fetch_scope.clone();
        let upstream_initial = upstream_manager.clone();
        let strategy_initial = fetch_strategy.clone();
        tokio::spawn(async move {
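            // Fetch all fronting domains concurrently; a failed fetch leaves
            // the cached/default entry for that domain in place.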
            let mut join = tokio::task::JoinSet::new();
            for domain in domains_initial {
                let cache_domain = cache_initial.clone();
                let host_domain = host_initial.clone();
                let unix_sock_domain = unix_sock_initial.clone();
                let scope_domain = scope_initial.clone();
                let upstream_domain = upstream_initial.clone();
                let strategy_domain = strategy_initial.clone();
                join.spawn(async move {
                    match crate::tls_front::fetcher::fetch_real_tls_with_strategy(
                        &host_domain,
                        port,
                        &domain,
                        &strategy_domain,
                        Some(upstream_domain),
                        scope_domain.as_deref(),
                        proxy_protocol,
                        unix_sock_domain.as_deref(),
                    )
                    .await
                    {
                        Ok(res) => cache_domain.update_from_fetch(&domain, res).await,
                        Err(e) => {
                            warn!(domain = %domain, error = %e, "TLS emulation initial fetch failed")
                        }
                    }
                });
            }
            while let Some(res) = join.join_next().await {
                if let Err(e) = res {
                    warn!(error = %e, "TLS emulation initial fetch task join failed");
                }
            }
        });

        let cache_timeout = cache.clone();
        let domains_timeout = tls_domains.to_vec();
        let fake_cert_len = config.censorship.fake_cert_len;
        tokio::spawn(async move {
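            // One-shot watchdog: once the total fetch budget has elapsed, warn
            // for any domain still serving the built-in "default" entry.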
            tokio::time::sleep(fetch_timeout).await;
            for domain in domains_timeout {
                let cached = cache_timeout.get(&domain).await;
                if cached.domain == "default" {
                    warn!(
                        domain = %domain,
                        timeout_secs = fetch_timeout.as_secs(),
                        fake_cert_len,
                        "TLS-front fetch not ready within timeout; using cache/default fake cert fallback"
                    );
                }
            }
        });

        let cache_refresh = cache.clone();
        let domains_refresh = tls_domains.to_vec();
        let host_refresh = mask_host.clone();
        let unix_sock_refresh = mask_unix_sock.clone();
        let scope_refresh = tls_fetch_scope.clone();
        let upstream_refresh = upstream_manager.clone();
        let strategy_refresh = fetch_strategy.clone();
        tokio::spawn(async move {
            loop {
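                // Randomized 4-8h cadence (4-6h base plus up to 2h of jitter),
                // presumably so refreshes neither occur at a fixed, fingerprintable
                // period nor synchronize across instances.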
                let base_secs = rand::rng().random_range(4 * 3600..=6 * 3600);
                let jitter_secs = rand::rng().random_range(0..=7200);
                tokio::time::sleep(Duration::from_secs(base_secs + jitter_secs)).await;

                let mut join = tokio::task::JoinSet::new();
                for domain in domains_refresh.clone() {
                    let cache_domain = cache_refresh.clone();
                    let host_domain = host_refresh.clone();
                    let unix_sock_domain = unix_sock_refresh.clone();
                    let scope_domain = scope_refresh.clone();
                    let upstream_domain = upstream_refresh.clone();
                    let strategy_domain = strategy_refresh.clone();
                    join.spawn(async move {
                        match crate::tls_front::fetcher::fetch_real_tls_with_strategy(
                            &host_domain,
                            port,
                            &domain,
                            &strategy_domain,
                            Some(upstream_domain),
                            scope_domain.as_deref(),
                            proxy_protocol,
                            unix_sock_domain.as_deref(),
                        )
                        .await
                        {
                            Ok(res) => cache_domain.update_from_fetch(&domain, res).await,
                            Err(e) => {
                                warn!(domain = %domain, error = %e, "TLS emulation refresh failed")
                            }
                        }
                    });
                }

                while let Some(res) = join.join_next().await {
                    if let Err(e) = res {
                        warn!(error = %e, "TLS emulation refresh task join failed");
                    }
                }
            }
        });

        Some(cache)
    } else {
        startup_tracker
            .skip_component(
                COMPONENT_TLS_FRONT_BOOTSTRAP,
                Some("censorship.tls_emulation is false".to_string()),
            )
            .await;
        None
    };

    if tls_cache.is_some() {
        startup_tracker
            .complete_component(
                COMPONENT_TLS_FRONT_BOOTSTRAP,
                Some("tls front cache is initialized".to_string()),
            )
            .await;
    }

    tls_cache
}

src/main.rs: 1275 changes (file diff suppressed because it is too large)
src/metrics.rs: 2125 changes (file diff suppressed because it is too large)

@@ -26,9 +26,7 @@ fn parse_ip_spec(ip_spec: &str) -> Result<IpAddr> {
     }

     let ip = ip_spec.parse::<IpAddr>().map_err(|_| {
-        ProxyError::Config(format!(
-            "network.dns_overrides IP is invalid: '{ip_spec}'"
-        ))
+        ProxyError::Config(format!("network.dns_overrides IP is invalid: '{ip_spec}'"))
     })?;
     if matches!(ip, IpAddr::V6(_)) {
         return Err(ProxyError::Config(format!(

@@ -103,9 +101,9 @@ pub fn validate_entries(entries: &[String]) -> Result<()> {
 /// Replace runtime DNS overrides with a new validated snapshot.
 pub fn install_entries(entries: &[String]) -> Result<()> {
     let parsed = parse_entries(entries)?;
-    let mut guard = overrides_store()
-        .write()
-        .map_err(|_| ProxyError::Config("network.dns_overrides runtime lock is poisoned".to_string()))?;
+    let mut guard = overrides_store().write().map_err(|_| {
+        ProxyError::Config("network.dns_overrides runtime lock is poisoned".to_string())
+    })?;
     *guard = parsed;
     Ok(())
 }

Some files were not shown because too many files have changed in this diff.