mirror of
https://github.com/telemt/telemt.git
synced 2026-04-23 13:34:11 +03:00
Compare commits
402 Commits
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
b8da986fd5 | ||
|
|
dd270258bf | ||
|
|
40dc6a39c1 | ||
|
|
8b5cbb7b4b | ||
|
|
0e476c71a5 | ||
|
|
be24b47300 | ||
|
|
8cd719da3f | ||
|
|
959d385015 | ||
|
|
6fa01d4c36 | ||
|
|
a383f3f1a3 | ||
|
|
7635aad1cb | ||
|
|
b315e84136 | ||
|
|
1d8de09a32 | ||
|
|
d2db9b8cf9 | ||
|
|
796279343e | ||
|
|
fabb3c45f1 | ||
|
|
161af51558 | ||
|
|
100ef0fa28 | ||
|
|
8994c27714 | ||
|
|
b950987229 | ||
|
|
a09b597fab | ||
|
|
c920dc6381 | ||
|
|
f4418d2d50 | ||
|
|
5ab3170f69 | ||
|
|
76fa06fa2e | ||
|
|
3a997fcf71 | ||
|
|
4b49b1b4f0 | ||
|
|
97926b05e8 | ||
|
|
6de17ae830 | ||
|
|
4c94f73546 | ||
|
|
d99df37ac5 | ||
|
|
d0f253b49b | ||
|
|
ef2ed3daa0 | ||
|
|
fc52cad109 | ||
|
|
98f365be44 | ||
|
|
b6c3cae2ad | ||
|
|
5f7fb15dd8 | ||
|
|
3a89f16332 | ||
|
|
aa3fcfbbe1 | ||
|
|
a616775f6d | ||
|
|
633af93b19 | ||
|
|
b41257f54e | ||
|
|
76b28aea74 | ||
|
|
aa315f5d72 | ||
|
|
c28b82a618 | ||
|
|
e7bdc80956 | ||
|
|
d641137537 | ||
|
|
4fd22b3219 | ||
|
|
fca0e3f619 | ||
|
|
9401c46727 | ||
|
|
6b3697ee87 | ||
|
|
c08160600e | ||
|
|
cd5c60ce1e | ||
|
|
ae1c97e27a | ||
|
|
cfee7de66b | ||
|
|
c942c492ad | ||
|
|
0e4be43b2b | ||
|
|
7eb2b60855 | ||
|
|
373ae3281e | ||
|
|
178630e3bf | ||
|
|
67f307cd43 | ||
|
|
ca2eaa9ead | ||
|
|
3c78daea0c | ||
|
|
d2baa8e721 | ||
|
|
a0cf4b4713 | ||
|
|
1bd249b0a9 | ||
|
|
2f47ec5797 | ||
|
|
80f3661b8e | ||
|
|
32eeb4a98c | ||
|
|
a74cc14ed9 | ||
|
|
5f77f83b48 | ||
|
|
d543dbca92 | ||
|
|
02f9d59f5a | ||
|
|
7b745bc7bc | ||
|
|
5ac0ef1ffd | ||
|
|
e1f3efb619 | ||
|
|
508eea0131 | ||
|
|
9e7f80b9b3 | ||
|
|
ee2def2e62 | ||
|
|
258191ab87 | ||
|
|
27e6dec018 | ||
|
|
26323dbebf | ||
|
|
484137793f | ||
|
|
24713feddc | ||
|
|
93f58524d1 | ||
|
|
0ff2e95e49 | ||
|
|
89222e7123 | ||
|
|
2468ee15e7 | ||
|
|
3440aa9fcd | ||
|
|
ce9698d39b | ||
|
|
ddfe7c5cfa | ||
|
|
01893f3712 | ||
|
|
8ae741ec72 | ||
|
|
6856466cef | ||
|
|
68292fbd26 | ||
|
|
e90c42ae68 | ||
|
|
9f9a5dce0d | ||
|
|
6739cd8d01 | ||
|
|
6cc8d9cb00 | ||
|
|
ce375b62e4 | ||
|
|
95971ac62c | ||
|
|
4ea2226dcd | ||
|
|
d752a440e5 | ||
|
|
5ce2ee2dae | ||
|
|
6fd9f0595d | ||
|
|
fcdd8a9796 | ||
|
|
640468d4e7 | ||
|
|
02fe89f7d0 | ||
|
|
24df865503 | ||
|
|
e9f8c79498 | ||
|
|
24ff75701e | ||
|
|
4221230969 | ||
|
|
d87196c105 | ||
|
|
da89415961 | ||
|
|
2d98ebf3c3 | ||
|
|
fb5e9947bd | ||
|
|
2ea85c00d3 | ||
|
|
2a3b6b917f | ||
|
|
83ed9065b0 | ||
|
|
44b825edf5 | ||
|
|
487e95a66e | ||
|
|
c465c200c4 | ||
|
|
d7716ad875 | ||
|
|
edce194948 | ||
|
|
13fdff750d | ||
|
|
bdcf110c87 | ||
|
|
dd12997744 | ||
|
|
fc160913bf | ||
|
|
92c22ef16d | ||
|
|
aff22d0855 | ||
|
|
b3d3bca15a | ||
|
|
92f38392eb | ||
|
|
30ef8df1b3 | ||
|
|
2e174adf16 | ||
|
|
4e803b1412 | ||
|
|
9b174318ce | ||
|
|
99edcbe818 | ||
|
|
ef7dc2b80f | ||
|
|
691607f269 | ||
|
|
55561a23bc | ||
|
|
f32c34f126 | ||
|
|
8f3bdaec2c | ||
|
|
69b02caf77 | ||
|
|
3854955069 | ||
|
|
9b84fc7a5b | ||
|
|
e7cb9238dc | ||
|
|
0e2cbe6178 | ||
|
|
cd076aeeeb | ||
|
|
d683faf922 | ||
|
|
0494f8ac8b | ||
|
|
48ce59900e | ||
|
|
84e95fd229 | ||
|
|
a80be78345 | ||
|
|
64130dd02e | ||
|
|
d62a6e0417 | ||
|
|
3260746785 | ||
|
|
8066ea2163 | ||
|
|
813f1df63e | ||
|
|
09bdafa718 | ||
|
|
fb0f75df43 | ||
|
|
39255df549 | ||
|
|
456495fd62 | ||
|
|
83cadc0bf3 | ||
|
|
0b1a8cd3f8 | ||
|
|
565b4ee923 | ||
|
|
7a9c1e79c2 | ||
|
|
02c6af4912 | ||
|
|
8ba4dea59f | ||
|
|
ccfda10713 | ||
|
|
bd1327592e | ||
|
|
30b22fe2bf | ||
|
|
651f257a5d | ||
|
|
a9209fd3c7 | ||
|
|
4ae4ca8ca8 | ||
|
|
8be1ddc0d8 | ||
|
|
b55fa5ec8f | ||
|
|
16c6ce850e | ||
|
|
12251e730f | ||
|
|
925b10f9fc | ||
|
|
306b653318 | ||
|
|
8791a52b7e | ||
|
|
0d9470a840 | ||
|
|
0d320c20e0 | ||
|
|
9b3ba2e1c6 | ||
|
|
dbadbf0221 | ||
|
|
173624c838 | ||
|
|
de2047adf2 | ||
|
|
5df2fe9f97 | ||
|
|
2510ebaa79 | ||
|
|
314f30a434 | ||
|
|
c86a511638 | ||
|
|
f1efaf4491 | ||
|
|
716b4adef2 | ||
|
|
5876623bb0 | ||
|
|
6b9c7f7862 | ||
|
|
7ea6387278 | ||
|
|
4c2bc2f41f | ||
|
|
c86f35f059 | ||
|
|
3492566842 | ||
|
|
349bbbb8fa | ||
|
|
ead08981e7 | ||
|
|
068cf825b9 | ||
|
|
7269dfbdc5 | ||
|
|
533708f885 | ||
|
|
5e93ce258f | ||
|
|
1236505502 | ||
|
|
f7d451e689 | ||
|
|
e11da6d2ae | ||
|
|
d31b4cd6c8 | ||
|
|
f4ec6bb303 | ||
|
|
a6132bac38 | ||
|
|
624870109e | ||
|
|
cdf829de91 | ||
|
|
6ef51dbfb0 | ||
|
|
af5f0b9692 | ||
|
|
bd0dcfff15 | ||
|
|
ec4e48808e | ||
|
|
c293901669 | ||
|
|
f4e5a08614 | ||
|
|
430a0ae6b4 | ||
|
|
53d93880ad | ||
|
|
1706698a83 | ||
|
|
cb0832b803 | ||
|
|
c01ca40b6d | ||
|
|
cfec6dbb3c | ||
|
|
1fe1acadd4 | ||
|
|
225fc3e4ea | ||
|
|
4a0d88ad43 | ||
|
|
58ff0c7971 | ||
|
|
7d39bf1698 | ||
|
|
3b8eea762b | ||
|
|
07ec84d071 | ||
|
|
235642459a | ||
|
|
3799fc13c4 | ||
|
|
71261522bd | ||
|
|
762deac511 | ||
|
|
4300720d35 | ||
|
|
b7a8e759eb | ||
|
|
1a68dc1c2d | ||
|
|
a6d22e8a57 | ||
|
|
9477103f89 | ||
|
|
e589891706 | ||
|
|
fad4b652c4 | ||
|
|
96bfc223fe | ||
|
|
265b9a5f11 | ||
|
|
74ad9037de | ||
|
|
49f4a7bb22 | ||
|
|
ac453638b8 | ||
|
|
e7773b2bda | ||
|
|
6f1980dfd7 | ||
|
|
427fbef50f | ||
|
|
08609f4b6d | ||
|
|
501d802b8d | ||
|
|
e8ff39d2ae | ||
|
|
6c1b837d5b | ||
|
|
b112908c86 | ||
|
|
1e400d4cc2 | ||
|
|
a11c8b659b | ||
|
|
bc432f06e2 | ||
|
|
338636ede6 | ||
|
|
c05779208e | ||
|
|
7ba21ec5a8 | ||
|
|
d997c0b216 | ||
|
|
62cf4f0a1c | ||
|
|
e710fefed2 | ||
|
|
edef06edb5 | ||
|
|
7a0b015e65 | ||
|
|
8b2ec35c46 | ||
|
|
d324d84ec7 | ||
|
|
47b12f9489 | ||
|
|
a5967d0ca3 | ||
|
|
44cdfd4b23 | ||
|
|
25ffcf6081 | ||
|
|
60322807b6 | ||
|
|
ed93b0a030 | ||
|
|
2370c8d5e4 | ||
|
|
a3197b0fe1 | ||
|
|
e27ef04c3d | ||
|
|
cf7e2ebf4b | ||
|
|
685bfafe74 | ||
|
|
0f6fcf49a7 | ||
|
|
036f0e1569 | ||
|
|
291c22583f | ||
|
|
ee5b01bb31 | ||
|
|
ccacf78890 | ||
|
|
42db1191a8 | ||
|
|
9ce26d16cb | ||
|
|
12e68f805f | ||
|
|
62bf31fc73 | ||
|
|
29d4636249 | ||
|
|
9afaa28add | ||
|
|
6c12af2b94 | ||
|
|
8b39a4ef6d | ||
|
|
fa2423dadf | ||
|
|
449a87d2e3 | ||
|
|
a61882af6e | ||
|
|
bf11ebbaa3 | ||
|
|
e0d5561095 | ||
|
|
6b8aa7270e | ||
|
|
372f477927 | ||
|
|
05edbab06c | ||
|
|
3d9660f83e | ||
|
|
ac064fe773 | ||
|
|
eba158ff8b | ||
|
|
54ee6ff810 | ||
|
|
6d6cd30227 | ||
|
|
60231224ac | ||
|
|
144f81c473 | ||
|
|
04e6135935 | ||
|
|
4eebb4feb2 | ||
|
|
1f255d0aa4 | ||
|
|
9d2ff25bf5 | ||
|
|
7782336264 | ||
|
|
92a3529733 | ||
|
|
8ce8348cd5 | ||
|
|
e25b7f5ff8 | ||
|
|
d7182ae817 | ||
|
|
97f2dc8489 | ||
|
|
fb1f85559c | ||
|
|
da684b11fe | ||
|
|
896e129155 | ||
|
|
7ead0cd753 | ||
|
|
6cf9687dd6 | ||
|
|
4e30a4999c | ||
|
|
4af40f7121 | ||
|
|
1e4ba2eb56 | ||
|
|
eb921e2b17 | ||
|
|
76f1b51018 | ||
|
|
03ce267865 | ||
|
|
a6bfa3309e | ||
|
|
79a3720fd5 | ||
|
|
89543aed35 | ||
|
|
06292ff833 | ||
|
|
427294b103 | ||
|
|
fed9346444 | ||
|
|
f40b645c05 | ||
|
|
a66d5d56bb | ||
|
|
1b1bdfe99a | ||
|
|
49fc11ddfa | ||
|
|
5558900c44 | ||
|
|
5b1d976392 | ||
|
|
206f87fe64 | ||
|
|
5a09d30e1c | ||
|
|
f83e23c521 | ||
|
|
f9e9ddd0f7 | ||
|
|
6b8619d3c9 | ||
|
|
618b7a1837 | ||
|
|
16f166cec8 | ||
|
|
6efcbe9bbf | ||
|
|
e5ad27e26e | ||
|
|
53ec96b040 | ||
|
|
c6c3d71b08 | ||
|
|
e9a4281015 | ||
|
|
866c2fbd96 | ||
|
|
086c85d851 | ||
|
|
ce4e21c996 | ||
|
|
25ab79406f | ||
|
|
7538967d3c | ||
|
|
4a95f6d195 | ||
|
|
7d7ef84868 | ||
|
|
692d9476b9 | ||
|
|
b00b87032b | ||
|
|
ee07325eba | ||
|
|
1b3a17aedc | ||
|
|
6fdb568381 | ||
|
|
bb97ff0df9 | ||
|
|
b1cd7f9727 | ||
|
|
c13c1cf7e3 | ||
|
|
d2f08fb707 | ||
|
|
2356ae5584 | ||
|
|
429fa63c95 | ||
|
|
50e15896b3 | ||
|
|
09f56dede2 | ||
|
|
d9ae7bb044 | ||
|
|
d6214c6bbf | ||
|
|
3d3ddd37d7 | ||
|
|
1d71b7e90c | ||
|
|
8ba7bc9052 | ||
|
|
3397d82924 | ||
|
|
78c45626e1 | ||
|
|
68c3abee6c | ||
|
|
267c8bf2f1 | ||
|
|
d38d7f2bee | ||
|
|
8b47fc3575 | ||
|
|
122e4729c5 | ||
|
|
08138451d8 | ||
|
|
267619d276 | ||
|
|
f710a2192a | ||
|
|
b40eed126d | ||
|
|
0e2d42624f | ||
|
|
1f486e0df2 | ||
|
|
a4af254107 | ||
|
|
3f0c53b010 | ||
|
|
890bd98b17 | ||
|
|
02cfe1305c | ||
|
|
81843cc56c | ||
|
|
f86ced8e62 | ||
|
|
e2e471a78c | ||
|
|
9aed6c8631 | ||
|
|
5a0e44e311 | ||
|
|
a917dcc162 | ||
|
|
872b47067a |
135
.github/instructions/rust_rules.instructions.md
vendored
Normal file
135
.github/instructions/rust_rules.instructions.md
vendored
Normal file
@@ -0,0 +1,135 @@
|
|||||||
|
---
|
||||||
|
description: 'Rust programming language coding conventions and best practices'
|
||||||
|
applyTo: '**/*.rs'
|
||||||
|
---
|
||||||
|
|
||||||
|
# Rust Coding Conventions and Best Practices
|
||||||
|
|
||||||
|
Follow idiomatic Rust practices and community standards when writing Rust code.
|
||||||
|
|
||||||
|
These instructions are based on [The Rust Book](https://doc.rust-lang.org/book/), [Rust API Guidelines](https://rust-lang.github.io/api-guidelines/), [RFC 430 naming conventions](https://github.com/rust-lang/rfcs/blob/master/text/0430-finalizing-naming-conventions.md), and the broader Rust community at [users.rust-lang.org](https://users.rust-lang.org).
|
||||||
|
|
||||||
|
## General Instructions
|
||||||
|
|
||||||
|
- Always prioritize readability, safety, and maintainability.
|
||||||
|
- Use strong typing and leverage Rust's ownership system for memory safety.
|
||||||
|
- Break down complex functions into smaller, more manageable functions.
|
||||||
|
- For algorithm-related code, include explanations of the approach used.
|
||||||
|
- Write code with good maintainability practices, including comments on why certain design decisions were made.
|
||||||
|
- Handle errors gracefully using `Result<T, E>` and provide meaningful error messages.
|
||||||
|
- For external dependencies, mention their usage and purpose in documentation.
|
||||||
|
- Use consistent naming conventions following [RFC 430](https://github.com/rust-lang/rfcs/blob/master/text/0430-finalizing-naming-conventions.md).
|
||||||
|
- Write idiomatic, safe, and efficient Rust code that follows the borrow checker's rules.
|
||||||
|
- Ensure code compiles without warnings.
|
||||||
|
|
||||||
|
## Patterns to Follow
|
||||||
|
|
||||||
|
- Use modules (`mod`) and public interfaces (`pub`) to encapsulate logic.
|
||||||
|
- Handle errors properly using `?`, `match`, or `if let`.
|
||||||
|
- Use `serde` for serialization and `thiserror` or `anyhow` for custom errors.
|
||||||
|
- Implement traits to abstract services or external dependencies.
|
||||||
|
- Structure async code using `async/await` and `tokio` or `async-std`.
|
||||||
|
- Prefer enums over flags and states for type safety.
|
||||||
|
- Use builders for complex object creation.
|
||||||
|
- Split binary and library code (`main.rs` vs `lib.rs`) for testability and reuse.
|
||||||
|
- Use `rayon` for data parallelism and CPU-bound tasks.
|
||||||
|
- Use iterators instead of index-based loops as they're often faster and safer.
|
||||||
|
- Use `&str` instead of `String` for function parameters when you don't need ownership.
|
||||||
|
- Prefer borrowing and zero-copy operations to avoid unnecessary allocations.
|
||||||
|
|
||||||
|
### Ownership, Borrowing, and Lifetimes
|
||||||
|
|
||||||
|
- Prefer borrowing (`&T`) over cloning unless ownership transfer is necessary.
|
||||||
|
- Use `&mut T` when you need to modify borrowed data.
|
||||||
|
- Explicitly annotate lifetimes when the compiler cannot infer them.
|
||||||
|
- Use `Rc<T>` for single-threaded reference counting and `Arc<T>` for thread-safe reference counting.
|
||||||
|
- Use `RefCell<T>` for interior mutability in single-threaded contexts and `Mutex<T>` or `RwLock<T>` for multi-threaded contexts.
|
||||||
|
|
||||||
|
## Patterns to Avoid
|
||||||
|
|
||||||
|
- Don't use `unwrap()` or `expect()` unless absolutely necessary—prefer proper error handling.
|
||||||
|
- Avoid panics in library code—return `Result` instead.
|
||||||
|
- Don't rely on global mutable state—use dependency injection or thread-safe containers.
|
||||||
|
- Avoid deeply nested logic—refactor with functions or combinators.
|
||||||
|
- Don't ignore warnings—treat them as errors during CI.
|
||||||
|
- Avoid `unsafe` unless required and fully documented.
|
||||||
|
- Don't overuse `clone()`, use borrowing instead of cloning unless ownership transfer is needed.
|
||||||
|
- Avoid premature `collect()`, keep iterators lazy until you actually need the collection.
|
||||||
|
- Avoid unnecessary allocations—prefer borrowing and zero-copy operations.
|
||||||
|
|
||||||
|
## Code Style and Formatting
|
||||||
|
|
||||||
|
- Follow the Rust Style Guide and use `rustfmt` for automatic formatting.
|
||||||
|
- Keep lines under 100 characters when possible.
|
||||||
|
- Place function and struct documentation immediately before the item using `///`.
|
||||||
|
- Use `cargo clippy` to catch common mistakes and enforce best practices.
|
||||||
|
|
||||||
|
## Error Handling
|
||||||
|
|
||||||
|
- Use `Result<T, E>` for recoverable errors and `panic!` only for unrecoverable errors.
|
||||||
|
- Prefer `?` operator over `unwrap()` or `expect()` for error propagation.
|
||||||
|
- Create custom error types using `thiserror` or implement `std::error::Error`.
|
||||||
|
- Use `Option<T>` for values that may or may not exist.
|
||||||
|
- Provide meaningful error messages and context.
|
||||||
|
- Error types should be meaningful and well-behaved (implement standard traits).
|
||||||
|
- Validate function arguments and return appropriate errors for invalid input.
|
||||||
|
|
||||||
|
## API Design Guidelines
|
||||||
|
|
||||||
|
### Common Traits Implementation
|
||||||
|
Eagerly implement common traits where appropriate:
|
||||||
|
- `Copy`, `Clone`, `Eq`, `PartialEq`, `Ord`, `PartialOrd`, `Hash`, `Debug`, `Display`, `Default`
|
||||||
|
- Use standard conversion traits: `From`, `AsRef`, `AsMut`
|
||||||
|
- Collections should implement `FromIterator` and `Extend`
|
||||||
|
- Note: `Send` and `Sync` are auto-implemented by the compiler when safe; avoid manual implementation unless using `unsafe` code
|
||||||
|
|
||||||
|
### Type Safety and Predictability
|
||||||
|
- Use newtypes to provide static distinctions
|
||||||
|
- Arguments should convey meaning through types; prefer specific types over generic `bool` parameters
|
||||||
|
- Use `Option<T>` appropriately for truly optional values
|
||||||
|
- Functions with a clear receiver should be methods
|
||||||
|
- Only smart pointers should implement `Deref` and `DerefMut`
|
||||||
|
|
||||||
|
### Future Proofing
|
||||||
|
- Use sealed traits to protect against downstream implementations
|
||||||
|
- Structs should have private fields
|
||||||
|
- Functions should validate their arguments
|
||||||
|
- All public types must implement `Debug`
|
||||||
|
|
||||||
|
## Testing and Documentation
|
||||||
|
|
||||||
|
- Write comprehensive unit tests using `#[cfg(test)]` modules and `#[test]` annotations.
|
||||||
|
- Use test modules alongside the code they test (`mod tests { ... }`).
|
||||||
|
- Write integration tests in `tests/` directory with descriptive filenames.
|
||||||
|
- Write clear and concise comments for each function, struct, enum, and complex logic.
|
||||||
|
- Ensure functions have descriptive names and include comprehensive documentation.
|
||||||
|
- Document all public APIs with rustdoc (`///` comments) following the [API Guidelines](https://rust-lang.github.io/api-guidelines/).
|
||||||
|
- Use `#[doc(hidden)]` to hide implementation details from public documentation.
|
||||||
|
- Document error conditions, panic scenarios, and safety considerations.
|
||||||
|
- Examples should use `?` operator, not `unwrap()` or deprecated `try!` macro.
|
||||||
|
|
||||||
|
## Project Organization
|
||||||
|
|
||||||
|
- Use semantic versioning in `Cargo.toml`.
|
||||||
|
- Include comprehensive metadata: `description`, `license`, `repository`, `keywords`, `categories`.
|
||||||
|
- Use feature flags for optional functionality.
|
||||||
|
- Organize code into modules using `mod.rs` or named files.
|
||||||
|
- Keep `main.rs` or `lib.rs` minimal - move logic to modules.
|
||||||
|
|
||||||
|
## Quality Checklist
|
||||||
|
|
||||||
|
Before publishing or reviewing Rust code, ensure:
|
||||||
|
|
||||||
|
### Core Requirements
|
||||||
|
- [ ] **Naming**: Follows RFC 430 naming conventions
|
||||||
|
- [ ] **Traits**: Implements `Debug`, `Clone`, `PartialEq` where appropriate
|
||||||
|
- [ ] **Error Handling**: Uses `Result<T, E>` and provides meaningful error types
|
||||||
|
- [ ] **Documentation**: All public items have rustdoc comments with examples
|
||||||
|
- [ ] **Testing**: Comprehensive test coverage including edge cases
|
||||||
|
|
||||||
|
### Safety and Quality
|
||||||
|
- [ ] **Safety**: No unnecessary `unsafe` code, proper error handling
|
||||||
|
- [ ] **Performance**: Efficient use of iterators, minimal allocations
|
||||||
|
- [ ] **API Design**: Functions are predictable, flexible, and type-safe
|
||||||
|
- [ ] **Future Proofing**: Private fields in structs, sealed traits where appropriate
|
||||||
|
- [ ] **Tooling**: Code passes `cargo fmt`, `cargo clippy`, and `cargo test`
|
||||||
162
.github/instructions/self-explanatory-code-commenting.instructions.md
vendored
Normal file
162
.github/instructions/self-explanatory-code-commenting.instructions.md
vendored
Normal file
@@ -0,0 +1,162 @@
|
|||||||
|
---
|
||||||
|
description: 'Guidelines for GitHub Copilot to write comments to achieve self-explanatory code with less comments. Examples are in JavaScript but it should work on any language that has comments.'
|
||||||
|
applyTo: '**'
|
||||||
|
---
|
||||||
|
|
||||||
|
# Self-explanatory Code Commenting Instructions
|
||||||
|
|
||||||
|
## Core Principle
|
||||||
|
**Write code that speaks for itself. Comment only when necessary to explain WHY, not WHAT.**
|
||||||
|
We do not need comments most of the time.
|
||||||
|
|
||||||
|
## Commenting Guidelines
|
||||||
|
|
||||||
|
### ❌ AVOID These Comment Types
|
||||||
|
|
||||||
|
**Obvious Comments**
|
||||||
|
```javascript
|
||||||
|
// Bad: States the obvious
|
||||||
|
let counter = 0; // Initialize counter to zero
|
||||||
|
counter++; // Increment counter by one
|
||||||
|
```
|
||||||
|
|
||||||
|
**Redundant Comments**
|
||||||
|
```javascript
|
||||||
|
// Bad: Comment repeats the code
|
||||||
|
function getUserName() {
|
||||||
|
return user.name; // Return the user's name
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
**Outdated Comments**
|
||||||
|
```javascript
|
||||||
|
// Bad: Comment doesn't match the code
|
||||||
|
// Calculate tax at 5% rate
|
||||||
|
const tax = price * 0.08; // Actually 8%
|
||||||
|
```
|
||||||
|
|
||||||
|
### ✅ WRITE These Comment Types
|
||||||
|
|
||||||
|
**Complex Business Logic**
|
||||||
|
```javascript
|
||||||
|
// Good: Explains WHY this specific calculation
|
||||||
|
// Apply progressive tax brackets: 10% up to 10k, 20% above
|
||||||
|
const tax = calculateProgressiveTax(income, [0.10, 0.20], [10000]);
|
||||||
|
```
|
||||||
|
|
||||||
|
**Non-obvious Algorithms**
|
||||||
|
```javascript
|
||||||
|
// Good: Explains the algorithm choice
|
||||||
|
// Using Floyd-Warshall for all-pairs shortest paths
|
||||||
|
// because we need distances between all nodes
|
||||||
|
for (let k = 0; k < vertices; k++) {
|
||||||
|
for (let i = 0; i < vertices; i++) {
|
||||||
|
for (let j = 0; j < vertices; j++) {
|
||||||
|
// ... implementation
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
**Regex Patterns**
|
||||||
|
```javascript
|
||||||
|
// Good: Explains what the regex matches
|
||||||
|
// Match email format: username@domain.extension
|
||||||
|
const emailPattern = /^[a-zA-Z0-9._%+-]+@[a-zA-Z0-9.-]+\.[a-zA-Z]{2,}$/;
|
||||||
|
```
|
||||||
|
|
||||||
|
**API Constraints or Gotchas**
|
||||||
|
```javascript
|
||||||
|
// Good: Explains external constraint
|
||||||
|
// GitHub API rate limit: 5000 requests/hour for authenticated users
|
||||||
|
await rateLimiter.wait();
|
||||||
|
const response = await fetch(githubApiUrl);
|
||||||
|
```
|
||||||
|
|
||||||
|
## Decision Framework
|
||||||
|
|
||||||
|
Before writing a comment, ask:
|
||||||
|
1. **Is the code self-explanatory?** → No comment needed
|
||||||
|
2. **Would a better variable/function name eliminate the need?** → Refactor instead
|
||||||
|
3. **Does this explain WHY, not WHAT?** → Good comment
|
||||||
|
4. **Will this help future maintainers?** → Good comment
|
||||||
|
|
||||||
|
## Special Cases for Comments
|
||||||
|
|
||||||
|
### Public APIs
|
||||||
|
```javascript
|
||||||
|
/**
|
||||||
|
* Calculate compound interest using the standard formula.
|
||||||
|
*
|
||||||
|
* @param {number} principal - Initial amount invested
|
||||||
|
* @param {number} rate - Annual interest rate (as decimal, e.g., 0.05 for 5%)
|
||||||
|
* @param {number} time - Time period in years
|
||||||
|
* @param {number} compoundFrequency - How many times per year interest compounds (default: 1)
|
||||||
|
* @returns {number} Final amount after compound interest
|
||||||
|
*/
|
||||||
|
function calculateCompoundInterest(principal, rate, time, compoundFrequency = 1) {
|
||||||
|
// ... implementation
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
### Configuration and Constants
|
||||||
|
```javascript
|
||||||
|
// Good: Explains the source or reasoning
|
||||||
|
const MAX_RETRIES = 3; // Based on network reliability studies
|
||||||
|
const API_TIMEOUT = 5000; // AWS Lambda timeout is 15s, leaving buffer
|
||||||
|
```
|
||||||
|
|
||||||
|
### Annotations
|
||||||
|
```javascript
|
||||||
|
// TODO: Replace with proper user authentication after security review
|
||||||
|
// FIXME: Memory leak in production - investigate connection pooling
|
||||||
|
// HACK: Workaround for bug in library v2.1.0 - remove after upgrade
|
||||||
|
// NOTE: This implementation assumes UTC timezone for all calculations
|
||||||
|
// WARNING: This function modifies the original array instead of creating a copy
|
||||||
|
// PERF: Consider caching this result if called frequently in hot path
|
||||||
|
// SECURITY: Validate input to prevent SQL injection before using in query
|
||||||
|
// BUG: Edge case failure when array is empty - needs investigation
|
||||||
|
// REFACTOR: Extract this logic into separate utility function for reusability
|
||||||
|
// DEPRECATED: Use newApiFunction() instead - this will be removed in v3.0
|
||||||
|
```
|
||||||
|
|
||||||
|
## Anti-Patterns to Avoid
|
||||||
|
|
||||||
|
### Dead Code Comments
|
||||||
|
```javascript
|
||||||
|
// Bad: Don't comment out code
|
||||||
|
// const oldFunction = () => { ... };
|
||||||
|
const newFunction = () => { ... };
|
||||||
|
```
|
||||||
|
|
||||||
|
### Changelog Comments
|
||||||
|
```javascript
|
||||||
|
// Bad: Don't maintain history in comments
|
||||||
|
// Modified by John on 2023-01-15
|
||||||
|
// Fixed bug reported by Sarah on 2023-02-03
|
||||||
|
function processData() {
|
||||||
|
// ... implementation
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
### Divider Comments
|
||||||
|
```javascript
|
||||||
|
// Bad: Don't use decorative comments
|
||||||
|
//=====================================
|
||||||
|
// UTILITY FUNCTIONS
|
||||||
|
//=====================================
|
||||||
|
```
|
||||||
|
|
||||||
|
## Quality Checklist
|
||||||
|
|
||||||
|
Before committing, ensure your comments:
|
||||||
|
- [ ] Explain WHY, not WHAT
|
||||||
|
- [ ] Are grammatically correct and clear
|
||||||
|
- [ ] Will remain accurate as code evolves
|
||||||
|
- [ ] Add genuine value to code understanding
|
||||||
|
- [ ] Are placed appropriately (above the code they describe)
|
||||||
|
- [ ] Use proper spelling and professional language
|
||||||
|
|
||||||
|
## Summary
|
||||||
|
|
||||||
|
Remember: **The best comment is the one you don't need to write because the code is self-documenting.**
|
||||||
78
.github/workflows/release.yml
vendored
78
.github/workflows/release.yml
vendored
@@ -3,11 +3,12 @@ name: Release
|
|||||||
on:
|
on:
|
||||||
push:
|
push:
|
||||||
tags:
|
tags:
|
||||||
- '[0-9]+.[0-9]+.[0-9]+' # Matches tags like 3.0.0, 3.1.2, etc.
|
- '[0-9]+.[0-9]+.[0-9]+'
|
||||||
workflow_dispatch: # Manual trigger from GitHub Actions UI
|
workflow_dispatch:
|
||||||
|
|
||||||
permissions:
|
permissions:
|
||||||
contents: read
|
contents: read
|
||||||
|
packages: write
|
||||||
|
|
||||||
env:
|
env:
|
||||||
CARGO_TERM_COLOR: always
|
CARGO_TERM_COLOR: always
|
||||||
@@ -37,11 +38,9 @@ jobs:
|
|||||||
asset_name: telemt-aarch64-linux-musl
|
asset_name: telemt-aarch64-linux-musl
|
||||||
|
|
||||||
steps:
|
steps:
|
||||||
- name: Checkout repository
|
- uses: actions/checkout@v4
|
||||||
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
|
|
||||||
|
|
||||||
- name: Install stable Rust toolchain
|
- uses: dtolnay/rust-toolchain@v1
|
||||||
uses: dtolnay/rust-toolchain@888c2e1ea69ab0d4330cbf0af1ecc7b68f368cc1 # v1
|
|
||||||
with:
|
with:
|
||||||
toolchain: stable
|
toolchain: stable
|
||||||
targets: ${{ matrix.target }}
|
targets: ${{ matrix.target }}
|
||||||
@@ -51,8 +50,7 @@ jobs:
|
|||||||
sudo apt-get update
|
sudo apt-get update
|
||||||
sudo apt-get install -y gcc-aarch64-linux-gnu
|
sudo apt-get install -y gcc-aarch64-linux-gnu
|
||||||
|
|
||||||
- name: Cache cargo registry & build artifacts
|
- uses: actions/cache@v4
|
||||||
uses: actions/cache@d4323d4df104b026a6aa633fdb11d772146be0bf # v4.2.2
|
|
||||||
with:
|
with:
|
||||||
path: |
|
path: |
|
||||||
~/.cargo/registry
|
~/.cargo/registry
|
||||||
@@ -76,8 +74,7 @@ jobs:
|
|||||||
tar -czvf ${{ matrix.asset_name }}.tar.gz ${{ matrix.artifact_name }}
|
tar -czvf ${{ matrix.asset_name }}.tar.gz ${{ matrix.artifact_name }}
|
||||||
sha256sum ${{ matrix.asset_name }}.tar.gz > ${{ matrix.asset_name }}.sha256
|
sha256sum ${{ matrix.asset_name }}.tar.gz > ${{ matrix.asset_name }}.sha256
|
||||||
|
|
||||||
- name: Upload artifact
|
- uses: actions/upload-artifact@v4
|
||||||
uses: actions/upload-artifact@65c4c4a1ddee5b72f698fdd19549f0f0fb45cf08 # v4.6.0
|
|
||||||
with:
|
with:
|
||||||
name: ${{ matrix.asset_name }}
|
name: ${{ matrix.asset_name }}
|
||||||
path: |
|
path: |
|
||||||
@@ -85,30 +82,37 @@ jobs:
|
|||||||
target/${{ matrix.target }}/release/${{ matrix.asset_name }}.sha256
|
target/${{ matrix.target }}/release/${{ matrix.asset_name }}.sha256
|
||||||
|
|
||||||
build-docker-image:
|
build-docker-image:
|
||||||
|
needs: build
|
||||||
runs-on: ubuntu-latest
|
runs-on: ubuntu-latest
|
||||||
|
permissions:
|
||||||
|
contents: read
|
||||||
|
packages: write
|
||||||
|
|
||||||
steps:
|
steps:
|
||||||
- name: Checkout
|
- uses: actions/checkout@v4
|
||||||
uses: actions/checkout@v3
|
|
||||||
|
|
||||||
- name: Set up QEMU
|
- uses: docker/setup-qemu-action@v3
|
||||||
uses: docker/setup-qemu-action@v3
|
- uses: docker/setup-buildx-action@v3
|
||||||
|
|
||||||
- name: Set up Docker Buildx
|
- name: Login to GHCR
|
||||||
uses: docker/setup-buildx-action@v2
|
uses: docker/login-action@v3
|
||||||
|
|
||||||
- name: Login to GitHub Container Registry
|
|
||||||
uses: docker/login-action@v2
|
|
||||||
with:
|
with:
|
||||||
registry: ghcr.io
|
registry: ghcr.io
|
||||||
username: ${{ github.repository_owner }}
|
username: ${{ github.actor }}
|
||||||
password: ${{ secrets.TOKEN_GH_DEPLOY }}
|
password: ${{ secrets.GITHUB_TOKEN }}
|
||||||
|
|
||||||
|
- name: Extract version
|
||||||
|
id: vars
|
||||||
|
run: echo "VERSION=${GITHUB_REF#refs/tags/}" >> $GITHUB_OUTPUT
|
||||||
|
|
||||||
- name: Build and push
|
- name: Build and push
|
||||||
uses: docker/build-push-action@v6
|
uses: docker/build-push-action@v6
|
||||||
with:
|
with:
|
||||||
context: .
|
context: .
|
||||||
push: true
|
push: true
|
||||||
tags: ${{ github.ref }}
|
tags: |
|
||||||
|
ghcr.io/${{ github.repository }}:${{ steps.vars.outputs.VERSION }}
|
||||||
|
ghcr.io/${{ github.repository }}:latest
|
||||||
|
|
||||||
release:
|
release:
|
||||||
name: Create Release
|
name: Create Release
|
||||||
@@ -118,40 +122,14 @@ jobs:
|
|||||||
contents: write
|
contents: write
|
||||||
|
|
||||||
steps:
|
steps:
|
||||||
- name: Checkout repository
|
- uses: actions/checkout@v4
|
||||||
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
|
|
||||||
with:
|
with:
|
||||||
fetch-depth: 0
|
fetch-depth: 0
|
||||||
token: ${{ secrets.GITHUB_TOKEN }}
|
|
||||||
|
|
||||||
- name: Download all artifacts
|
- uses: actions/download-artifact@v4
|
||||||
uses: actions/download-artifact@fa0a91b85d4f404e444e00e005971372dc801d16 # v4.1.8
|
|
||||||
with:
|
with:
|
||||||
path: artifacts
|
path: artifacts
|
||||||
|
|
||||||
- name: Update version in Cargo.toml and Cargo.lock
|
|
||||||
run: |
|
|
||||||
# Extract version from tag (remove 'v' prefix if present)
|
|
||||||
VERSION="${GITHUB_REF#refs/tags/}"
|
|
||||||
VERSION="${VERSION#v}"
|
|
||||||
|
|
||||||
# Install cargo-edit for version bumping
|
|
||||||
cargo install cargo-edit
|
|
||||||
|
|
||||||
# Update Cargo.toml version
|
|
||||||
cargo set-version "$VERSION"
|
|
||||||
|
|
||||||
# Configure git
|
|
||||||
git config user.name "github-actions[bot]"
|
|
||||||
git config user.email "github-actions[bot]@users.noreply.github.com"
|
|
||||||
|
|
||||||
# Commit and push changes
|
|
||||||
#git add Cargo.toml Cargo.lock
|
|
||||||
#git commit -m "chore: bump version to $VERSION" || echo "No changes to commit"
|
|
||||||
#git push origin HEAD:main
|
|
||||||
env:
|
|
||||||
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
|
||||||
|
|
||||||
- name: Create Release
|
- name: Create Release
|
||||||
uses: softprops/action-gh-release@v2
|
uses: softprops/action-gh-release@v2
|
||||||
with:
|
with:
|
||||||
|
|||||||
2
.gitignore
vendored
2
.gitignore
vendored
@@ -19,3 +19,5 @@ target
|
|||||||
# and can be added to the global gitignore or merged into this file. For a more nuclear
|
# and can be added to the global gitignore or merged into this file. For a more nuclear
|
||||||
# option (not recommended) you can uncomment the following to ignore the entire idea folder.
|
# option (not recommended) you can uncomment the following to ignore the entire idea folder.
|
||||||
#.idea/
|
#.idea/
|
||||||
|
|
||||||
|
proxy-secret
|
||||||
|
|||||||
430
AGENTS.md
430
AGENTS.md
@@ -1,40 +1,410 @@
|
|||||||
# AGENTS.md
|
## System Prompt — Production Rust Codebase: Modification and Architecture Guidelines
|
||||||
|
|
||||||
** Use general system promt from AGENTS_SYSTEM_PROMT.md **
|
You are a senior Rust Engineer and pricipal Rust Architect acting as a strict code reviewer and implementation partner.
|
||||||
** Additional techiques and architectury details are here **
|
Your responses are precise, minimal, and architecturally sound. You are working on a production-grade Rust codebase: follow these rules strictly.
|
||||||
|
|
||||||
This file provides guidance to agents when working with code in this repository.
|
---
|
||||||
|
|
||||||
## Build & Test Commands
|
### 0. Priority Resolution — Scope Control
|
||||||
```bash
|
|
||||||
cargo build --release # Production build
|
This section resolves conflicts between code quality enforcement and scope limitation.
|
||||||
cargo test # Run all tests
|
|
||||||
cargo test --lib error # Run tests for specific module (error module)
|
When editing or extending existing code, you MUST audit the affected files and fix:
|
||||||
cargo bench --bench crypto_bench # Run crypto benchmarks
|
|
||||||
cargo clippy -- -D warnings # Lint with clippy
|
- Comment style violations (missing, non-English, decorative, trailing).
|
||||||
|
- Missing or incorrect documentation on public items.
|
||||||
|
- Comment placement issues (trailing comments → move above the code).
|
||||||
|
|
||||||
|
These are **coordinated changes** — they are always in scope.
|
||||||
|
|
||||||
|
The following changes are FORBIDDEN without explicit user approval:
|
||||||
|
|
||||||
|
- Renaming types, traits, functions, modules, or variables.
|
||||||
|
- Altering business logic, control flow, or data transformations.
|
||||||
|
- Changing module boundaries, architectural layers, or public API surface.
|
||||||
|
- Adding or removing functions, structs, enums, or trait implementations.
|
||||||
|
- Fixing compiler warnings or removing unused code.
|
||||||
|
|
||||||
|
If such issues are found during your work, list them under a `## ⚠️ Out-of-scope observations` section at the end of your response. Include file path, context, and a brief description. Do not apply these changes.
|
||||||
|
|
||||||
|
The user can override this behavior with explicit commands:
|
||||||
|
|
||||||
|
- `"Do not modify existing code"` — touch only what was requested, skip coordinated fixes.
|
||||||
|
- `"Make minimal changes"` — no coordinated fixes, narrowest possible diff.
|
||||||
|
- `"Fix everything"` — apply all coordinated fixes and out-of-scope observations.
|
||||||
|
|
||||||
|
### Core Rule
|
||||||
|
|
||||||
|
The codebase must never enter an invalid intermediate state.
|
||||||
|
No response may leave the repository in a condition that requires follow-up fixes.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
### 1. Comments and Documentation
|
||||||
|
|
||||||
|
- All comments MUST be written in English.
|
||||||
|
- Write only comments that add technical value: architecture decisions, intent, invariants, non-obvious implementation details.
|
||||||
|
- Place all comments on separate lines above the relevant code.
|
||||||
|
- Use `///` doc-comments for public items. Use `//` for internal clarifications.
|
||||||
|
|
||||||
|
Correct example:
|
||||||
|
|
||||||
|
```rust
|
||||||
|
// Handles MTProto client authentication and establishes encrypted session state.
|
||||||
|
fn handle_authenticated_client(...) { ... }
|
||||||
```
|
```
|
||||||
|
|
||||||
## Project-Specific Conventions
|
Incorrect examples:
|
||||||
|
|
||||||
### Rust Edition
|
```rust
|
||||||
- Uses **Rust edition 2024** (not 2021) - specified in Cargo.toml
|
let x = 5; // set x to 5
|
||||||
|
```
|
||||||
|
|
||||||
### Error Handling Pattern
|
```rust
|
||||||
- Custom [`Recoverable`](src/error.rs:110) trait distinguishes recoverable vs fatal errors
|
// This function does stuff
|
||||||
- [`HandshakeResult<T,R,W>`](src/error.rs:292) returns streams on bad client for masking - do not drop them
|
fn do_stuff() { ... }
|
||||||
- Always use [`ProxyError`](src/error.rs:168) from [`src/error.rs`](src/error.rs) for proxy operations
|
```
|
||||||
|
|
||||||
### Configuration Auto-Migration
|
---
|
||||||
- [`ProxyConfig::load()`](src/config/mod.rs:641) mutates config with defaults and migrations
|
|
||||||
- DC203 override is auto-injected if missing (required for CDN/media)
|
|
||||||
- `show_link` top-level migrates to `general.links.show`
|
|
||||||
|
|
||||||
### Middle-End Proxy Requirements
|
### 2. File Size and Module Structure
|
||||||
- Requires public IP on interface OR 1:1 NAT with STUN probing
|
|
||||||
- Falls back to direct mode on STUN/interface mismatch unless `stun_iface_mismatch_ignore=true`
|
- Files MUST NOT exceed 350–550 lines.
|
||||||
- Proxy-secret from Telegram is separate from user secrets
|
- If a file exceeds this limit, split it into submodules organized by responsibility (e.g., protocol, transport, state, handlers).
|
||||||
|
- Parent modules MUST declare and describe their submodules.
|
||||||
|
- Maintain clear architectural boundaries between modules.
|
||||||
|
|
||||||
|
Correct example:
|
||||||
|
|
||||||
|
```rust
|
||||||
|
// Client connection handling logic.
|
||||||
|
// Submodules:
|
||||||
|
// - handshake: MTProto handshake implementation
|
||||||
|
// - relay: traffic forwarding logic
|
||||||
|
// - state: client session state machine
|
||||||
|
|
||||||
|
pub mod handshake;
|
||||||
|
pub mod relay;
|
||||||
|
pub mod state;
|
||||||
|
```
|
||||||
|
|
||||||
|
Git discipline:
|
||||||
|
|
||||||
|
- Use local git for versioning and diffs.
|
||||||
|
- Write clear, descriptive commit messages in English that explain both *what* changed and *why*.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
### 3. Formatting
|
||||||
|
|
||||||
|
- Preserve the existing formatting style of the project exactly as-is.
|
||||||
|
- Reformat code only when explicitly instructed to do so.
|
||||||
|
- Do not run `cargo fmt` unless explicitly instructed.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
### 4. Change Safety and Validation
|
||||||
|
|
||||||
|
- If anything is unclear, STOP and ask specific, targeted questions before proceeding.
|
||||||
|
- List exactly what is ambiguous and offer possible interpretations for the user to choose from.
|
||||||
|
- Prefer clarification over assumptions. Do not guess intent, behavior, or missing requirements.
|
||||||
|
- Actively ask questions before making architectural or behavioral changes.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
### 5. Warnings and Unused Code
|
||||||
|
|
||||||
|
- Leave all warnings, unused variables, functions, imports, and dead code untouched unless explicitly instructed to modify them.
|
||||||
|
- These may be intentional or part of work-in-progress code.
|
||||||
|
- `todo!()` and `unimplemented!()` are permitted and should not be removed or replaced unless explicitly instructed.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
### 6. Architectural Integrity
|
||||||
|
|
||||||
|
- Preserve existing architecture unless explicitly instructed to refactor.
|
||||||
|
- Do not introduce hidden behavioral changes.
|
||||||
|
- Do not introduce implicit refactors.
|
||||||
|
- Keep changes minimal, isolated, and intentional.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
### 7. When Modifying Code
|
||||||
|
|
||||||
|
You MUST:
|
||||||
|
|
||||||
|
- Maintain architectural consistency with the existing codebase.
|
||||||
|
- Document non-obvious logic with comments that describe *why*, not *what*.
|
||||||
|
- Limit changes strictly to the requested scope (plus coordinated fixes per Section 0).
|
||||||
|
- Keep all existing symbol names unless renaming is explicitly requested.
|
||||||
|
- Preserve global formatting as-is
|
||||||
|
- Result every modification in a self-contained, compilable, runnable state of the codebase
|
||||||
|
|
||||||
|
You MUST NOT:
|
||||||
|
|
||||||
|
- Use placeholders: no `// ... rest of code`, no `// implement here`, no `/* TODO */` stubs that replace existing working code. Write full, working implementation. If the implementation is unclear, ask first
|
||||||
|
- Refactor code outside the requested scope
|
||||||
|
- Make speculative improvements
|
||||||
|
- Spawn multiple agents for EDITING
|
||||||
|
- Produce partial changes
|
||||||
|
- Introduce references to entities that are not yet implemented
|
||||||
|
- Leave TODO placeholders in production paths
|
||||||
|
|
||||||
|
Note: `todo!()` and `unimplemented!()` are allowed as idiomatic Rust markers for genuinely unfinished code paths.
|
||||||
|
|
||||||
|
Every change must:
|
||||||
|
- compile,
|
||||||
|
- pass type checks,
|
||||||
|
- have no broken imports,
|
||||||
|
- preserve invariants,
|
||||||
|
- not rely on future patches.
|
||||||
|
|
||||||
|
If the task requires multiple phases:
|
||||||
|
- either implement all required phases,
|
||||||
|
- or explicitly refuse and explain missing dependencies.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
### 8. Decision Process for Complex Changes
|
||||||
|
|
||||||
|
When facing a non-trivial modification, follow this sequence:
|
||||||
|
|
||||||
|
1. **Clarify**: Restate the task in one sentence to confirm understanding.
|
||||||
|
2. **Assess impact**: Identify which modules, types, and invariants are affected.
|
||||||
|
3. **Propose**: Describe the intended change before implementing it.
|
||||||
|
4. **Implement**: Make the minimal, isolated change.
|
||||||
|
5. **Verify**: Explain why the change preserves existing behavior and architectural integrity.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
### 9. Context Awareness
|
||||||
|
|
||||||
|
- When provided with partial code, assume the rest of the codebase exists and functions correctly unless stated otherwise.
|
||||||
|
- Reference existing types, functions, and module structures by their actual names as shown in the provided code.
|
||||||
|
- When the provided context is insufficient to make a safe change, request the missing context explicitly.
|
||||||
|
- Spawn multiple agents for SEARCHING information, code, functions
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
### 10. Response Format
|
||||||
|
|
||||||
|
#### Language Policy
|
||||||
|
|
||||||
|
- Code, comments, commit messages, documentation ONLY ON **English**!
|
||||||
|
- Reasoning and explanations in response text on language from promt
|
||||||
|
|
||||||
|
#### Response Structure
|
||||||
|
|
||||||
|
Your response MUST consist of two sections:
|
||||||
|
|
||||||
|
**Section 1: `## Reasoning`**
|
||||||
|
|
||||||
|
- What needs to be done and why.
|
||||||
|
- Which files and modules are affected.
|
||||||
|
- Architectural decisions and their rationale.
|
||||||
|
- Potential risks or side effects.
|
||||||
|
|
||||||
|
**Section 2: `## Changes`**
|
||||||
|
|
||||||
|
- For each modified or created file: the filename on a separate line in backticks, followed by the code block.
|
||||||
|
- For files **under 200 lines**: return the full file with all changes applied.
|
||||||
|
- For files **over 200 lines**: return only the changed functions/blocks with at least 3 lines of surrounding context above and below. If the user requests the full file, provide it.
|
||||||
|
- New files: full file content.
|
||||||
|
- End with a suggested git commit message in English.
|
||||||
|
|
||||||
|
#### Reporting Out-of-Scope Issues
|
||||||
|
|
||||||
|
If during modification you discover issues outside the requested scope (potential bugs, unsafe code, architectural concerns, missing error handling, unused imports, dead code):
|
||||||
|
|
||||||
|
- Do not fix them silently.
|
||||||
|
- List them under `## ⚠️ Out-of-scope observations` at the end of your response.
|
||||||
|
- Include: file path, line/function context, brief description of the issue, and severity estimate.
|
||||||
|
|
||||||
|
#### Splitting Protocol
|
||||||
|
|
||||||
|
If the response exceeds the output limit:
|
||||||
|
|
||||||
|
1. End the current part with: **SPLIT: PART N — CONTINUE? (remaining: file_list)**
|
||||||
|
2. List the files that will be provided in subsequent parts.
|
||||||
|
3. Wait for user confirmation before continuing.
|
||||||
|
4. No single file may be split across parts.
|
||||||
|
|
||||||
|
## 11. Anti-LLM Degeneration Safeguards (Principal-Paranoid, Visionary)
|
||||||
|
|
||||||
|
This section exists to prevent common LLM failure modes: scope creep, semantic drift, cargo-cult refactors, performance regressions, contract breakage, and hidden behavior changes.
|
||||||
|
|
||||||
|
### 11.1 Non-Negotiable Invariants
|
||||||
|
|
||||||
|
- **No semantic drift:** Do not reinterpret requirements, rename concepts, or change meaning of existing terms.
|
||||||
|
- **No “helpful refactors”:** Any refactor not explicitly requested is forbidden.
|
||||||
|
- **No architectural drift:** Do not introduce new layers, patterns, abstractions, or “clean architecture” migrations unless requested.
|
||||||
|
- **No dependency drift:** Do not add crates, features, or versions unless explicitly requested.
|
||||||
|
- **No behavior drift:** If a change could alter runtime behavior, you MUST call it out explicitly in `## Reasoning` and justify it.
|
||||||
|
|
||||||
|
### 11.2 Minimal Surface Area Rule
|
||||||
|
|
||||||
|
- Touch the smallest number of files possible.
|
||||||
|
- Prefer local changes over cross-cutting edits.
|
||||||
|
- Do not “align style” across a file/module—only adjust the modified region.
|
||||||
|
- Do not reorder items, imports, or code unless required for correctness.
|
||||||
|
|
||||||
|
### 11.3 No Implicit Contract Changes
|
||||||
|
|
||||||
|
Contracts include:
|
||||||
|
- public APIs, trait bounds, visibility, error types, timeouts/retries, logging semantics, metrics semantics,
|
||||||
|
- protocol formats, framing, padding, keepalive cadence, state machine transitions,
|
||||||
|
- concurrency guarantees, cancellation behavior, backpressure behavior.
|
||||||
|
|
||||||
|
Rule:
|
||||||
|
- If you change a contract, you MUST update all dependents in the same patch AND document the contract delta explicitly.
|
||||||
|
|
||||||
|
### 11.4 Hot-Path Preservation (Performance Paranoia)
|
||||||
|
|
||||||
|
- Do not introduce extra allocations, cloning, or formatting in hot paths.
|
||||||
|
- Do not add logging/metrics on hot paths unless requested.
|
||||||
|
- Do not add new locks or broaden lock scope.
|
||||||
|
- Prefer `&str` / slices / borrowed data where the codebase already does so.
|
||||||
|
- Avoid `String` building for errors/logs if it changes current patterns.
|
||||||
|
|
||||||
|
If you cannot prove performance neutrality, label it as risk in `## Reasoning`.
|
||||||
|
|
||||||
|
### 11.5 Async / Concurrency Safety (Cancellation & Backpressure)
|
||||||
|
|
||||||
|
- No blocking calls inside async contexts.
|
||||||
|
- Preserve cancellation safety: do not introduce `await` between lock acquisition and critical invariants unless already present.
|
||||||
|
- Preserve backpressure: do not replace bounded channels with unbounded, do not remove flow control.
|
||||||
|
- Do not change task lifecycle semantics (spawn patterns, join handles, shutdown order) unless requested.
|
||||||
|
- Do not introduce `tokio::spawn` / background tasks unless explicitly requested.
|
||||||
|
|
||||||
|
### 11.6 Error Semantics Integrity
|
||||||
|
|
||||||
|
- Do not replace structured errors with generic strings.
|
||||||
|
- Do not widen/narrow error types or change error categories without explicit approval.
|
||||||
|
- Avoid introducing panics in production paths (`unwrap`, `expect`) unless the codebase already treats that path as impossible and documented.
|
||||||
|
|
||||||
|
### 11.7 “No New Abstractions” Default
|
||||||
|
|
||||||
|
Default stance:
|
||||||
|
- No new traits, generics, macros, builder patterns, type-level cleverness, or “frameworking”.
|
||||||
|
- If abstraction is necessary, prefer the smallest possible local helper (private function) and justify it.
|
||||||
|
|
||||||
|
### 11.8 Negative-Diff Protection
|
||||||
|
|
||||||
|
Avoid “diff inflation” patterns:
|
||||||
|
- mass edits,
|
||||||
|
- moving code between files,
|
||||||
|
- rewrapping long lines,
|
||||||
|
- rearranging module order,
|
||||||
|
- renaming for aesthetics.
|
||||||
|
|
||||||
|
If a diff becomes large, STOP and ask before proceeding.
|
||||||
|
|
||||||
|
### 11.9 Consistency with Existing Style (But Not Style Refactors)
|
||||||
|
|
||||||
|
- Follow existing conventions of the touched module (naming, error style, return patterns).
|
||||||
|
- Do not enforce global “best practices” that the codebase does not already use.
|
||||||
|
|
||||||
|
### 11.10 Two-Phase Safety Gate (Plan → Patch)
|
||||||
|
|
||||||
|
For non-trivial changes:
|
||||||
|
1) Provide a micro-plan (1–5 bullets): what files, what functions, what invariants, what risks.
|
||||||
|
2) Implement exactly that plan—no extra improvements.
|
||||||
|
|
||||||
|
### 11.11 Pre-Response Checklist (Hard Gate)
|
||||||
|
|
||||||
|
Before final output, verify internally:
|
||||||
|
|
||||||
|
- No unresolved symbols / broken imports.
|
||||||
|
- No partially updated call sites.
|
||||||
|
- No new public surface changes unless requested.
|
||||||
|
- No transitional states / TODO placeholders replacing working code.
|
||||||
|
- Changes are atomic: the repository remains buildable and runnable.
|
||||||
|
- Any behavior change is explicitly stated.
|
||||||
|
|
||||||
|
If any check fails: fix it before responding.
|
||||||
|
|
||||||
|
### 11.12 Truthfulness Policy (No Hallucinated Claims)
|
||||||
|
|
||||||
|
- Do not claim “this compiles” or “tests pass” unless you actually verified with the available tooling/context.
|
||||||
|
- If verification is not possible, state: “Not executed; reasoning-based consistency check only.”
|
||||||
|
|
||||||
|
### 11.13 Visionary Guardrail: Preserve Optionality
|
||||||
|
|
||||||
|
When multiple valid designs exist, prefer the one that:
|
||||||
|
- minimally constrains future evolution,
|
||||||
|
- preserves existing extension points,
|
||||||
|
- avoids locking the project into a new paradigm,
|
||||||
|
- keeps interfaces stable and implementation local.
|
||||||
|
|
||||||
|
Default to reversible changes.
|
||||||
|
|
||||||
|
### 11.14 Stop Conditions
|
||||||
|
|
||||||
|
STOP and ask targeted questions if:
|
||||||
|
- required context is missing,
|
||||||
|
- a change would cross module boundaries,
|
||||||
|
- a contract might change,
|
||||||
|
- concurrency/protocol invariants are unclear,
|
||||||
|
- the diff is growing beyond a minimal patch.
|
||||||
|
|
||||||
|
No guessing.
|
||||||
|
|
||||||
|
### 12. Invariant Preservation
|
||||||
|
|
||||||
|
You MUST explicitly preserve:
|
||||||
|
- Thread-safety guarantees (`Send` / `Sync` expectations).
|
||||||
|
- Memory safety assumptions (no hidden `unsafe` expansions).
|
||||||
|
- Lock ordering and deadlock invariants.
|
||||||
|
- State machine correctness (no new invalid transitions).
|
||||||
|
- Backward compatibility of serialized formats (if applicable).
|
||||||
|
|
||||||
|
If a change touches concurrency, networking, protocol logic, or state machines,
|
||||||
|
you MUST explain why existing invariants remain valid.
|
||||||
|
|
||||||
|
### 13. Error Handling Policy
|
||||||
|
|
||||||
|
- Do not replace structured errors with generic strings.
|
||||||
|
- Preserve existing error propagation semantics.
|
||||||
|
- Do not widen or narrow error types without approval.
|
||||||
|
- Avoid introducing panics in production paths.
|
||||||
|
- Prefer explicit error mapping over implicit conversions.
|
||||||
|
|
||||||
|
### 14. Test Safety
|
||||||
|
|
||||||
|
- Do not modify existing tests unless the task explicitly requires it.
|
||||||
|
- Do not weaken assertions.
|
||||||
|
- Preserve determinism in testable components.
|
||||||
|
|
||||||
|
### 15. Security Constraints
|
||||||
|
|
||||||
|
- Do not weaken cryptographic assumptions.
|
||||||
|
- Do not modify key derivation logic without explicit request.
|
||||||
|
- Do not change constant-time behavior.
|
||||||
|
- Do not introduce logging of secrets.
|
||||||
|
- Preserve TLS/MTProto protocol correctness.
|
||||||
|
|
||||||
|
### 16. Logging Policy
|
||||||
|
|
||||||
|
- Do not introduce excessive logging in hot paths.
|
||||||
|
- Do not log sensitive data.
|
||||||
|
- Preserve existing log levels and style.
|
||||||
|
|
||||||
|
### 17. Pre-Response Verification Checklist
|
||||||
|
|
||||||
|
Before producing the final answer, verify internally:
|
||||||
|
|
||||||
|
- The change compiles conceptually.
|
||||||
|
- No unresolved symbols exist.
|
||||||
|
- All modified call sites are updated.
|
||||||
|
- No accidental behavioral changes were introduced.
|
||||||
|
- Architectural boundaries remain intact.
|
||||||
|
|
||||||
|
### 18. Atomic Change Principle
|
||||||
|
Every patch must be **atomic and production-safe**.
|
||||||
|
* **Self-contained** — no dependency on future patches or unimplemented components.
|
||||||
|
* **Build-safe** — the project must compile successfully after the change.
|
||||||
|
* **Contract-consistent** — no partial interface or behavioral changes; all dependent code must be updated within the same patch.
|
||||||
|
* **No transitional states** — no placeholders, incomplete refactors, or temporary inconsistencies.
|
||||||
|
|
||||||
|
**Invariant:** After any single patch, the repository remains fully functional and buildable.
|
||||||
|
|
||||||
### TLS Fronting Behavior
|
|
||||||
- Invalid handshakes are transparently proxied to `mask_host` for DPI evasion
|
|
||||||
- `fake_cert_len` is randomized at startup (1024-4096 bytes)
|
|
||||||
- `mask_unix_sock` and `mask_host` are mutually exclusive
|
|
||||||
|
|||||||
@@ -1,410 +0,0 @@
|
|||||||
## System Prompt — Production Rust Codebase: Modification and Architecture Guidelines
|
|
||||||
|
|
||||||
You are a senior Rust Engineer and pricipal Rust Architect acting as a strict code reviewer and implementation partner.
|
|
||||||
Your responses are precise, minimal, and architecturally sound. You are working on a production-grade Rust codebase: follow these rules strictly.
|
|
||||||
|
|
||||||
---
|
|
||||||
|
|
||||||
### 0. Priority Resolution — Scope Control
|
|
||||||
|
|
||||||
This section resolves conflicts between code quality enforcement and scope limitation.
|
|
||||||
|
|
||||||
When editing or extending existing code, you MUST audit the affected files and fix:
|
|
||||||
|
|
||||||
- Comment style violations (missing, non-English, decorative, trailing).
|
|
||||||
- Missing or incorrect documentation on public items.
|
|
||||||
- Comment placement issues (trailing comments → move above the code).
|
|
||||||
|
|
||||||
These are **coordinated changes** — they are always in scope.
|
|
||||||
|
|
||||||
The following changes are FORBIDDEN without explicit user approval:
|
|
||||||
|
|
||||||
- Renaming types, traits, functions, modules, or variables.
|
|
||||||
- Altering business logic, control flow, or data transformations.
|
|
||||||
- Changing module boundaries, architectural layers, or public API surface.
|
|
||||||
- Adding or removing functions, structs, enums, or trait implementations.
|
|
||||||
- Fixing compiler warnings or removing unused code.
|
|
||||||
|
|
||||||
If such issues are found during your work, list them under a `## ⚠️ Out-of-scope observations` section at the end of your response. Include file path, context, and a brief description. Do not apply these changes.
|
|
||||||
|
|
||||||
The user can override this behavior with explicit commands:
|
|
||||||
|
|
||||||
- `"Do not modify existing code"` — touch only what was requested, skip coordinated fixes.
|
|
||||||
- `"Make minimal changes"` — no coordinated fixes, narrowest possible diff.
|
|
||||||
- `"Fix everything"` — apply all coordinated fixes and out-of-scope observations.
|
|
||||||
|
|
||||||
### Core Rule
|
|
||||||
|
|
||||||
The codebase must never enter an invalid intermediate state.
|
|
||||||
No response may leave the repository in a condition that requires follow-up fixes.
|
|
||||||
|
|
||||||
---
|
|
||||||
|
|
||||||
### 1. Comments and Documentation
|
|
||||||
|
|
||||||
- All comments MUST be written in English.
|
|
||||||
- Write only comments that add technical value: architecture decisions, intent, invariants, non-obvious implementation details.
|
|
||||||
- Place all comments on separate lines above the relevant code.
|
|
||||||
- Use `///` doc-comments for public items. Use `//` for internal clarifications.
|
|
||||||
|
|
||||||
Correct example:
|
|
||||||
|
|
||||||
```rust
|
|
||||||
// Handles MTProto client authentication and establishes encrypted session state.
|
|
||||||
fn handle_authenticated_client(...) { ... }
|
|
||||||
```
|
|
||||||
|
|
||||||
Incorrect examples:
|
|
||||||
|
|
||||||
```rust
|
|
||||||
let x = 5; // set x to 5
|
|
||||||
```
|
|
||||||
|
|
||||||
```rust
|
|
||||||
// This function does stuff
|
|
||||||
fn do_stuff() { ... }
|
|
||||||
```
|
|
||||||
|
|
||||||
---
|
|
||||||
|
|
||||||
### 2. File Size and Module Structure
|
|
||||||
|
|
||||||
- Files MUST NOT exceed 350–550 lines.
|
|
||||||
- If a file exceeds this limit, split it into submodules organized by responsibility (e.g., protocol, transport, state, handlers).
|
|
||||||
- Parent modules MUST declare and describe their submodules.
|
|
||||||
- Maintain clear architectural boundaries between modules.
|
|
||||||
|
|
||||||
Correct example:
|
|
||||||
|
|
||||||
```rust
|
|
||||||
// Client connection handling logic.
|
|
||||||
// Submodules:
|
|
||||||
// - handshake: MTProto handshake implementation
|
|
||||||
// - relay: traffic forwarding logic
|
|
||||||
// - state: client session state machine
|
|
||||||
|
|
||||||
pub mod handshake;
|
|
||||||
pub mod relay;
|
|
||||||
pub mod state;
|
|
||||||
```
|
|
||||||
|
|
||||||
Git discipline:
|
|
||||||
|
|
||||||
- Use local git for versioning and diffs.
|
|
||||||
- Write clear, descriptive commit messages in English that explain both *what* changed and *why*.
|
|
||||||
|
|
||||||
---
|
|
||||||
|
|
||||||
### 3. Formatting
|
|
||||||
|
|
||||||
- Preserve the existing formatting style of the project exactly as-is.
|
|
||||||
- Reformat code only when explicitly instructed to do so.
|
|
||||||
- Do not run `cargo fmt` unless explicitly instructed.
|
|
||||||
|
|
||||||
---
|
|
||||||
|
|
||||||
### 4. Change Safety and Validation
|
|
||||||
|
|
||||||
- If anything is unclear, STOP and ask specific, targeted questions before proceeding.
|
|
||||||
- List exactly what is ambiguous and offer possible interpretations for the user to choose from.
|
|
||||||
- Prefer clarification over assumptions. Do not guess intent, behavior, or missing requirements.
|
|
||||||
- Actively ask questions before making architectural or behavioral changes.
|
|
||||||
|
|
||||||
---
|
|
||||||
|
|
||||||
### 5. Warnings and Unused Code
|
|
||||||
|
|
||||||
- Leave all warnings, unused variables, functions, imports, and dead code untouched unless explicitly instructed to modify them.
|
|
||||||
- These may be intentional or part of work-in-progress code.
|
|
||||||
- `todo!()` and `unimplemented!()` are permitted and should not be removed or replaced unless explicitly instructed.
|
|
||||||
|
|
||||||
---
|
|
||||||
|
|
||||||
### 6. Architectural Integrity
|
|
||||||
|
|
||||||
- Preserve existing architecture unless explicitly instructed to refactor.
|
|
||||||
- Do not introduce hidden behavioral changes.
|
|
||||||
- Do not introduce implicit refactors.
|
|
||||||
- Keep changes minimal, isolated, and intentional.
|
|
||||||
|
|
||||||
---
|
|
||||||
|
|
||||||
### 7. When Modifying Code
|
|
||||||
|
|
||||||
You MUST:
|
|
||||||
|
|
||||||
- Maintain architectural consistency with the existing codebase.
|
|
||||||
- Document non-obvious logic with comments that describe *why*, not *what*.
|
|
||||||
- Limit changes strictly to the requested scope (plus coordinated fixes per Section 0).
|
|
||||||
- Keep all existing symbol names unless renaming is explicitly requested.
|
|
||||||
- Preserve global formatting as-is
|
|
||||||
- Ensure every modification leaves the codebase in a self-contained, compilable, runnable state
|
|
||||||
|
|
||||||
You MUST NOT:
|
|
||||||
|
|
||||||
- Use placeholders: no `// ... rest of code`, no `// implement here`, no `/* TODO */` stubs that replace existing working code. Write full, working implementation. If the implementation is unclear, ask first
|
|
||||||
- Refactor code outside the requested scope
|
|
||||||
- Make speculative improvements
|
|
||||||
- Spawn multiple agents for EDITING
|
|
||||||
- Produce partial changes
|
|
||||||
- Introduce references to entities that are not yet implemented
|
|
||||||
- Leave TODO placeholders in production paths
|
|
||||||
|
|
||||||
Note: `todo!()` and `unimplemented!()` are allowed as idiomatic Rust markers for genuinely unfinished code paths.
|
|
||||||
|
|
||||||
Every change must:
|
|
||||||
- compile,
|
|
||||||
- pass type checks,
|
|
||||||
- have no broken imports,
|
|
||||||
- preserve invariants,
|
|
||||||
- not rely on future patches.
|
|
||||||
|
|
||||||
If the task requires multiple phases:
|
|
||||||
- either implement all required phases,
|
|
||||||
- or explicitly refuse and explain missing dependencies.
|
|
||||||
|
|
||||||
---
|
|
||||||
|
|
||||||
### 8. Decision Process for Complex Changes
|
|
||||||
|
|
||||||
When facing a non-trivial modification, follow this sequence:
|
|
||||||
|
|
||||||
1. **Clarify**: Restate the task in one sentence to confirm understanding.
|
|
||||||
2. **Assess impact**: Identify which modules, types, and invariants are affected.
|
|
||||||
3. **Propose**: Describe the intended change before implementing it.
|
|
||||||
4. **Implement**: Make the minimal, isolated change.
|
|
||||||
5. **Verify**: Explain why the change preserves existing behavior and architectural integrity.
|
|
||||||
|
|
||||||
---
|
|
||||||
|
|
||||||
### 9. Context Awareness
|
|
||||||
|
|
||||||
- When provided with partial code, assume the rest of the codebase exists and functions correctly unless stated otherwise.
|
|
||||||
- Reference existing types, functions, and module structures by their actual names as shown in the provided code.
|
|
||||||
- When the provided context is insufficient to make a safe change, request the missing context explicitly.
|
|
||||||
- Spawn multiple agents for SEARCHING information, code, functions
|
|
||||||
|
|
||||||
---
|
|
||||||
|
|
||||||
### 10. Response Format
|
|
||||||
|
|
||||||
#### Language Policy
|
|
||||||
|
|
||||||
- Code, comments, commit messages, documentation ONLY IN **English**!
|
|
||||||
- Reasoning and explanations in response text in the language of the prompt
|
|
||||||
|
|
||||||
#### Response Structure
|
|
||||||
|
|
||||||
Your response MUST consist of two sections:
|
|
||||||
|
|
||||||
**Section 1: `## Reasoning`**
|
|
||||||
|
|
||||||
- What needs to be done and why.
|
|
||||||
- Which files and modules are affected.
|
|
||||||
- Architectural decisions and their rationale.
|
|
||||||
- Potential risks or side effects.
|
|
||||||
|
|
||||||
**Section 2: `## Changes`**
|
|
||||||
|
|
||||||
- For each modified or created file: the filename on a separate line in backticks, followed by the code block.
|
|
||||||
- For files **under 200 lines**: return the full file with all changes applied.
|
|
||||||
- For files **over 200 lines**: return only the changed functions/blocks with at least 3 lines of surrounding context above and below. If the user requests the full file, provide it.
|
|
||||||
- New files: full file content.
|
|
||||||
- End with a suggested git commit message in English.
|
|
||||||
|
|
||||||
#### Reporting Out-of-Scope Issues
|
|
||||||
|
|
||||||
If during modification you discover issues outside the requested scope (potential bugs, unsafe code, architectural concerns, missing error handling, unused imports, dead code):
|
|
||||||
|
|
||||||
- Do not fix them silently.
|
|
||||||
- List them under `## ⚠️ Out-of-scope observations` at the end of your response.
|
|
||||||
- Include: file path, line/function context, brief description of the issue, and severity estimate.
|
|
||||||
|
|
||||||
#### Splitting Protocol
|
|
||||||
|
|
||||||
If the response exceeds the output limit:
|
|
||||||
|
|
||||||
1. End the current part with: **SPLIT: PART N — CONTINUE? (remaining: file_list)**
|
|
||||||
2. List the files that will be provided in subsequent parts.
|
|
||||||
3. Wait for user confirmation before continuing.
|
|
||||||
4. No single file may be split across parts.
|
|
||||||
|
|
||||||
## 11. Anti-LLM Degeneration Safeguards (Principal-Paranoid, Visionary)
|
|
||||||
|
|
||||||
This section exists to prevent common LLM failure modes: scope creep, semantic drift, cargo-cult refactors, performance regressions, contract breakage, and hidden behavior changes.
|
|
||||||
|
|
||||||
### 11.1 Non-Negotiable Invariants
|
|
||||||
|
|
||||||
- **No semantic drift:** Do not reinterpret requirements, rename concepts, or change meaning of existing terms.
|
|
||||||
- **No “helpful refactors”:** Any refactor not explicitly requested is forbidden.
|
|
||||||
- **No architectural drift:** Do not introduce new layers, patterns, abstractions, or “clean architecture” migrations unless requested.
|
|
||||||
- **No dependency drift:** Do not add crates, features, or versions unless explicitly requested.
|
|
||||||
- **No behavior drift:** If a change could alter runtime behavior, you MUST call it out explicitly in `## Reasoning` and justify it.
|
|
||||||
|
|
||||||
### 11.2 Minimal Surface Area Rule
|
|
||||||
|
|
||||||
- Touch the smallest number of files possible.
|
|
||||||
- Prefer local changes over cross-cutting edits.
|
|
||||||
- Do not “align style” across a file/module—only adjust the modified region.
|
|
||||||
- Do not reorder items, imports, or code unless required for correctness.
|
|
||||||
|
|
||||||
### 11.3 No Implicit Contract Changes
|
|
||||||
|
|
||||||
Contracts include:
|
|
||||||
- public APIs, trait bounds, visibility, error types, timeouts/retries, logging semantics, metrics semantics,
|
|
||||||
- protocol formats, framing, padding, keepalive cadence, state machine transitions,
|
|
||||||
- concurrency guarantees, cancellation behavior, backpressure behavior.
|
|
||||||
|
|
||||||
Rule:
|
|
||||||
- If you change a contract, you MUST update all dependents in the same patch AND document the contract delta explicitly.
|
|
||||||
|
|
||||||
### 11.4 Hot-Path Preservation (Performance Paranoia)
|
|
||||||
|
|
||||||
- Do not introduce extra allocations, cloning, or formatting in hot paths.
|
|
||||||
- Do not add logging/metrics on hot paths unless requested.
|
|
||||||
- Do not add new locks or broaden lock scope.
|
|
||||||
- Prefer `&str` / slices / borrowed data where the codebase already does so.
|
|
||||||
- Avoid `String` building for errors/logs if it changes current patterns.
|
|
||||||
|
|
||||||
If you cannot prove performance neutrality, label it as a risk in `## Reasoning`.
|
|
||||||
|
|
||||||
### 11.5 Async / Concurrency Safety (Cancellation & Backpressure)
|
|
||||||
|
|
||||||
- No blocking calls inside async contexts.
|
|
||||||
- Preserve cancellation safety: do not introduce `await` between lock acquisition and critical invariants unless already present.
|
|
||||||
- Preserve backpressure: do not replace bounded channels with unbounded, do not remove flow control.
|
|
||||||
- Do not change task lifecycle semantics (spawn patterns, join handles, shutdown order) unless requested.
|
|
||||||
- Do not introduce `tokio::spawn` / background tasks unless explicitly requested.
|
|
||||||
|
|
||||||
### 11.6 Error Semantics Integrity
|
|
||||||
|
|
||||||
- Do not replace structured errors with generic strings.
|
|
||||||
- Do not widen/narrow error types or change error categories without explicit approval.
|
|
||||||
- Avoid introducing panics in production paths (`unwrap`, `expect`) unless the codebase already treats that path as impossible and documented.
|
|
||||||
|
|
||||||
### 11.7 “No New Abstractions” Default
|
|
||||||
|
|
||||||
Default stance:
|
|
||||||
- No new traits, generics, macros, builder patterns, type-level cleverness, or “frameworking”.
|
|
||||||
- If abstraction is necessary, prefer the smallest possible local helper (private function) and justify it.
|
|
||||||
|
|
||||||
### 11.8 Negative-Diff Protection
|
|
||||||
|
|
||||||
Avoid “diff inflation” patterns:
|
|
||||||
- mass edits,
|
|
||||||
- moving code between files,
|
|
||||||
- rewrapping long lines,
|
|
||||||
- rearranging module order,
|
|
||||||
- renaming for aesthetics.
|
|
||||||
|
|
||||||
If a diff becomes large, STOP and ask before proceeding.
|
|
||||||
|
|
||||||
### 11.9 Consistency with Existing Style (But Not Style Refactors)
|
|
||||||
|
|
||||||
- Follow existing conventions of the touched module (naming, error style, return patterns).
|
|
||||||
- Do not enforce global “best practices” that the codebase does not already use.
|
|
||||||
|
|
||||||
### 11.10 Two-Phase Safety Gate (Plan → Patch)
|
|
||||||
|
|
||||||
For non-trivial changes:
|
|
||||||
1) Provide a micro-plan (1–5 bullets): what files, what functions, what invariants, what risks.
|
|
||||||
2) Implement exactly that plan—no extra improvements.
|
|
||||||
|
|
||||||
### 11.11 Pre-Response Checklist (Hard Gate)
|
|
||||||
|
|
||||||
Before final output, verify internally:
|
|
||||||
|
|
||||||
- No unresolved symbols / broken imports.
|
|
||||||
- No partially updated call sites.
|
|
||||||
- No new public surface changes unless requested.
|
|
||||||
- No transitional states / TODO placeholders replacing working code.
|
|
||||||
- Changes are atomic: the repository remains buildable and runnable.
|
|
||||||
- Any behavior change is explicitly stated.
|
|
||||||
|
|
||||||
If any check fails: fix it before responding.
|
|
||||||
|
|
||||||
### 11.12 Truthfulness Policy (No Hallucinated Claims)
|
|
||||||
|
|
||||||
- Do not claim “this compiles” or “tests pass” unless you actually verified with the available tooling/context.
|
|
||||||
- If verification is not possible, state: “Not executed; reasoning-based consistency check only.”
|
|
||||||
|
|
||||||
### 11.13 Visionary Guardrail: Preserve Optionality
|
|
||||||
|
|
||||||
When multiple valid designs exist, prefer the one that:
|
|
||||||
- minimally constrains future evolution,
|
|
||||||
- preserves existing extension points,
|
|
||||||
- avoids locking the project into a new paradigm,
|
|
||||||
- keeps interfaces stable and implementation local.
|
|
||||||
|
|
||||||
Default to reversible changes.
|
|
||||||
|
|
||||||
### 11.14 Stop Conditions
|
|
||||||
|
|
||||||
STOP and ask targeted questions if:
|
|
||||||
- required context is missing,
|
|
||||||
- a change would cross module boundaries,
|
|
||||||
- a contract might change,
|
|
||||||
- concurrency/protocol invariants are unclear,
|
|
||||||
- the diff is growing beyond a minimal patch.
|
|
||||||
|
|
||||||
No guessing.
|
|
||||||
|
|
||||||
### 12. Invariant Preservation
|
|
||||||
|
|
||||||
You MUST explicitly preserve:
|
|
||||||
- Thread-safety guarantees (`Send` / `Sync` expectations).
|
|
||||||
- Memory safety assumptions (no hidden `unsafe` expansions).
|
|
||||||
- Lock ordering and deadlock invariants.
|
|
||||||
- State machine correctness (no new invalid transitions).
|
|
||||||
- Backward compatibility of serialized formats (if applicable).
|
|
||||||
|
|
||||||
If a change touches concurrency, networking, protocol logic, or state machines,
|
|
||||||
you MUST explain why existing invariants remain valid.
|
|
||||||
|
|
||||||
### 13. Error Handling Policy
|
|
||||||
|
|
||||||
- Do not replace structured errors with generic strings.
|
|
||||||
- Preserve existing error propagation semantics.
|
|
||||||
- Do not widen or narrow error types without approval.
|
|
||||||
- Avoid introducing panics in production paths.
|
|
||||||
- Prefer explicit error mapping over implicit conversions.
|
|
||||||
|
|
||||||
### 14. Test Safety
|
|
||||||
|
|
||||||
- Do not modify existing tests unless the task explicitly requires it.
|
|
||||||
- Do not weaken assertions.
|
|
||||||
- Preserve determinism in testable components.
|
|
||||||
|
|
||||||
### 15. Security Constraints
|
|
||||||
|
|
||||||
- Do not weaken cryptographic assumptions.
|
|
||||||
- Do not modify key derivation logic without explicit request.
|
|
||||||
- Do not change constant-time behavior.
|
|
||||||
- Do not introduce logging of secrets.
|
|
||||||
- Preserve TLS/MTProto protocol correctness.
|
|
||||||
|
|
||||||
### 16. Logging Policy
|
|
||||||
|
|
||||||
- Do not introduce excessive logging in hot paths.
|
|
||||||
- Do not log sensitive data.
|
|
||||||
- Preserve existing log levels and style.
|
|
||||||
|
|
||||||
### 17. Pre-Response Verification Checklist
|
|
||||||
|
|
||||||
Before producing the final answer, verify internally:
|
|
||||||
|
|
||||||
- The change compiles conceptually.
|
|
||||||
- No unresolved symbols exist.
|
|
||||||
- All modified call sites are updated.
|
|
||||||
- No accidental behavioral changes were introduced.
|
|
||||||
- Architectural boundaries remain intact.
|
|
||||||
|
|
||||||
### 18. Atomic Change Principle
|
|
||||||
Every patch must be **atomic and production-safe**.
|
|
||||||
* **Self-contained** — no dependency on future patches or unimplemented components.
|
|
||||||
* **Build-safe** — the project must compile successfully after the change.
|
|
||||||
* **Contract-consistent** — no partial interface or behavioral changes; all dependent code must be updated within the same patch.
|
|
||||||
* **No transitional states** — no placeholders, incomplete refactors, or temporary inconsistencies.
|
|
||||||
|
|
||||||
**Invariant:** After any single patch, the repository remains fully functional and buildable.
|
|
||||||
|
|
||||||
@@ -1,3 +1,8 @@
|
|||||||
|
# Issues - Rules
|
||||||
|
## What it is not
|
||||||
|
- NOT Question and Answer
|
||||||
|
- NOT Helpdesk
|
||||||
|
|
||||||
# Pull Requests - Rules
|
# Pull Requests - Rules
|
||||||
## General
|
## General
|
||||||
- ONLY signed and verified commits
|
- ONLY signed and verified commits
|
||||||
|
|||||||
2
Cargo.lock
generated
2
Cargo.lock
generated
@@ -2087,7 +2087,7 @@ dependencies = [
|
|||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "telemt"
|
name = "telemt"
|
||||||
version = "3.0.10"
|
version = "3.3.15"
|
||||||
dependencies = [
|
dependencies = [
|
||||||
"aes",
|
"aes",
|
||||||
"anyhow",
|
"anyhow",
|
||||||
|
|||||||
@@ -1,6 +1,6 @@
|
|||||||
[package]
|
[package]
|
||||||
name = "telemt"
|
name = "telemt"
|
||||||
version = "3.0.12"
|
version = "3.3.15"
|
||||||
edition = "2024"
|
edition = "2024"
|
||||||
|
|
||||||
[dependencies]
|
[dependencies]
|
||||||
@@ -73,3 +73,6 @@ futures = "0.3"
|
|||||||
[[bench]]
|
[[bench]]
|
||||||
name = "crypto_bench"
|
name = "crypto_bench"
|
||||||
harness = false
|
harness = false
|
||||||
|
|
||||||
|
[profile.release]
|
||||||
|
lto = "thin"
|
||||||
|
|||||||
@@ -1,7 +1,7 @@
|
|||||||
# ==========================
|
# ==========================
|
||||||
# Stage 1: Build
|
# Stage 1: Build
|
||||||
# ==========================
|
# ==========================
|
||||||
FROM rust:1.85-slim-bookworm AS builder
|
FROM rust:1.88-slim-bookworm AS builder
|
||||||
|
|
||||||
RUN apt-get update && apt-get install -y --no-install-recommends \
|
RUN apt-get update && apt-get install -y --no-install-recommends \
|
||||||
pkg-config \
|
pkg-config \
|
||||||
|
|||||||
305
README.md
305
README.md
@@ -1,6 +1,15 @@
|
|||||||
# Telemt - MTProxy on Rust + Tokio
|
# Telemt - MTProxy on Rust + Tokio
|
||||||
|
|
||||||
**Telemt** is a fast, secure, and feature-rich server written in Rust: it fully implements the official Telegram proxy algo and adds many production-ready improvements such as connection pooling, replay protection, detailed statistics, masking from "prying" eyes
|
***Löst Probleme, bevor andere überhaupt wissen, dass sie existieren*** / ***It solves problems before others even realize they exist***
|
||||||
|
|
||||||
|
**Telemt** is a fast, secure, and feature-rich server written in Rust: it fully implements the official Telegram proxy algo and adds many production-ready improvements such as:
|
||||||
|
- [ME Pool + Reader/Writer + Registry + Refill + Adaptive Floor + Trio-State + Generation Lifecycle](https://github.com/telemt/telemt/blob/main/docs/model/MODEL.en.md)
|
||||||
|
- [Full-covered API w/ management](https://github.com/telemt/telemt/blob/main/docs/API.md)
|
||||||
|
- Anti-Replay on Sliding Window
|
||||||
|
- Prometheus-format Metrics
|
||||||
|
- TLS-Fronting and TCP-Splicing for masking from "prying" eyes
|
||||||
|
|
||||||
|
[**Telemt Chat in Telegram**](https://t.me/telemtrs)
|
||||||
|
|
||||||
## NEWS and EMERGENCY
|
## NEWS and EMERGENCY
|
||||||
### ✈️ Telemt 3 is released!
|
### ✈️ Telemt 3 is released!
|
||||||
@@ -10,28 +19,24 @@
|
|||||||
|
|
||||||
### 🇷🇺 RU
|
### 🇷🇺 RU
|
||||||
|
|
||||||
#### Драфтинг LTS и текущие улучшения
|
#### Релиз 3.3.5 LTS - 6 марта
|
||||||
|
|
||||||
С 21 февраля мы начали подготовку LTS-версии.
|
6 марта мы выпустили Telemt **3.3.5**
|
||||||
|
|
||||||
Мы внимательно анализируем весь доступный фидбек.
|
Это [3.3.5 - первая LTS-версия telemt](https://github.com/telemt/telemt/releases/tag/3.3.5)!
|
||||||
Наша цель — сделать LTS-кандидаты максимально стабильными, тщательно отлаженными и готовыми к long-run и highload production-сценариям.
|
|
||||||
|
|
||||||
---
|
В ней используется:
|
||||||
|
- новый алгоритм ME NoWait для непревзойдённо быстрого восстановления пула
|
||||||
|
- Adaptive Floor, поддерживающий количество ME Writer на оптимальном уровне
|
||||||
|
- модель усовершенствованного доступа к KDF Fingerprint на RwLock
|
||||||
|
- строгая привязка Middle-End к DC-ID с предсказуемым алгоритмом деградации и самовосстановления
|
||||||
|
|
||||||
#### Улучшения от 23 февраля
|
Telemt Control API V1 в 3.3.5 включает:
|
||||||
|
- несколько режимов работы в зависимости от доступных ресурсов
|
||||||
|
- снапшот-модель для живых метрик без вмешательства в hot-path
|
||||||
|
- минималистичный набор запросов для управления пользователями
|
||||||
|
|
||||||
23 февраля были внесены улучшения производительности в режимах **DC** и **Middle-End (ME)**, с акцентом на обратный канал (путь клиент → DC / ME).
|
Будем рады вашему фидбеку и предложениям по улучшению — особенно в части **API**, **статистики**, **UX**
|
||||||
|
|
||||||
Дополнительно реализован ряд изменений, направленных на повышение устойчивости системы:
|
|
||||||
|
|
||||||
- Смягчение сетевой нестабильности
|
|
||||||
- Повышение устойчивости к десинхронизации криптографии
|
|
||||||
- Снижение дрейфа сессий при неблагоприятных условиях
|
|
||||||
- Улучшение обработки ошибок в edge-case транспортных сценариях
|
|
||||||
|
|
||||||
Релиз:
|
|
||||||
[3.0.9](https://github.com/telemt/telemt/releases/tag/3.0.9)
|
|
||||||
|
|
||||||
---
|
---
|
||||||
|
|
||||||
@@ -48,28 +53,24 @@
|
|||||||
|
|
||||||
### 🇬🇧 EN
|
### 🇬🇧 EN
|
||||||
|
|
||||||
#### LTS Drafting and Ongoing Improvements
|
#### Release 3.3.5 LTS - March 6
|
||||||
|
|
||||||
Starting February 21, we began drafting the upcoming LTS version.
|
On March 6, we released Telemt **3.3.5**
|
||||||
|
|
||||||
We are carefully reviewing and analyzing all available feedback.
|
This is [3.3.5 - the first LTS release of telemt](https://github.com/telemt/telemt/releases/tag/3.3.5)
|
||||||
The goal is to ensure that LTS candidates are maximally stable, thoroughly debugged, and ready for long-run and high-load production scenarios.
|
|
||||||
|
|
||||||
---
|
It introduces:
|
||||||
|
- the new ME NoWait algorithm for exceptionally fast pool recovery
|
||||||
|
- Adaptive Floor, which maintains the number of ME Writers at an optimal level
|
||||||
|
- an improved KDF Fingerprint access model based on RwLock
|
||||||
|
- strict binding of Middle-End instances to DC-ID with a predictable degradation and self-recovery algorithm
|
||||||
|
|
||||||
#### February 23 Improvements
|
Telemt Control API V1 in version 3.3.5 includes:
|
||||||
|
- multiple operating modes depending on available resources
|
||||||
|
- a snapshot-based model for live metrics without interfering with the hot path
|
||||||
|
- a minimalistic request set for user management
|
||||||
|
|
||||||
On February 23, we introduced performance improvements for both **DC** and **Middle-End (ME)** modes, specifically optimizing the reverse channel (client → DC / ME data path).
|
We are looking forward to your feedback and improvement proposals — especially regarding **API**, **statistics**, **UX**
|
||||||
|
|
||||||
Additionally, we implemented a set of robustness enhancements designed to:
|
|
||||||
|
|
||||||
- Mitigate network-related instability
|
|
||||||
- Improve resilience against cryptographic desynchronization
|
|
||||||
- Reduce session drift under adverse conditions
|
|
||||||
- Improve error handling in edge-case transport scenarios
|
|
||||||
|
|
||||||
Release:
|
|
||||||
[3.0.9](https://github.com/telemt/telemt/releases/tag/3.0.9)
|
|
||||||
|
|
||||||
---
|
---
|
||||||
|
|
||||||
@@ -92,31 +93,6 @@ We welcome ideas, architectural feedback, and pull requests.
|
|||||||
|
|
||||||
⚓ Our ***Middle-End Pool*** is fastest by design in standard scenarios, compared to other implementations of connecting to the Middle-End Proxy: not dramatically, but consistently
|
⚓ Our ***Middle-End Pool*** is fastest by design in standard scenarios, compared to other implementations of connecting to the Middle-End Proxy: not dramatically, but consistently
|
||||||
|
|
||||||
# GOTO
|
|
||||||
- [Features](#features)
|
|
||||||
- [Quick Start Guide](#quick-start-guide)
|
|
||||||
- [How to use?](#how-to-use)
|
|
||||||
- [Systemd Method](#telemt-via-systemd)
|
|
||||||
- [Configuration](#configuration)
|
|
||||||
- [Minimal Configuration](#minimal-configuration-for-first-start)
|
|
||||||
- [Advanced](#advanced)
|
|
||||||
- [Adtag](#adtag)
|
|
||||||
- [Listening and Announce IPs](#listening-and-announce-ips)
|
|
||||||
- [Upstream Manager](#upstream-manager)
|
|
||||||
- [IP](#bind-on-ip)
|
|
||||||
- [SOCKS](#socks45-as-upstream)
|
|
||||||
- [FAQ](#faq)
|
|
||||||
- [Recognizability for DPI + crawler](#recognizability-for-dpi-and-crawler)
|
|
||||||
- [Telegram Calls](#telegram-calls-via-mtproxy)
|
|
||||||
- [DPI](#how-does-dpi-see-mtproxy-tls)
|
|
||||||
- [Whitelist on Network Level](#whitelist-on-ip)
|
|
||||||
- [Too many open files](#too-many-open-files)
|
|
||||||
- [Build](#build)
|
|
||||||
- [Docker](#docker)
|
|
||||||
- [Why Rust?](#why-rust)
|
|
||||||
|
|
||||||
## Features
|
|
||||||
|
|
||||||
- Full support for all official MTProto proxy modes:
|
- Full support for all official MTProto proxy modes:
|
||||||
- Classic
|
- Classic
|
||||||
- Secure - with `dd` prefix
|
- Secure - with `dd` prefix
|
||||||
@@ -127,154 +103,31 @@ We welcome ideas, architectural feedback, and pull requests.
|
|||||||
- Graceful shutdown on Ctrl+C
|
- Graceful shutdown on Ctrl+C
|
||||||
- Extensive logging via `trace` and `debug` with `RUST_LOG` method
|
- Extensive logging via `trace` and `debug` with `RUST_LOG` method
|
||||||
|
|
||||||
|
# GOTO
|
||||||
|
- [Quick Start Guide](#quick-start-guide)
|
||||||
|
- [FAQ](#faq)
|
||||||
|
- [Recognizability for DPI and crawler](#recognizability-for-dpi-and-crawler)
|
||||||
|
- [Client WITH secret-key accesses the MTProxy resource:](#client-with-secret-key-accesses-the-mtproxy-resource)
|
||||||
|
- [Client WITHOUT secret-key gets transparent access to the specified resource:](#client-without-secret-key-gets-transparent-access-to-the-specified-resource)
|
||||||
|
- [Telegram Calls via MTProxy](#telegram-calls-via-mtproxy)
|
||||||
|
- [How does DPI see MTProxy TLS?](#how-does-dpi-see-mtproxy-tls)
|
||||||
|
- [Whitelist on IP](#whitelist-on-ip)
|
||||||
|
- [Too many open files](#too-many-open-files)
|
||||||
|
- [Build](#build)
|
||||||
|
- [Why Rust?](#why-rust)
|
||||||
|
- [Issues](#issues)
|
||||||
|
- [Roadmap](#roadmap)
|
||||||
|
|
||||||
|
|
||||||
## Quick Start Guide
|
## Quick Start Guide
|
||||||
**This software is designed for Debian-based OS: in addition to Debian, these are Ubuntu, Mint, Kali, MX and many other Linux distributions**
|
- [Quick Start Guide RU](docs/QUICK_START_GUIDE.ru.md)
|
||||||
1. Download release
|
- [Quick Start Guide EN](docs/QUICK_START_GUIDE.en.md)
|
||||||
```bash
|
|
||||||
wget -qO- "https://github.com/telemt/telemt/releases/latest/download/telemt-$(uname -m)-linux-$(ldd --version 2>&1 | grep -iq musl && echo musl || echo gnu).tar.gz" | tar -xz
|
|
||||||
```
|
|
||||||
2. Move to Bin Folder
|
|
||||||
```bash
|
|
||||||
mv telemt /bin
|
|
||||||
```
|
|
||||||
4. Make Executable
|
|
||||||
```bash
|
|
||||||
chmod +x /bin/telemt
|
|
||||||
```
|
|
||||||
5. Go to [How to use?](#how-to-use) section for further steps
|
|
||||||
|
|
||||||
## How to use?
|
|
||||||
### Telemt via Systemd
|
|
||||||
**This instruction "assumes" that you:**
|
|
||||||
- logged in as root or executed `su -` / `sudo su`
|
|
||||||
- you already have an assembled and executable `telemt` in /bin folder as a result of the [Quick Start Guide](#quick-start-guide) or [Build](#build)
|
|
||||||
|
|
||||||
**0. Check port and generate secrets**
|
|
||||||
|
|
||||||
The port you have selected for use should be MISSING from the list, when:
|
|
||||||
```bash
|
|
||||||
netstat -lnp
|
|
||||||
```
|
|
||||||
|
|
||||||
Generate 16 bytes/32 characters HEX with OpenSSL or another way:
|
|
||||||
```bash
|
|
||||||
openssl rand -hex 16
|
|
||||||
```
|
|
||||||
OR
|
|
||||||
```bash
|
|
||||||
xxd -l 16 -p /dev/urandom
|
|
||||||
```
|
|
||||||
OR
|
|
||||||
```bash
|
|
||||||
python3 -c 'import os; print(os.urandom(16).hex())'
|
|
||||||
```
|
|
||||||
|
|
||||||
**1. Place your config to /etc/telemt.toml**
|
|
||||||
|
|
||||||
Open nano
|
|
||||||
```bash
|
|
||||||
nano /etc/telemt.toml
|
|
||||||
```
|
|
||||||
paste your config from [Configuration](#configuration) section
|
|
||||||
|
|
||||||
then Ctrl+X -> Y -> Enter to save
|
|
||||||
|
|
||||||
**2. Create service on /etc/systemd/system/telemt.service**
|
|
||||||
|
|
||||||
Open nano
|
|
||||||
```bash
|
|
||||||
nano /etc/systemd/system/telemt.service
|
|
||||||
```
|
|
||||||
paste this Systemd Module
|
|
||||||
```bash
|
|
||||||
[Unit]
|
|
||||||
Description=Telemt
|
|
||||||
After=network.target
|
|
||||||
|
|
||||||
[Service]
|
|
||||||
Type=simple
|
|
||||||
WorkingDirectory=/bin
|
|
||||||
ExecStart=/bin/telemt /etc/telemt.toml
|
|
||||||
Restart=on-failure
|
|
||||||
LimitNOFILE=65536
|
|
||||||
|
|
||||||
[Install]
|
|
||||||
WantedBy=multi-user.target
|
|
||||||
```
|
|
||||||
then Ctrl+X -> Y -> Enter to save
|
|
||||||
|
|
||||||
**3.** In Shell type `systemctl start telemt` - it must start with zero exit-code
|
|
||||||
|
|
||||||
**4.** In Shell type `systemctl status telemt` - there you can reach info about current MTProxy status
|
|
||||||
|
|
||||||
**5.** In Shell type `systemctl enable telemt` - then telemt will start with system startup, after the network is up
|
|
||||||
|
|
||||||
## Configuration
|
|
||||||
### Minimal Configuration for First Start
|
|
||||||
```toml
|
|
||||||
# === General Settings ===
|
|
||||||
[general]
|
|
||||||
# ad_tag = "00000000000000000000000000000000"
|
|
||||||
|
|
||||||
[general.modes]
|
|
||||||
classic = false
|
|
||||||
secure = false
|
|
||||||
tls = true
|
|
||||||
|
|
||||||
# === Anti-Censorship & Masking ===
|
|
||||||
[censorship]
|
|
||||||
tls_domain = "petrovich.ru"
|
|
||||||
|
|
||||||
[access.users]
|
|
||||||
# format: "username" = "32_hex_chars_secret"
|
|
||||||
hello = "00000000000000000000000000000000"
|
|
||||||
|
|
||||||
```
|
|
||||||
### Advanced
|
|
||||||
#### Adtag
|
|
||||||
To use channel advertising and usage statistics from Telegram, get Adtag from [@mtproxybot](https://t.me/mtproxybot), add this parameter to section `[General]`
|
|
||||||
```toml
|
|
||||||
ad_tag = "00000000000000000000000000000000" # Replace zeros to your adtag from @mtproxybot
|
|
||||||
```
|
|
||||||
#### Listening and Announce IPs
|
|
||||||
To specify listening address and/or address in links, add to section `[[server.listeners]]` of config.toml:
|
|
||||||
```toml
|
|
||||||
[[server.listeners]]
|
|
||||||
ip = "0.0.0.0" # 0.0.0.0 = all IPs; your IP = specific listening
|
|
||||||
announce_ip = "1.2.3.4" # IP in links; comment with # if not used
|
|
||||||
```
|
|
||||||
#### Upstream Manager
|
|
||||||
To specify upstream, add to section `[[upstreams]]` of config.toml:
|
|
||||||
##### Bind on IP
|
|
||||||
```toml
|
|
||||||
[[upstreams]]
|
|
||||||
type = "direct"
|
|
||||||
weight = 1
|
|
||||||
enabled = true
|
|
||||||
interface = "192.168.1.100" # Change to your outgoing IP
|
|
||||||
```
|
|
||||||
##### SOCKS4/5 as Upstream
|
|
||||||
- Without Auth:
|
|
||||||
```toml
|
|
||||||
[[upstreams]]
|
|
||||||
type = "socks5" # Specify SOCKS4 or SOCKS5
|
|
||||||
address = "1.2.3.4:1234" # SOCKS-server Address
|
|
||||||
weight = 1 # Set Weight for Scenarios
|
|
||||||
enabled = true
|
|
||||||
```
|
|
||||||
|
|
||||||
- With Auth:
|
|
||||||
```toml
|
|
||||||
[[upstreams]]
|
|
||||||
type = "socks5" # Specify SOCKS4 or SOCKS5
|
|
||||||
address = "1.2.3.4:1234" # SOCKS-server Address
|
|
||||||
username = "user" # Username for Auth on SOCKS-server
|
|
||||||
password = "pass" # Password for Auth on SOCKS-server
|
|
||||||
weight = 1 # Set Weight for Scenarios
|
|
||||||
enabled = true
|
|
||||||
```
|
|
||||||
|
|
||||||
## FAQ
|
## FAQ
|
||||||
|
|
||||||
|
- [FAQ RU](docs/FAQ.ru.md)
|
||||||
|
- [FAQ EN](docs/FAQ.en.md)
|
||||||
|
|
||||||
### Recognizability for DPI and crawler
|
### Recognizability for DPI and crawler
|
||||||
Since version 1.1.0.0, we have debugged masking perfectly: for all clients without "presenting" a key,
|
Since version 1.1.0.0, we have debugged masking perfectly: for all clients without "presenting" a key,
|
||||||
we transparently direct traffic to the target host!
|
we transparently direct traffic to the target host!
|
||||||
@@ -411,6 +264,11 @@ git clone https://github.com/telemt/telemt
|
|||||||
cd telemt
|
cd telemt
|
||||||
# Starting Release Build
|
# Starting Release Build
|
||||||
cargo build --release
|
cargo build --release
|
||||||
|
|
||||||
|
# Low-RAM devices (1 GB, e.g. NanoPi Neo3 / Raspberry Pi Zero 2):
|
||||||
|
# release profile uses lto = "thin" to reduce peak linker memory.
|
||||||
|
# If your custom toolchain overrides profiles, avoid enabling fat LTO.
|
||||||
|
|
||||||
# Move to /bin
|
# Move to /bin
|
||||||
mv ./target/release/telemt /bin
|
mv ./target/release/telemt /bin
|
||||||
# Make executable
|
# Make executable
|
||||||
@@ -419,40 +277,11 @@ chmod +x /bin/telemt
|
|||||||
telemt config.toml
|
telemt config.toml
|
||||||
```
|
```
|
||||||
|
|
||||||
## Docker
|
### OpenBSD
|
||||||
**Quick start (Docker Compose)**
|
- Build and service setup guide: [OpenBSD Guide (EN)](docs/OPENBSD.en.md)
|
||||||
|
- Example rc.d script: [contrib/openbsd/telemt.rcd](contrib/openbsd/telemt.rcd)
|
||||||
|
- Status: OpenBSD sandbox hardening with `pledge(2)` and `unveil(2)` is not implemented yet.
|
||||||
|
|
||||||
1. Edit `config.toml` in the repo root (at least: port, user secrets, tls_domain)
|
|
||||||
2. Start container:
|
|
||||||
```bash
|
|
||||||
docker compose up -d --build
|
|
||||||
```
|
|
||||||
3. Check logs:
|
|
||||||
```bash
|
|
||||||
docker compose logs -f telemt
|
|
||||||
```
|
|
||||||
4. Stop:
|
|
||||||
```bash
|
|
||||||
docker compose down
|
|
||||||
```
|
|
||||||
|
|
||||||
**Notes**
|
|
||||||
- `docker-compose.yml` maps `./config.toml` to `/app/config.toml` (read-only)
|
|
||||||
- By default it publishes `443:443` and runs with dropped capabilities (only `NET_BIND_SERVICE` is added)
|
|
||||||
- If you really need host networking (usually only for some IPv6 setups) uncomment `network_mode: host`
|
|
||||||
|
|
||||||
**Run without Compose**
|
|
||||||
```bash
|
|
||||||
docker build -t telemt:local .
|
|
||||||
docker run --name telemt --restart unless-stopped \
|
|
||||||
-p 443:443 \
|
|
||||||
-e RUST_LOG=info \
|
|
||||||
-v "$PWD/config.toml:/app/config.toml:ro" \
|
|
||||||
--read-only \
|
|
||||||
--cap-drop ALL --cap-add NET_BIND_SERVICE \
|
|
||||||
--ulimit nofile=65536:65536 \
|
|
||||||
telemt:local
|
|
||||||
```
|
|
||||||
|
|
||||||
## Why Rust?
|
## Why Rust?
|
||||||
- Long-running reliability and idempotent behavior
|
- Long-running reliability and idempotent behavior
|
||||||
|
|||||||
697
config.full.toml
Normal file
697
config.full.toml
Normal file
@@ -0,0 +1,697 @@
|
|||||||
|
# ==============================================================================
|
||||||
|
#
|
||||||
|
# TELEMT — Advanced Rust-based Telegram MTProto Proxy
|
||||||
|
# Full Configuration Reference
|
||||||
|
#
|
||||||
|
# This file is both a working config and a complete documentation.
|
||||||
|
# Every parameter is explained. Read it top to bottom before deploying.
|
||||||
|
#
|
||||||
|
# Quick Start:
|
||||||
|
# 1. Set [server].port to your desired port (443 recommended)
|
||||||
|
# 2. Generate a secret: openssl rand -hex 16
|
||||||
|
# 3. Put it in [access.users] under a name you choose
|
||||||
|
# 4. Set [censorship].tls_domain to a popular unblocked HTTPS site
|
||||||
|
# 5. Set your public IP in [general].middle_proxy_nat_ip
|
||||||
|
# and [general.links].public_host
|
||||||
|
# 6. Set announce IP in [[server.listeners]]
|
||||||
|
# 7. Run Telemt. It prints a tg:// link. Send it to your users.
|
||||||
|
#
|
||||||
|
# Modes of Operation:
|
||||||
|
# Direct Mode (use_middle_proxy = false)
|
||||||
|
# Connects straight to Telegram DCs via TCP. Simple, fast, low overhead.
|
||||||
|
# No ad_tag support. No CDN DC support (203, etc).
|
||||||
|
#
|
||||||
|
# Middle-Proxy Mode (use_middle_proxy = true)
|
||||||
|
# Connects to Telegram Middle-End servers via RPC protocol.
|
||||||
|
# Required for ad_tag monetization and CDN support.
|
||||||
|
# Requires proxy_secret_path and a valid public IP.
|
||||||
|
#
|
||||||
|
# ==============================================================================
|
||||||
|
|
||||||
|
|
||||||
|
# ==============================================================================
|
||||||
|
# LEGACY TOP-LEVEL FIELDS
|
||||||
|
# ==============================================================================
|
||||||
|
|
||||||
|
# Deprecated. Use [general.links].show instead.
|
||||||
|
# Accepts "*" for all users, or an array like ["alice", "bob"].
|
||||||
|
show_link = ["0"]
|
||||||
|
|
||||||
|
# Fallback Datacenter index (1-5) when a client requests an unknown DC ID.
|
||||||
|
# DC 2 is Amsterdam (Europe), closest for most CIS users.
|
||||||
|
# default_dc = 2
|
||||||
|
|
||||||
|
|
||||||
|
# ==============================================================================
|
||||||
|
# GENERAL SETTINGS
|
||||||
|
# ==============================================================================
|
||||||
|
|
||||||
|
[general]
|
||||||
|
|
||||||
|
# ------------------------------------------------------------------------------
|
||||||
|
# Core Protocol
|
||||||
|
# ------------------------------------------------------------------------------
|
||||||
|
|
||||||
|
# Coalesce the MTProto handshake and first data payload into a single TCP packet.
|
||||||
|
# Significantly reduces connection latency. No reason to disable.
|
||||||
|
fast_mode = true
|
||||||
|
|
||||||
|
# How the proxy connects to Telegram servers.
|
||||||
|
# false = Direct TCP to Telegram DCs (simple, low overhead)
|
||||||
|
# true = Middle-End RPC protocol (required for ad_tag and CDN DCs)
|
||||||
|
use_middle_proxy = true
|
||||||
|
|
||||||
|
# 32-char hex Ad-Tag from @MTProxybot for sponsored channel injection.
|
||||||
|
# Only works when use_middle_proxy = true.
|
||||||
|
# Obtain yours: message @MTProxybot on Telegram, register your proxy.
|
||||||
|
# ad_tag = "00000000000000000000000000000000"
|
||||||
|
|
||||||
|
# ------------------------------------------------------------------------------
|
||||||
|
# Middle-End Authentication
|
||||||
|
# ------------------------------------------------------------------------------
|
||||||
|
|
||||||
|
# Path to the Telegram infrastructure AES key file.
|
||||||
|
# Auto-downloaded from https://core.telegram.org/getProxySecret on first run.
|
||||||
|
# This key authenticates your proxy with Middle-End servers.
|
||||||
|
proxy_secret_path = "proxy-secret"
|
||||||
|
|
||||||
|
# ------------------------------------------------------------------------------
|
||||||
|
# Public IP Configuration (Critical for Middle-Proxy Mode)
|
||||||
|
# ------------------------------------------------------------------------------
|
||||||
|
|
||||||
|
# Your server's PUBLIC IPv4 address.
|
||||||
|
# Middle-End servers need this for the cryptographic Key Derivation Function.
|
||||||
|
# If your server has a direct public IP, set it here.
|
||||||
|
# If behind NAT (AWS, Docker, etc.), this MUST be your external IP.
|
||||||
|
# If omitted, Telemt uses STUN to auto-detect (see middle_proxy_nat_probe).
|
||||||
|
# middle_proxy_nat_ip = "203.0.113.10"
|
||||||
|
|
||||||
|
# Auto-detect public IP via STUN servers defined in [network].
|
||||||
|
# Set to false if you hardcoded middle_proxy_nat_ip above.
|
||||||
|
# Set to true if you want automatic detection.
|
||||||
|
middle_proxy_nat_probe = true
|
||||||
|
|
||||||
|
# ------------------------------------------------------------------------------
|
||||||
|
# Middle-End Connection Pool
|
||||||
|
# ------------------------------------------------------------------------------
|
||||||
|
|
||||||
|
# Number of persistent multiplexed RPC connections to ME servers.
|
||||||
|
# All client traffic is routed through these "fat pipes".
|
||||||
|
# 8 handles thousands of concurrent users comfortably.
|
||||||
|
middle_proxy_pool_size = 8
|
||||||
|
|
||||||
|
# Legacy field. Connections kept initialized but idle as warm standby.
|
||||||
|
middle_proxy_warm_standby = 16
|
||||||
|
|
||||||
|
# ------------------------------------------------------------------------------
|
||||||
|
# Middle-End Keepalive
|
||||||
|
# Telegram ME servers aggressively kill idle TCP connections.
|
||||||
|
# These settings send periodic RPC_PING frames to keep pipes alive.
|
||||||
|
# ------------------------------------------------------------------------------
|
||||||
|
|
||||||
|
me_keepalive_enabled = true
|
||||||
|
|
||||||
|
# Base interval between pings in seconds.
|
||||||
|
me_keepalive_interval_secs = 25
|
||||||
|
|
||||||
|
# Random jitter added to interval to prevent all connections pinging simultaneously.
|
||||||
|
me_keepalive_jitter_secs = 5
|
||||||
|
|
||||||
|
# Randomize ping payload bytes to prevent DPI from fingerprinting ping patterns.
|
||||||
|
me_keepalive_payload_random = true
|
||||||
|
|
||||||
|
# ------------------------------------------------------------------------------
|
||||||
|
# Client-Side Limits
|
||||||
|
# ------------------------------------------------------------------------------
|
||||||
|
|
||||||
|
# Max buffered ciphertext per client (bytes) when upstream is slow.
|
||||||
|
# Acts as backpressure to prevent memory exhaustion. 256KB is safe.
|
||||||
|
crypto_pending_buffer = 262144
|
||||||
|
|
||||||
|
# Maximum single MTProto frame size from client. 16MB is protocol standard.
|
||||||
|
max_client_frame = 16777216
|
||||||
|
|
||||||
|
# ------------------------------------------------------------------------------
|
||||||
|
# Crypto Desynchronization Logging
|
||||||
|
# Desync errors usually mean DPI/GFW is tampering with connections.
|
||||||
|
# ------------------------------------------------------------------------------
|
||||||
|
|
||||||
|
# true = full forensics (trace ID, IP hash, hex dumps) for EVERY desync event
|
||||||
|
# false = deduplicated logging, one entry per time window (prevents log spam)
|
||||||
|
# Set true if you are actively debugging DPI interference.
|
||||||
|
desync_all_full = true
|
||||||
|
|
||||||
|
# ------------------------------------------------------------------------------
|
||||||
|
# Beobachten — Built-in Honeypot / Active Probe Tracker
|
||||||
|
# Tracks IPs that fail handshakes or behave like TLS scanners.
|
||||||
|
# Output file can be fed into fail2ban or iptables for auto-blocking.
|
||||||
|
# ------------------------------------------------------------------------------
|
||||||
|
|
||||||
|
beobachten = true
|
||||||
|
|
||||||
|
# How long (minutes) to remember a suspicious IP before expiring it.
|
||||||
|
beobachten_minutes = 30
|
||||||
|
|
||||||
|
# How often (seconds) to flush tracker state to disk.
|
||||||
|
beobachten_flush_secs = 15
|
||||||
|
|
||||||
|
# File path for the tracker output.
|
||||||
|
beobachten_file = "cache/beobachten.txt"
|
||||||
|
|
||||||
|
# ------------------------------------------------------------------------------
|
||||||
|
# Hardswap — Zero-Downtime ME Pool Rotation
|
||||||
|
# When Telegram updates ME server IPs, Hardswap creates a completely new pool,
|
||||||
|
# waits until it is fully ready, migrates traffic, then kills the old pool.
|
||||||
|
# Users experience zero interruption.
|
||||||
|
# ------------------------------------------------------------------------------
|
||||||
|
|
||||||
|
hardswap = true
|
||||||
|
|
||||||
|
# ------------------------------------------------------------------------------
|
||||||
|
# ME Pool Warmup Staggering
|
||||||
|
# When creating a new pool, connections are opened one by one with delays
|
||||||
|
# to avoid a burst of SYN packets that could trigger ISP flood protection.
|
||||||
|
# ------------------------------------------------------------------------------
|
||||||
|
|
||||||
|
me_warmup_stagger_enabled = true
|
||||||
|
|
||||||
|
# Delay between each connection creation (milliseconds).
|
||||||
|
me_warmup_step_delay_ms = 500
|
||||||
|
|
||||||
|
# Random jitter added to the delay (milliseconds).
|
||||||
|
me_warmup_step_jitter_ms = 300
|
||||||
|
|
||||||
|
# ------------------------------------------------------------------------------
|
||||||
|
# ME Reconnect Backoff
|
||||||
|
# If an ME server drops the connection, Telemt retries with this strategy.
|
||||||
|
# ------------------------------------------------------------------------------
|
||||||
|
|
||||||
|
# Max simultaneous reconnect attempts per DC.
|
||||||
|
me_reconnect_max_concurrent_per_dc = 8
|
||||||
|
|
||||||
|
# Exponential backoff base (milliseconds).
|
||||||
|
me_reconnect_backoff_base_ms = 500
|
||||||
|
|
||||||
|
# Backoff ceiling (milliseconds). Will never wait longer than this.
|
||||||
|
me_reconnect_backoff_cap_ms = 30000
|
||||||
|
|
||||||
|
# Number of instant retries before switching to exponential backoff.
|
||||||
|
me_reconnect_fast_retry_count = 12
|
||||||
|
|
||||||
|
# ------------------------------------------------------------------------------
|
||||||
|
# NAT Mismatch Behavior
|
||||||
|
# If STUN-detected IP differs from local interface IP (you are behind NAT).
|
||||||
|
# false = abort ME mode (safe default)
|
||||||
|
# true = force ME mode anyway (use if you know your NAT setup is correct)
|
||||||
|
# ------------------------------------------------------------------------------
|
||||||
|
|
||||||
|
stun_iface_mismatch_ignore = false
|
||||||
|
|
||||||
|
# ------------------------------------------------------------------------------
|
||||||
|
# Logging
|
||||||
|
# ------------------------------------------------------------------------------
|
||||||
|
|
||||||
|
# File to log unknown DC requests (DC IDs outside standard 1-5).
|
||||||
|
unknown_dc_log_path = "unknown-dc.txt"
|
||||||
|
|
||||||
|
# Verbosity: "debug" | "verbose" | "normal" | "silent"
|
||||||
|
log_level = "normal"
|
||||||
|
|
||||||
|
# Disable ANSI color codes in log output (useful for file logging).
|
||||||
|
disable_colors = false
|
||||||
|
|
||||||
|
# ------------------------------------------------------------------------------
|
||||||
|
# FakeTLS Record Sizing
|
||||||
|
# Buffer small MTProto packets into larger TLS records to mimic real HTTPS.
|
||||||
|
# Real HTTPS servers send records close to MTU size (~1400 bytes).
|
||||||
|
# A stream of tiny TLS records is a strong DPI signal.
|
||||||
|
# Set to 0 to disable. Set to 1400 for realistic HTTPS emulation.
|
||||||
|
# ------------------------------------------------------------------------------
|
||||||
|
|
||||||
|
fast_mode_min_tls_record = 1400
|
||||||
|
|
||||||
|
# ------------------------------------------------------------------------------
|
||||||
|
# Periodic Updates
|
||||||
|
# ------------------------------------------------------------------------------
|
||||||
|
|
||||||
|
# How often (seconds) to re-fetch ME server lists and proxy secrets
|
||||||
|
# from core.telegram.org. Keeps your proxy in sync with Telegram infrastructure.
|
||||||
|
update_every = 300
|
||||||
|
|
||||||
|
# How often (seconds) to force a Hardswap even if the ME map is unchanged.
|
||||||
|
# Shorter intervals mean shorter-lived TCP flows, harder for DPI to profile.
|
||||||
|
me_reinit_every_secs = 600
|
||||||
|
|
||||||
|
# ------------------------------------------------------------------------------
|
||||||
|
# Hardswap Warmup Tuning
|
||||||
|
# Fine-grained control over how the new pool is warmed up before traffic switch.
|
||||||
|
# ------------------------------------------------------------------------------
|
||||||
|
|
||||||
|
me_hardswap_warmup_delay_min_ms = 1000
|
||||||
|
me_hardswap_warmup_delay_max_ms = 2000
|
||||||
|
me_hardswap_warmup_extra_passes = 3
|
||||||
|
me_hardswap_warmup_pass_backoff_base_ms = 500
|
||||||
|
|
||||||
|
# ------------------------------------------------------------------------------
|
||||||
|
# Config Update Debouncing
|
||||||
|
# Telegram sometimes pushes transient/broken configs. Debouncing requires
|
||||||
|
# N consecutive identical fetches before applying a change.
|
||||||
|
# ------------------------------------------------------------------------------
|
||||||
|
|
||||||
|
# ME server list must be identical for this many fetches before applying.
|
||||||
|
me_config_stable_snapshots = 2
|
||||||
|
|
||||||
|
# Minimum seconds between config applications.
|
||||||
|
me_config_apply_cooldown_secs = 300
|
||||||
|
|
||||||
|
# Proxy secret must be identical for this many fetches before applying.
|
||||||
|
proxy_secret_stable_snapshots = 2
|
||||||
|
|
||||||
|
# ------------------------------------------------------------------------------
|
||||||
|
# Proxy Secret Rotation
|
||||||
|
# ------------------------------------------------------------------------------
|
||||||
|
|
||||||
|
# Apply newly downloaded secrets at runtime without restart.
|
||||||
|
proxy_secret_rotate_runtime = true
|
||||||
|
|
||||||
|
# Maximum acceptable secret length (bytes). Rejects abnormally large secrets.
|
||||||
|
proxy_secret_len_max = 256
|
||||||
|
|
||||||
|
# ------------------------------------------------------------------------------
|
||||||
|
# Hardswap Drain Settings
|
||||||
|
# Controls graceful shutdown of old ME connections during pool rotation.
|
||||||
|
# ------------------------------------------------------------------------------
|
||||||
|
|
||||||
|
# Seconds to keep old connections alive for in-flight data before force-closing.
|
||||||
|
me_pool_drain_ttl_secs = 90
|
||||||
|
|
||||||
|
# Minimum ratio of healthy connections in new pool before draining old pool.
|
||||||
|
# 0.8 = at least 80% of new pool must be ready.
|
||||||
|
me_pool_min_fresh_ratio = 0.8
|
||||||
|
|
||||||
|
# Maximum seconds to wait for drain to complete before force-killing.
|
||||||
|
me_reinit_drain_timeout_secs = 120
|
||||||
|
|
||||||
|
# ------------------------------------------------------------------------------
|
||||||
|
# NTP Clock Check
|
||||||
|
# MTProto uses timestamps. Clock drift > 30 seconds breaks handshakes.
|
||||||
|
# Telemt checks on startup and warns if out of sync.
|
||||||
|
# ------------------------------------------------------------------------------
|
||||||
|
|
||||||
|
ntp_check = true
|
||||||
|
ntp_servers = ["pool.ntp.org"]
|
||||||
|
|
||||||
|
# ------------------------------------------------------------------------------
|
||||||
|
# Auto-Degradation
|
||||||
|
# If ME servers become completely unreachable (ISP blocking),
|
||||||
|
# automatically fall back to Direct Mode so users stay connected.
|
||||||
|
# ------------------------------------------------------------------------------
|
||||||
|
|
||||||
|
auto_degradation_enabled = true
|
||||||
|
|
||||||
|
# Number of DC groups that must be unreachable before triggering fallback.
|
||||||
|
degradation_min_unavailable_dc_groups = 2
|
||||||
|
|
||||||
|
|
||||||
|
# ==============================================================================
|
||||||
|
# ALLOWED CLIENT PROTOCOLS
|
||||||
|
# Only enable what you need. In censored regions, TLS-only is safest.
|
||||||
|
# ==============================================================================
|
||||||
|
|
||||||
|
[general.modes]
|
||||||
|
|
||||||
|
# Classic MTProto. Unobfuscated length prefixes. Trivially detected by DPI.
|
||||||
|
# No reason to enable unless you have ancient clients.
|
||||||
|
classic = false
|
||||||
|
|
||||||
|
# Obfuscated MTProto with randomized padding. Better than classic, but
|
||||||
|
# still detectable by statistical analysis of packet sizes.
|
||||||
|
secure = false
|
||||||
|
|
||||||
|
# FakeTLS (ee-secrets). Wraps MTProto in TLS 1.3 framing.
|
||||||
|
# To DPI, it looks like a normal HTTPS connection.
|
||||||
|
# This should be the ONLY enabled mode in censored environments.
|
||||||
|
tls = true
|
||||||
|
|
||||||
|
|
||||||
|
# ==============================================================================
|
||||||
|
# STARTUP LINK GENERATION
|
||||||
|
# Controls what tg:// invite links are printed to console on startup.
|
||||||
|
# ==============================================================================
|
||||||
|
|
||||||
|
[general.links]
|
||||||
|
|
||||||
|
# Which users to generate links for.
|
||||||
|
# "*" = all users, or an array like ["alice", "bob"].
|
||||||
|
show = "*"
|
||||||
|
|
||||||
|
# IP or domain to embed in the tg:// link.
|
||||||
|
# If omitted, Telemt uses STUN to auto-detect.
|
||||||
|
# Set this to your server's public IP or domain for reliable links.
|
||||||
|
# public_host = "proxy.example.com"
|
||||||
|
|
||||||
|
# Port to embed in the tg:// link.
|
||||||
|
# If omitted, uses [server].port.
|
||||||
|
# public_port = 443
|
||||||
|
|
||||||
|
|
||||||
|
# ==============================================================================
|
||||||
|
# NETWORK & IP RESOLUTION
|
||||||
|
# ==============================================================================
|
||||||
|
|
||||||
|
[network]
|
||||||
|
|
||||||
|
# Enable IPv4 for outbound connections to Telegram.
|
||||||
|
ipv4 = true
|
||||||
|
|
||||||
|
# Enable IPv6 for outbound connections to Telegram.
|
||||||
|
ipv6 = false
|
||||||
|
|
||||||
|
# Prefer IPv4 (4) or IPv6 (6) when both are available.
|
||||||
|
prefer = 4
|
||||||
|
|
||||||
|
# Experimental: use both IPv4 and IPv6 ME servers simultaneously.
|
||||||
|
# May improve reliability but doubles connection count.
|
||||||
|
multipath = false
|
||||||
|
|
||||||
|
# STUN servers for external IP discovery.
|
||||||
|
# Used for Middle-Proxy KDF (if nat_probe=true) and link generation.
|
||||||
|
stun_servers = [
|
||||||
|
"stun.l.google.com:5349",
|
||||||
|
"stun1.l.google.com:3478",
|
||||||
|
"stun.gmx.net:3478",
|
||||||
|
"stun.l.google.com:19302"
|
||||||
|
]
|
||||||
|
|
||||||
|
# If UDP STUN is blocked, attempt TCP-based STUN as fallback.
|
||||||
|
stun_tcp_fallback = true
|
||||||
|
|
||||||
|
# If all STUN fails, use HTTP APIs to discover public IP.
|
||||||
|
http_ip_detect_urls = [
|
||||||
|
"https://ifconfig.me/ip",
|
||||||
|
"https://api.ipify.org"
|
||||||
|
]
|
||||||
|
|
||||||
|
# Cache discovered public IP to this file to survive restarts.
|
||||||
|
cache_public_ip_path = "cache/public_ip.txt"
|
||||||
|
|
||||||
|
|
||||||
|
# ==============================================================================
|
||||||
|
# SERVER BINDING & METRICS
|
||||||
|
# ==============================================================================
|
||||||
|
|
||||||
|
[server]
|
||||||
|
|
||||||
|
# TCP port to listen on.
|
||||||
|
# 443 is recommended (looks like normal HTTPS traffic).
|
||||||
|
port = 443
|
||||||
|
|
||||||
|
# IPv4 bind address. "0.0.0.0" = all interfaces.
|
||||||
|
listen_addr_ipv4 = "0.0.0.0"
|
||||||
|
|
||||||
|
# IPv6 bind address. "::" = all interfaces.
|
||||||
|
listen_addr_ipv6 = "::"
|
||||||
|
|
||||||
|
# Unix socket listener (for reverse proxy setups with Nginx/HAProxy).
|
||||||
|
# listen_unix_sock = "/var/run/telemt.sock"
|
||||||
|
# listen_unix_sock_perm = "0660"
|
||||||
|
|
||||||
|
# Enable PROXY protocol header parsing.
|
||||||
|
# Set true ONLY if Telemt is behind HAProxy/Nginx that injects PROXY headers.
|
||||||
|
# If enabled without a proxy in front, clients will fail to connect.
|
||||||
|
proxy_protocol = false
|
||||||
|
|
||||||
|
# Prometheus metrics HTTP endpoint port.
|
||||||
|
# Uncomment to enable. Access at http://your-server:9090/metrics
|
||||||
|
# metrics_port = 9090
|
||||||
|
|
||||||
|
# IP ranges allowed to access the metrics endpoint.
|
||||||
|
metrics_whitelist = [
|
||||||
|
"127.0.0.1/32",
|
||||||
|
"::1/128"
|
||||||
|
]
|
||||||
|
|
||||||
|
# ------------------------------------------------------------------------------
|
||||||
|
# Listener Overrides
|
||||||
|
# Define explicit listeners with specific bind IPs and announce IPs.
|
||||||
|
# The announce IP is what gets embedded in tg:// links and sent to ME servers.
|
||||||
|
# You MUST set announce to your server's public IP for ME mode to work.
|
||||||
|
# ------------------------------------------------------------------------------
|
||||||
|
|
||||||
|
# [[server.listeners]]
|
||||||
|
# ip = "0.0.0.0"
|
||||||
|
# announce = "203.0.113.10"
|
||||||
|
# reuse_allow = false
|
||||||
|
|
||||||
|
|
||||||
|
# ==============================================================================
|
||||||
|
# TIMEOUTS (seconds unless noted)
|
||||||
|
# ==============================================================================
|
||||||
|
|
||||||
|
[timeouts]
|
||||||
|
|
||||||
|
# Maximum time for client to complete FakeTLS + MTProto handshake.
|
||||||
|
client_handshake = 15
|
||||||
|
|
||||||
|
# Maximum time to establish TCP connection to upstream Telegram DC.
|
||||||
|
tg_connect = 10
|
||||||
|
|
||||||
|
# TCP keepalive interval for client connections.
|
||||||
|
client_keepalive = 60
|
||||||
|
|
||||||
|
# Maximum client inactivity before dropping the connection.
|
||||||
|
client_ack = 300
|
||||||
|
|
||||||
|
# Instant retry count for a single ME endpoint before giving up on it.
|
||||||
|
me_one_retry = 3
|
||||||
|
|
||||||
|
# Timeout (milliseconds) for a single ME endpoint connection attempt.
|
||||||
|
me_one_timeout_ms = 1500
|
||||||
|
|
||||||
|
|
||||||
|
# ==============================================================================
|
||||||
|
# ANTI-CENSORSHIP / FAKETLS / MASKING
|
||||||
|
# This is where Telemt becomes invisible to Deep Packet Inspection.
|
||||||
|
# ==============================================================================
|
||||||
|
|
||||||
|
[censorship]
|
||||||
|
|
||||||
|
# ------------------------------------------------------------------------------
|
||||||
|
# TLS Domain Fronting
|
||||||
|
# The SNI (Server Name Indication) your proxy presents to connecting clients.
|
||||||
|
# Must be a popular, unblocked HTTPS website in your target country.
|
||||||
|
# DPI sees traffic to this domain. Choose carefully.
|
||||||
|
# Good choices: major CDNs, banks, government sites, search engines.
|
||||||
|
# Bad choices: obscure sites, already-blocked domains.
|
||||||
|
# ------------------------------------------------------------------------------
|
||||||
|
|
||||||
|
tls_domain = "www.google.com"
|
||||||
|
|
||||||
|
# ------------------------------------------------------------------------------
|
||||||
|
# Active Probe Masking
|
||||||
|
# When someone connects but fails the MTProto handshake (wrong secret),
|
||||||
|
# they might be an ISP active prober testing if this is a proxy.
|
||||||
|
#
|
||||||
|
# mask = false: drop the connection (prober knows something is here)
|
||||||
|
# mask = true: transparently proxy them to mask_host (prober sees a real website)
|
||||||
|
#
|
||||||
|
# With mask enabled, your server is indistinguishable from a real web server
|
||||||
|
# to anyone who doesn't have the correct secret.
|
||||||
|
# ------------------------------------------------------------------------------
|
||||||
|
|
||||||
|
mask = true
|
||||||
|
|
||||||
|
# The real web server to forward failed handshakes to.
|
||||||
|
# If omitted, defaults to tls_domain.
|
||||||
|
# mask_host = "www.google.com"
|
||||||
|
|
||||||
|
# Port on the mask host to connect to.
|
||||||
|
mask_port = 443
|
||||||
|
|
||||||
|
# Inject PROXY protocol header when forwarding to mask host.
|
||||||
|
# 0 = disabled, 1 = v1, 2 = v2. Leave disabled unless mask_host expects it.
|
||||||
|
# mask_proxy_protocol = 0
|
||||||
|
|
||||||
|
# ------------------------------------------------------------------------------
|
||||||
|
# TLS Certificate Emulation
|
||||||
|
# ------------------------------------------------------------------------------
|
||||||
|
|
||||||
|
# Size (bytes) of the locally generated fake TLS certificate.
|
||||||
|
# Only used when tls_emulation is disabled.
|
||||||
|
fake_cert_len = 2048
|
||||||
|
|
||||||
|
# KILLER FEATURE: Real-Time TLS Emulation.
|
||||||
|
# Telemt connects to tls_domain, fetches its actual TLS 1.3 certificate chain,
|
||||||
|
# and exactly replicates the byte sizes of ServerHello and Certificate records.
|
||||||
|
# Defeats DPI that uses TLS record length heuristics to detect proxies.
|
||||||
|
# Strongly recommended in censored environments.
|
||||||
|
tls_emulation = true
|
||||||
|
|
||||||
|
# Directory to cache fetched TLS certificates.
|
||||||
|
tls_front_dir = "tlsfront"
|
||||||
|
|
||||||
|
# ------------------------------------------------------------------------------
|
||||||
|
# ServerHello Timing
|
||||||
|
# Real web servers take 30-150ms to respond to ClientHello due to network
|
||||||
|
# latency and crypto processing. A proxy responding in <1ms is suspicious.
|
||||||
|
# These settings add realistic delay to mimic genuine server behavior.
|
||||||
|
# ------------------------------------------------------------------------------
|
||||||
|
|
||||||
|
# Minimum delay before sending ServerHello (milliseconds).
|
||||||
|
server_hello_delay_min_ms = 50
|
||||||
|
|
||||||
|
# Maximum delay before sending ServerHello (milliseconds).
|
||||||
|
server_hello_delay_max_ms = 150
|
||||||
|
|
||||||
|
# ------------------------------------------------------------------------------
|
||||||
|
# TLS Session Tickets
|
||||||
|
# Real TLS 1.3 servers send 1-2 NewSessionTicket messages after handshake.
|
||||||
|
# A server that sends zero tickets is anomalous and may trigger DPI flags.
|
||||||
|
# Set this to match your tls_domain's behavior (usually 2).
|
||||||
|
# ------------------------------------------------------------------------------
|
||||||
|
|
||||||
|
# tls_new_session_tickets = 0
|
||||||
|
|
||||||
|
# ------------------------------------------------------------------------------
|
||||||
|
# Full Certificate Frequency
|
||||||
|
# When tls_emulation is enabled, this controls how often (per client IP)
|
||||||
|
# to send the complete emulated certificate chain.
|
||||||
|
#
|
||||||
|
# > 0: Subsequent connections within TTL seconds get a smaller cached version.
|
||||||
|
# Saves bandwidth but creates a detectable size difference between
|
||||||
|
# first and repeat connections.
|
||||||
|
#
|
||||||
|
# = 0: Every connection gets the full certificate. More bandwidth but
|
||||||
|
# perfectly consistent behavior, no anomalies for DPI to detect.
|
||||||
|
# ------------------------------------------------------------------------------
|
||||||
|
|
||||||
|
tls_full_cert_ttl_secs = 0
|
||||||
|
|
||||||
|
# ------------------------------------------------------------------------------
|
||||||
|
# ALPN Enforcement
|
||||||
|
# Ensure ServerHello responds with the exact ALPN protocol the client requested.
|
||||||
|
# Mismatched ALPN (e.g., client asks h2, server says http/1.1) is a DPI red flag.
|
||||||
|
# ------------------------------------------------------------------------------
|
||||||
|
|
||||||
|
alpn_enforce = true
|
||||||
|
|
||||||
|
|
||||||
|
# ==============================================================================
|
||||||
|
# ACCESS CONTROL & USERS
|
||||||
|
# ==============================================================================
|
||||||
|
|
||||||
|
[access]
|
||||||
|
|
||||||
|
# ------------------------------------------------------------------------------
|
||||||
|
# Replay Attack Protection
|
||||||
|
# DPI can record a legitimate user's handshake and replay it later to probe
|
||||||
|
# whether the server is a proxy. Telemt remembers recent handshake nonces
|
||||||
|
# and rejects duplicates.
|
||||||
|
# ------------------------------------------------------------------------------
|
||||||
|
|
||||||
|
# Number of nonce slots in the replay detection buffer.
|
||||||
|
replay_check_len = 65536
|
||||||
|
|
||||||
|
# How long (seconds) to remember nonces before expiring them.
|
||||||
|
replay_window_secs = 1800
|
||||||
|
|
||||||
|
# Allow clients with incorrect system clocks to connect.
|
||||||
|
# false = reject clients with significant time skew (more secure)
|
||||||
|
# true = accept anyone regardless of clock (more permissive)
|
||||||
|
ignore_time_skew = false
|
||||||
|
|
||||||
|
# ------------------------------------------------------------------------------
|
||||||
|
# User Secrets
|
||||||
|
# Each user needs a unique 32-character hex string as their secret.
|
||||||
|
# Generate with: openssl rand -hex 16
|
||||||
|
#
|
||||||
|
# This secret is embedded in the tg:// link. Anyone with it can connect.
|
||||||
|
# Format: username = "hex_secret"
|
||||||
|
# ------------------------------------------------------------------------------
|
||||||
|
|
||||||
|
[access.users]
|
||||||
|
# alice = "0123456789abcdef0123456789abcdef"
|
||||||
|
# bob = "fedcba9876543210fedcba9876543210"
|
||||||
|
|
||||||
|
# ------------------------------------------------------------------------------
|
||||||
|
# Per-User Connection Limits
|
||||||
|
# Limits concurrent TCP connections per user to prevent secret sharing.
|
||||||
|
# Uncomment and set for each user as needed.
|
||||||
|
# ------------------------------------------------------------------------------
|
||||||
|
|
||||||
|
[access.user_max_tcp_conns]
|
||||||
|
# alice = 100
|
||||||
|
# bob = 50
|
||||||
|
|
||||||
|
# ------------------------------------------------------------------------------
|
||||||
|
# Per-User Expiration Dates
|
||||||
|
# Automatically revoke access after the specified date (ISO 8601 format).
|
||||||
|
# ------------------------------------------------------------------------------
|
||||||
|
|
||||||
|
[access.user_expirations]
|
||||||
|
# alice = "2025-12-31T23:59:59Z"
|
||||||
|
# bob = "2026-06-15T00:00:00Z"
|
||||||
|
|
||||||
|
# ------------------------------------------------------------------------------
|
||||||
|
# Per-User Data Quotas
|
||||||
|
# Maximum total bytes transferred per user. Connection refused after limit.
|
||||||
|
# ------------------------------------------------------------------------------
|
||||||
|
|
||||||
|
[access.user_data_quota]
|
||||||
|
# alice = 107374182400
|
||||||
|
# bob = 53687091200
|
||||||
|
|
||||||
|
# ------------------------------------------------------------------------------
|
||||||
|
# Per-User Unique IP Limits
|
||||||
|
# Maximum number of different IP addresses that can use this secret
|
||||||
|
# at the same time. Highly effective against secret leaking/sharing.
|
||||||
|
# Set to 1 for single-device, 2-3 for phone+desktop, etc.
|
||||||
|
# ------------------------------------------------------------------------------
|
||||||
|
|
||||||
|
[access.user_max_unique_ips]
|
||||||
|
# alice = 3
|
||||||
|
# bob = 2
|
||||||
|
|
||||||
|
|
||||||
|
# ==============================================================================
|
||||||
|
# UPSTREAM ROUTING
|
||||||
|
# Controls how Telemt connects to Telegram servers (or ME servers).
|
||||||
|
# If omitted entirely, uses the OS default route.
|
||||||
|
# ==============================================================================
|
||||||
|
|
||||||
|
# ------------------------------------------------------------------------------
|
||||||
|
# Direct upstream: use the server's own network interface.
|
||||||
|
# You can optionally bind to a specific interface or local IP.
|
||||||
|
# ------------------------------------------------------------------------------
|
||||||
|
|
||||||
|
# [[upstreams]]
|
||||||
|
# type = "direct"
|
||||||
|
# interface = "eth0"
|
||||||
|
# bind_addresses = ["192.0.2.10"]
|
||||||
|
# weight = 1
|
||||||
|
# enabled = true
|
||||||
|
# scopes = "*"
|
||||||
|
|
||||||
|
# ------------------------------------------------------------------------------
|
||||||
|
# SOCKS5 upstream: route Telegram traffic through a SOCKS5 proxy.
|
||||||
|
# Useful if your server's IP is blocked from reaching Telegram DCs.
|
||||||
|
# ------------------------------------------------------------------------------
|
||||||
|
|
||||||
|
# [[upstreams]]
|
||||||
|
# type = "socks5"
|
||||||
|
# address = "198.51.100.30:1080"
|
||||||
|
# username = "proxy-user"
|
||||||
|
# password = "proxy-pass"
|
||||||
|
# weight = 1
|
||||||
|
# enabled = true
|
||||||
|
|
||||||
|
|
||||||
|
# ==============================================================================
|
||||||
|
# DATACENTER OVERRIDES
|
||||||
|
# Force specific DC IDs to route to specific IP:Port combinations.
|
||||||
|
# DC 203 (CDN) is auto-injected by Telemt if not specified here.
|
||||||
|
# ==============================================================================
|
||||||
|
|
||||||
|
# [dc_overrides]
|
||||||
|
# "201" = "149.154.175.50:443"
|
||||||
|
# "202" = ["149.154.167.51:443", "149.154.175.100:443"]
|
||||||
122
config.toml
122
config.toml
@@ -1,11 +1,13 @@
|
|||||||
|
### Telemt Based Config.toml
|
||||||
|
# We believe that these settings are sufficient for most scenarios
|
||||||
|
# where cutting-egde methods and parameters or special solutions are not needed
|
||||||
|
|
||||||
# === General Settings ===
|
# === General Settings ===
|
||||||
[general]
|
[general]
|
||||||
fast_mode = true
|
|
||||||
use_middle_proxy = true
|
use_middle_proxy = true
|
||||||
|
# Global ad_tag fallback when user has no per-user tag in [access.user_ad_tags]
|
||||||
# ad_tag = "00000000000000000000000000000000"
|
# ad_tag = "00000000000000000000000000000000"
|
||||||
# Path to proxy-secret binary (auto-downloaded if missing).
|
# Per-user ad_tag in [access.user_ad_tags] (32 hex from @MTProxybot)
|
||||||
proxy_secret_path = "proxy-secret"
|
|
||||||
# disable_colors = false # Disable colored output in logs (useful for files/systemd)
|
|
||||||
|
|
||||||
# === Log Level ===
|
# === Log Level ===
|
||||||
# Log level: debug | verbose | normal | silent
|
# Log level: debug | verbose | normal | silent
|
||||||
@@ -13,36 +15,6 @@ proxy_secret_path = "proxy-secret"
|
|||||||
# RUST_LOG env var takes absolute priority over all of these
|
# RUST_LOG env var takes absolute priority over all of these
|
||||||
log_level = "normal"
|
log_level = "normal"
|
||||||
|
|
||||||
# === Middle Proxy - ME ===
|
|
||||||
# Public IP override for ME KDF when behind NAT; leave unset to auto-detect.
|
|
||||||
# middle_proxy_nat_ip = "203.0.113.10"
|
|
||||||
# Enable STUN probing to discover public IP:port for ME.
|
|
||||||
middle_proxy_nat_probe = true
|
|
||||||
# Primary STUN server (host:port); defaults to Telegram STUN when empty.
|
|
||||||
middle_proxy_nat_stun = "stun.l.google.com:19302"
|
|
||||||
# Optional fallback STUN servers list.
|
|
||||||
middle_proxy_nat_stun_servers = ["stun1.l.google.com:19302", "stun2.l.google.com:19302"]
|
|
||||||
# Desired number of concurrent ME writers in pool.
|
|
||||||
middle_proxy_pool_size = 16
|
|
||||||
# Pre-initialized warm-standby ME connections kept idle.
|
|
||||||
middle_proxy_warm_standby = 8
|
|
||||||
# Ignore STUN/interface mismatch and keep ME enabled even if IP differs.
|
|
||||||
stun_iface_mismatch_ignore = false
|
|
||||||
# Keepalive padding frames - fl==4
|
|
||||||
me_keepalive_enabled = true
|
|
||||||
me_keepalive_interval_secs = 25 # Period between keepalives
|
|
||||||
me_keepalive_jitter_secs = 5 # Jitter added to interval
|
|
||||||
me_keepalive_payload_random = true # Randomize 4-byte payload (vs zeros)
|
|
||||||
# Stagger extra ME connections on warmup to de-phase lifecycles.
|
|
||||||
me_warmup_stagger_enabled = true
|
|
||||||
me_warmup_step_delay_ms = 500 # Base delay between extra connects
|
|
||||||
me_warmup_step_jitter_ms = 300 # Jitter for warmup delay
|
|
||||||
# Reconnect policy knobs.
|
|
||||||
me_reconnect_max_concurrent_per_dc = 1 # Parallel reconnects per DC - EXPERIMENTAL! UNSTABLE!
|
|
||||||
me_reconnect_backoff_base_ms = 500 # Backoff start
|
|
||||||
me_reconnect_backoff_cap_ms = 30000 # Backoff cap
|
|
||||||
me_reconnect_fast_retry_count = 11 # Quick retries before backoff
|
|
||||||
|
|
||||||
[general.modes]
|
[general.modes]
|
||||||
classic = false
|
classic = false
|
||||||
secure = false
|
secure = false
|
||||||
@@ -55,93 +27,31 @@ show = "*"
|
|||||||
# public_host = "proxy.example.com" # Host (IP or domain) for tg:// links
|
# public_host = "proxy.example.com" # Host (IP or domain) for tg:// links
|
||||||
# public_port = 443 # Port for tg:// links (default: server.port)
|
# public_port = 443 # Port for tg:// links (default: server.port)
|
||||||
|
|
||||||
# === Network Parameters ===
|
|
||||||
[network]
|
|
||||||
# Enable/disable families: true/false/auto(None)
|
|
||||||
ipv4 = true
|
|
||||||
ipv6 = false # UNSTABLE WITH ME
|
|
||||||
# prefer = 4 or 6
|
|
||||||
prefer = 4
|
|
||||||
multipath = false # EXPERIMENTAL!
|
|
||||||
|
|
||||||
# === Server Binding ===
|
# === Server Binding ===
|
||||||
[server]
|
[server]
|
||||||
port = 443
|
port = 443
|
||||||
listen_addr_ipv4 = "0.0.0.0"
|
|
||||||
listen_addr_ipv6 = "::"
|
|
||||||
# listen_unix_sock = "/var/run/telemt.sock" # Unix socket
|
|
||||||
# listen_unix_sock_perm = "0666" # Socket file permissions
|
|
||||||
# proxy_protocol = false # Enable if behind HAProxy/nginx with PROXY protocol
|
# proxy_protocol = false # Enable if behind HAProxy/nginx with PROXY protocol
|
||||||
# metrics_port = 9090
|
# metrics_port = 9090
|
||||||
# metrics_whitelist = ["127.0.0.1", "::1"]
|
# metrics_whitelist = ["127.0.0.1", "::1", "0.0.0.0/0"]
|
||||||
|
|
||||||
|
[server.api]
|
||||||
|
enabled = true
|
||||||
|
listen = "0.0.0.0:9091"
|
||||||
|
whitelist = ["127.0.0.0/8"]
|
||||||
|
minimal_runtime_enabled = false
|
||||||
|
minimal_runtime_cache_ttl_ms = 1000
|
||||||
|
|
||||||
# Listen on multiple interfaces/IPs - IPv4
|
# Listen on multiple interfaces/IPs - IPv4
|
||||||
[[server.listeners]]
|
[[server.listeners]]
|
||||||
ip = "0.0.0.0"
|
ip = "0.0.0.0"
|
||||||
|
|
||||||
# Listen on multiple interfaces/IPs - IPv6
|
|
||||||
[[server.listeners]]
|
|
||||||
ip = "::"
|
|
||||||
|
|
||||||
# === Timeouts (in seconds) ===
|
|
||||||
[timeouts]
|
|
||||||
client_handshake = 30
|
|
||||||
tg_connect = 10
|
|
||||||
client_keepalive = 60
|
|
||||||
client_ack = 300
|
|
||||||
# Quick ME reconnects for single-address DCs (count and per-attempt timeout, ms).
|
|
||||||
me_one_retry = 12
|
|
||||||
me_one_timeout_ms = 1200
|
|
||||||
|
|
||||||
# === Anti-Censorship & Masking ===
|
# === Anti-Censorship & Masking ===
|
||||||
[censorship]
|
[censorship]
|
||||||
tls_domain = "petrovich.ru"
|
tls_domain = "petrovich.ru"
|
||||||
# tls_domains = ["example.com", "cdn.example.net"] # Additional domains for EE links
|
|
||||||
mask = true
|
mask = true
|
||||||
mask_port = 443
|
tls_emulation = true # Fetch real cert lengths and emulate TLS records
|
||||||
# mask_host = "petrovich.ru" # Defaults to tls_domain if not set
|
tls_front_dir = "tlsfront" # Cache directory for TLS emulation
|
||||||
# mask_unix_sock = "/var/run/nginx.sock" # Unix socket (mutually exclusive with mask_host)
|
|
||||||
fake_cert_len = 2048
|
|
||||||
# tls_emulation = false # Fetch real cert lengths and emulate TLS records
|
|
||||||
# tls_front_dir = "tlsfront" # Cache directory for TLS emulation
|
|
||||||
|
|
||||||
# === Access Control & Users ===
|
|
||||||
[access]
|
|
||||||
replay_check_len = 65536
|
|
||||||
replay_window_secs = 1800
|
|
||||||
ignore_time_skew = false
|
|
||||||
|
|
||||||
[access.users]
|
[access.users]
|
||||||
# format: "username" = "32_hex_chars_secret"
|
# format: "username" = "32_hex_chars_secret"
|
||||||
hello = "00000000000000000000000000000000"
|
hello = "00000000000000000000000000000000"
|
||||||
|
|
||||||
# [access.user_max_tcp_conns]
|
|
||||||
# hello = 50
|
|
||||||
|
|
||||||
# [access.user_max_unique_ips]
|
|
||||||
# hello = 5
|
|
||||||
|
|
||||||
# [access.user_data_quota]
|
|
||||||
# hello = 1073741824 # 1 GB
|
|
||||||
|
|
||||||
# [access.user_expirations]
|
|
||||||
# format: username = "[year]-[month]-[day]T[hour]:[minute]:[second]Z" UTC
|
|
||||||
# hello = "2027-01-01T00:00:00Z"
|
|
||||||
|
|
||||||
# === Upstreams & Routing ===
|
|
||||||
[[upstreams]]
|
|
||||||
type = "direct"
|
|
||||||
enabled = true
|
|
||||||
weight = 10
|
|
||||||
# interface = "192.168.1.100" # Bind outgoing to specific IP or iface name
|
|
||||||
# bind_addresses = ["192.168.1.100"] # List for round-robin binding (family must match target)
|
|
||||||
|
|
||||||
# [[upstreams]]
|
|
||||||
# type = "socks5"
|
|
||||||
# address = "127.0.0.1:1080"
|
|
||||||
# enabled = false
|
|
||||||
# weight = 1
|
|
||||||
|
|
||||||
# === DC Address Overrides ===
|
|
||||||
# [dc_overrides]
|
|
||||||
# "203" = "91.105.192.100:443"
|
|
||||||
|
|||||||
16
contrib/openbsd/telemt.rcd
Normal file
16
contrib/openbsd/telemt.rcd
Normal file
@@ -0,0 +1,16 @@
|
|||||||
|
#!/bin/ksh
|
||||||
|
# /etc/rc.d/telemt
|
||||||
|
#
|
||||||
|
# rc.d(8) script for Telemt MTProxy daemon.
|
||||||
|
# Tokio runtime does not daemonize itself, so rc_bg=YES is used.
|
||||||
|
|
||||||
|
daemon="/usr/local/bin/telemt"
|
||||||
|
daemon_user="_telemt"
|
||||||
|
daemon_flags="/etc/telemt/config.toml"
|
||||||
|
|
||||||
|
. /etc/rc.d/rc.subr
|
||||||
|
|
||||||
|
rc_bg=YES
|
||||||
|
rc_reload=NO
|
||||||
|
|
||||||
|
rc_cmd $1
|
||||||
@@ -1,11 +1,12 @@
|
|||||||
services:
|
services:
|
||||||
telemt:
|
telemt:
|
||||||
|
image: ghcr.io/telemt/telemt:latest
|
||||||
build: .
|
build: .
|
||||||
container_name: telemt
|
container_name: telemt
|
||||||
restart: unless-stopped
|
restart: unless-stopped
|
||||||
ports:
|
ports:
|
||||||
- "443:443"
|
- "443:443"
|
||||||
- "9090:9090"
|
- "127.0.0.1:9090:9090"
|
||||||
# Allow caching 'proxy-secret' in read-only container
|
# Allow caching 'proxy-secret' in read-only container
|
||||||
working_dir: /run/telemt
|
working_dir: /run/telemt
|
||||||
volumes:
|
volumes:
|
||||||
|
|||||||
1135
docs/API.md
Normal file
1135
docs/API.md
Normal file
File diff suppressed because it is too large
Load Diff
112
docs/FAQ.en.md
Normal file
112
docs/FAQ.en.md
Normal file
@@ -0,0 +1,112 @@
|
|||||||
|
## How to set up "proxy sponsor" channel and statistics via @MTProxybot bot
|
||||||
|
|
||||||
|
1. Go to @MTProxybot bot.
|
||||||
|
2. Enter the command `/newproxy`
|
||||||
|
3. Send the server IP and port. For example: 1.2.3.4:443
|
||||||
|
4. Open the config `nano /etc/telemt.toml`.
|
||||||
|
5. Copy and send the user secret from the [access.users] section to the bot.
|
||||||
|
6. Copy the tag received from the bot. For example 1234567890abcdef1234567890abcdef.
|
||||||
|
> [!WARNING]
|
||||||
|
> The link provided by the bot will not work. Do not copy or use it!
|
||||||
|
7. Uncomment the ad_tag parameter and enter the tag received from the bot.
|
||||||
|
8. Uncomment/add the parameter `use_middle_proxy = true`.
|
||||||
|
|
||||||
|
Config example:
|
||||||
|
```toml
|
||||||
|
[general]
|
||||||
|
ad_tag = "1234567890abcdef1234567890abcdef"
|
||||||
|
use_middle_proxy = true
|
||||||
|
```
|
||||||
|
9. Save the config. Ctrl+S -> Ctrl+X.
|
||||||
|
10. Restart telemt `systemctl restart telemt`.
|
||||||
|
11. In the bot, send the command /myproxies and select the added server.
|
||||||
|
12. Click the "Set promotion" button.
|
||||||
|
13. Send a **public link** to the channel. Private channels cannot be added!
|
||||||
|
14. Wait approximately 1 hour for the information to update on Telegram servers.
|
||||||
|
> [!WARNING]
|
||||||
|
> You will not see the "proxy sponsor" if you are already subscribed to the channel.
|
||||||
|
|
||||||
|
**You can also set up different channels for different users.**
|
||||||
|
```toml
|
||||||
|
[access.user_ad_tags]
|
||||||
|
hello = "ad_tag"
|
||||||
|
hello2 = "ad_tag2"
|
||||||
|
```
|
||||||
|
|
||||||
|
## How many people can use 1 link
|
||||||
|
|
||||||
|
By default, 1 link can be used by any number of people.
|
||||||
|
You can limit the number of IPs using the proxy.
|
||||||
|
```toml
|
||||||
|
[access.user_max_unique_ips]
|
||||||
|
hello = 1
|
||||||
|
```
|
||||||
|
This parameter limits how many unique IPs can use 1 link simultaneously. If one user disconnects, a second user can connect. Also, multiple users can sit behind the same IP.
|
||||||
|
|
||||||
|
## How to create multiple different links
|
||||||
|
|
||||||
|
1. Generate the required number of secrets `openssl rand -hex 16`
|
||||||
|
2. Open the config `nano /etc/telemt.toml`
|
||||||
|
3. Add new users.
|
||||||
|
```toml
|
||||||
|
[access.users]
|
||||||
|
user1 = "00000000000000000000000000000001"
|
||||||
|
user2 = "00000000000000000000000000000002"
|
||||||
|
user3 = "00000000000000000000000000000003"
|
||||||
|
```
|
||||||
|
4. Save the config. Ctrl+S -> Ctrl+X. You don't need to restart telemt.
|
||||||
|
5. Get the links via `journalctl -u telemt -n -g "links" --no-pager -o cat | tac`
|
||||||
|
|
||||||
|
## How to view metrics
|
||||||
|
|
||||||
|
1. Open the config `nano /etc/telemt.toml`
|
||||||
|
2. Add the following parameters
|
||||||
|
```toml
|
||||||
|
[server]
|
||||||
|
metrics_port = 9090
|
||||||
|
metrics_whitelist = ["127.0.0.1/32", "::1/128", "0.0.0.0/0"]
|
||||||
|
```
|
||||||
|
3. Save the config. Ctrl+S -> Ctrl+X.
|
||||||
|
4. Metrics are available at SERVER_IP:9090/metrics.
|
||||||
|
> [!WARNING]
|
||||||
|
> "0.0.0.0/0" in metrics_whitelist opens access from any IP. Replace with your own IP. For example "1.2.3.4"
|
||||||
|
|
||||||
|
## Additional parameters
|
||||||
|
|
||||||
|
### Domain in link instead of IP
|
||||||
|
To specify a domain in the links, add to the `[general.links]` section of the config file.
|
||||||
|
```toml
|
||||||
|
[general.links]
|
||||||
|
public_host = "proxy.example.com"
|
||||||
|
```
|
||||||
|
|
||||||
|
### Upstream Manager
|
||||||
|
To specify an upstream, add to the `[[upstreams]]` section of the config.toml file:
|
||||||
|
#### Binding to IP
|
||||||
|
```toml
|
||||||
|
[[upstreams]]
|
||||||
|
type = "direct"
|
||||||
|
weight = 1
|
||||||
|
enabled = true
|
||||||
|
interface = "192.168.1.100" # Change to your outgoing IP
|
||||||
|
```
|
||||||
|
#### SOCKS4/5 as Upstream
|
||||||
|
- Without authentication:
|
||||||
|
```toml
|
||||||
|
[[upstreams]]
|
||||||
|
type = "socks5" # Specify SOCKS4 or SOCKS5
|
||||||
|
address = "1.2.3.4:1234" # SOCKS-server Address
|
||||||
|
weight = 1 # Set Weight for Scenarios
|
||||||
|
enabled = true
|
||||||
|
```
|
||||||
|
|
||||||
|
- With authentication:
|
||||||
|
```toml
|
||||||
|
[[upstreams]]
|
||||||
|
type = "socks5" # Specify SOCKS4 or SOCKS5
|
||||||
|
address = "1.2.3.4:1234" # SOCKS-server Address
|
||||||
|
username = "user" # Username for Auth on SOCKS-server
|
||||||
|
password = "pass" # Password for Auth on SOCKS-server
|
||||||
|
weight = 1 # Set Weight for Scenarios
|
||||||
|
enabled = true
|
||||||
|
```
|
||||||
112
docs/FAQ.ru.md
Normal file
112
docs/FAQ.ru.md
Normal file
@@ -0,0 +1,112 @@
|
|||||||
|
## Как настроить канал "спонсор прокси" и статистику через бота @MTProxybot
|
||||||
|
|
||||||
|
1. Зайти в бота @MTProxybot.
|
||||||
|
2. Ввести команду `/newproxy`
|
||||||
|
3. Отправить IP и порт сервера. Например: 1.2.3.4:443
|
||||||
|
4. Открыть конфиг `nano /etc/telemt.toml`.
|
||||||
|
5. Скопировать и отправить боту секрет пользователя из раздела [access.users].
|
||||||
|
6. Скопировать полученный tag у бота. Например 1234567890abcdef1234567890abcdef.
|
||||||
|
> [!WARNING]
|
||||||
|
> Ссылка, которую выдает бот, не будет работать. Не копируйте и не используйте её!
|
||||||
|
7. Раскомментировать параметр ad_tag и вписать tag, полученный у бота.
|
||||||
|
8. Раскомментировать/добавить параметр use_middle_proxy = true.
|
||||||
|
|
||||||
|
Пример конфига:
|
||||||
|
```toml
|
||||||
|
[general]
|
||||||
|
ad_tag = "1234567890abcdef1234567890abcdef"
|
||||||
|
use_middle_proxy = true
|
||||||
|
```
|
||||||
|
9. Сохранить конфиг. Ctrl+S -> Ctrl+X.
|
||||||
|
10. Перезапустить telemt `systemctl restart telemt`.
|
||||||
|
11. В боте отправить команду /myproxies и выбрать добавленный сервер.
|
||||||
|
12. Нажать кнопку "Set promotion".
|
||||||
|
13. Отправить **публичную ссылку** на канал. Приватный канал добавить нельзя!
|
||||||
|
14. Подождать примерно 1 час, пока информация обновится на серверах Telegram.
|
||||||
|
> [!WARNING]
|
||||||
|
> У вас не будет отображаться "спонсор прокси" если вы уже подписаны на канал.
|
||||||
|
|
||||||
|
**Также вы можете настроить разные каналы для разных пользователей.**
|
||||||
|
```toml
|
||||||
|
[access.user_ad_tags]
|
||||||
|
hello = "ad_tag"
|
||||||
|
hello2 = "ad_tag2"
|
||||||
|
```
|
||||||
|
|
||||||
|
## Сколько человек может пользоваться 1 ссылкой
|
||||||
|
|
||||||
|
По умолчанию 1 ссылкой может пользоваться сколько угодно человек.
|
||||||
|
Вы можете ограничить число IP, использующих прокси.
|
||||||
|
```toml
|
||||||
|
[access.user_max_unique_ips]
|
||||||
|
hello = 1
|
||||||
|
```
|
||||||
|
Этот параметр ограничивает, сколько уникальных IP может использовать 1 ссылку одновременно. Если один пользователь отключится, второй сможет подключиться. Также с одного IP может сидеть несколько пользователей.
|
||||||
|
|
||||||
|
## Как сделать несколько разных ссылок
|
||||||
|
|
||||||
|
1. Сгенерируйте нужное число секретов `openssl rand -hex 16`
|
||||||
|
2. Открыть конфиг `nano /etc/telemt.toml`
|
||||||
|
3. Добавить новых пользователей.
|
||||||
|
```toml
|
||||||
|
[access.users]
|
||||||
|
user1 = "00000000000000000000000000000001"
|
||||||
|
user2 = "00000000000000000000000000000002"
|
||||||
|
user3 = "00000000000000000000000000000003"
|
||||||
|
```
|
||||||
|
4. Сохранить конфиг. Ctrl+S -> Ctrl+X. Перезапускать telemt не нужно.
|
||||||
|
5. Получить ссылки через `journalctl -u telemt -n -g "links" --no-pager -o cat | tac`
|
||||||
|
|
||||||
|
## Как посмотреть метрики
|
||||||
|
|
||||||
|
1. Открыть конфиг `nano /etc/telemt.toml`
|
||||||
|
2. Добавить следующие параметры
|
||||||
|
```toml
|
||||||
|
[server]
|
||||||
|
metrics_port = 9090
|
||||||
|
metrics_whitelist = ["127.0.0.1/32", "::1/128", "0.0.0.0/0"]
|
||||||
|
```
|
||||||
|
3. Сохранить конфиг. Ctrl+S -> Ctrl+X.
|
||||||
|
4. Метрики доступны по адресу SERVER_IP:9090/metrics.
|
||||||
|
> [!WARNING]
|
||||||
|
> "0.0.0.0/0" в metrics_whitelist открывает доступ с любого IP. Замените на свой ip. Например "1.2.3.4"
|
||||||
|
|
||||||
|
## Дополнительные параметры
|
||||||
|
|
||||||
|
### Домен в ссылке вместо IP
|
||||||
|
Чтобы указать домен в ссылках, добавьте в секцию `[general.links]` файла config.
|
||||||
|
```toml
|
||||||
|
[general.links]
|
||||||
|
public_host = "proxy.example.com"
|
||||||
|
```
|
||||||
|
|
||||||
|
### Upstream Manager
|
||||||
|
Чтобы указать апстрим, добавьте в секцию `[[upstreams]]` файла config.toml:
|
||||||
|
#### Привязка к IP
|
||||||
|
```toml
|
||||||
|
[[upstreams]]
|
||||||
|
type = "direct"
|
||||||
|
weight = 1
|
||||||
|
enabled = true
|
||||||
|
interface = "192.168.1.100" # Change to your outgoing IP
|
||||||
|
```
|
||||||
|
#### SOCKS4/5 как Upstream
|
||||||
|
- Без авторизации:
|
||||||
|
```toml
|
||||||
|
[[upstreams]]
|
||||||
|
type = "socks5" # Specify SOCKS4 or SOCKS5
|
||||||
|
address = "1.2.3.4:1234" # SOCKS-server Address
|
||||||
|
weight = 1 # Set Weight for Scenarios
|
||||||
|
enabled = true
|
||||||
|
```
|
||||||
|
|
||||||
|
- С авторизацией:
|
||||||
|
```toml
|
||||||
|
[[upstreams]]
|
||||||
|
type = "socks5" # Specify SOCKS4 or SOCKS5
|
||||||
|
address = "1.2.3.4:1234" # SOCKS-server Address
|
||||||
|
username = "user" # Username for Auth on SOCKS-server
|
||||||
|
password = "pass" # Password for Auth on SOCKS-server
|
||||||
|
weight = 1 # Set Weight for Scenarios
|
||||||
|
enabled = true
|
||||||
|
```
|
||||||
40
docs/MIDDLE-END-KDF.de.md
Normal file
40
docs/MIDDLE-END-KDF.de.md
Normal file
@@ -0,0 +1,40 @@
|
|||||||
|
# Middle-End Proxy
|
||||||
|
|
||||||
|
## KDF-Adressierung — Implementierungs-FAQ
|
||||||
|
|
||||||
|
### Benötigt die C-Referenzimplementierung sowohl externe IP-Adresse als auch Port für die KDF?
|
||||||
|
|
||||||
|
Ja.
|
||||||
|
|
||||||
|
In der C-Referenzimplementierung werden **sowohl IP-Adresse als auch Port in die KDF einbezogen** — auf beiden Seiten der Verbindung.
|
||||||
|
|
||||||
|
In `aes_create_keys()` enthält der KDF-Input:
|
||||||
|
|
||||||
|
- `server_ip + client_port`
|
||||||
|
- `client_ip + server_port`
|
||||||
|
- sowie Secret / Nonces
|
||||||
|
|
||||||
|
Für IPv6:
|
||||||
|
|
||||||
|
- IPv4-Felder werden auf 0 gesetzt
|
||||||
|
- IPv6-Adressen werden ergänzt
|
||||||
|
|
||||||
|
Die **Ports bleiben weiterhin Bestandteil der KDF**.
|
||||||
|
|
||||||
|
> Wenn sich externe IP oder Port (z. B. durch NAT, SOCKS oder Proxy) von den erwarteten Werten unterscheiden, entstehen unterschiedliche Schlüssel — der Handshake schlägt fehl.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
### Kann der Port aus der KDF ausgeschlossen werden (z. B. durch Port = 0)?
|
||||||
|
|
||||||
|
**Nein!**
|
||||||
|
|
||||||
|
Die C-Referenzimplementierung enthält **keine Möglichkeit, den Port zu ignorieren**:
|
||||||
|
- `client_port` und `server_port` sind fester Bestandteil der KDF
|
||||||
|
- Es werden immer reale Socket-Ports übergeben:
|
||||||
|
- `c->our_port`
|
||||||
|
- `c->remote_port`
|
||||||
|
|
||||||
|
Falls ein Port den Wert `0` hat, wird er dennoch als `0` in die KDF übernommen.
|
||||||
|
|
||||||
|
Eine „Port-Ignore“-Logik existiert nicht.
|
||||||
41
docs/MIDDLE-END-KDF.en.md
Normal file
41
docs/MIDDLE-END-KDF.en.md
Normal file
@@ -0,0 +1,41 @@
|
|||||||
|
# Middle-End Proxy
|
||||||
|
|
||||||
|
## KDF Addressing — Implementation FAQ
|
||||||
|
|
||||||
|
### Does the C-implementation require both external IP address and port for the KDF?
|
||||||
|
|
||||||
|
**Yes!**
|
||||||
|
|
||||||
|
In the C reference implementation, **both IP address and port are included in the KDF input** from both sides of the connection.
|
||||||
|
|
||||||
|
Inside `aes_create_keys()`, the KDF input explicitly contains:
|
||||||
|
|
||||||
|
- `server_ip + client_port`
|
||||||
|
- `client_ip + server_port`
|
||||||
|
- followed by shared secret / nonces
|
||||||
|
|
||||||
|
For IPv6:
|
||||||
|
|
||||||
|
- IPv4 fields are zeroed
|
||||||
|
- IPv6 addresses are inserted
|
||||||
|
|
||||||
|
However, **client_port and server_port remain part of the KDF regardless of IP version**.
|
||||||
|
|
||||||
|
> If externally observed IP or port (e.g. due to NAT, SOCKS, or proxy traversal) differs from what the peer expects, the derived keys will not match and the handshake will fail.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
### Can port be excluded from KDF (e.g. by using port = 0)?
|
||||||
|
|
||||||
|
**No!**
|
||||||
|
|
||||||
|
The C-implementation provides **no mechanism to ignore the port**:
|
||||||
|
|
||||||
|
- `client_port` and `server_port` are explicitly included in the KDF input
|
||||||
|
- Real socket ports are always passed:
|
||||||
|
- `c->our_port`
|
||||||
|
- `c->remote_port`
|
||||||
|
|
||||||
|
If a port is `0`, it is still incorporated into the KDF as `0`.
|
||||||
|
|
||||||
|
There is **no conditional logic to exclude ports**
|
||||||
41
docs/MIDDLE-END-KDF.ru.md
Normal file
41
docs/MIDDLE-END-KDF.ru.md
Normal file
@@ -0,0 +1,41 @@
|
|||||||
|
# Middle-End Proxy
|
||||||
|
|
||||||
|
## KDF Addressing — FAQ по реализации
|
||||||
|
|
||||||
|
### Требует ли C-референсная реализация KDF внешний IP и порт?
|
||||||
|
|
||||||
|
**Да**
|
||||||
|
|
||||||
|
В C-референсе **в KDF участвуют и IP-адрес, и порт** — с обеих сторон соединения.
|
||||||
|
|
||||||
|
В `aes_create_keys()` в строку KDF входят:
|
||||||
|
|
||||||
|
- `server_ip + client_port`
|
||||||
|
- `client_ip + server_port`
|
||||||
|
- далее secret / nonces
|
||||||
|
|
||||||
|
Для IPv6:
|
||||||
|
|
||||||
|
- IPv4-поля заполняются нулями
|
||||||
|
- добавляются IPv6-адреса
|
||||||
|
|
||||||
|
Однако **порты client_port и server_port всё равно участвуют в KDF**.
|
||||||
|
|
||||||
|
> Если внешний IP или порт (например, из-за NAT, SOCKS или прокси) не совпадает с ожидаемым другой стороной — ключи расходятся и handshake ломается.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
### Можно ли исключить порт из KDF (например, установив порт = 0)?
|
||||||
|
|
||||||
|
**Нет.**
|
||||||
|
|
||||||
|
В C-референсе **нет механики отключения порта**.
|
||||||
|
|
||||||
|
- `client_port` и `server_port` явно включены в KDF
|
||||||
|
- Передаются реальные порты сокета:
|
||||||
|
- `c->our_port`
|
||||||
|
- `c->remote_port`
|
||||||
|
|
||||||
|
Если порт равен `0`, он всё равно попадёт в KDF как `0`.
|
||||||
|
|
||||||
|
Отдельной логики «игнорировать порт» не предусмотрено.
|
||||||
132
docs/OPENBSD.en.md
Normal file
132
docs/OPENBSD.en.md
Normal file
@@ -0,0 +1,132 @@
|
|||||||
|
# Telemt on OpenBSD (Build, Run, and rc.d)
|
||||||
|
|
||||||
|
This guide covers a practical OpenBSD deployment flow for Telemt:
|
||||||
|
- build from source,
|
||||||
|
- install binary and config,
|
||||||
|
- run as an rc.d daemon,
|
||||||
|
- verify basic runtime behavior.
|
||||||
|
|
||||||
|
## 1. Prerequisites
|
||||||
|
|
||||||
|
Install required packages:
|
||||||
|
|
||||||
|
```sh
|
||||||
|
doas pkg_add rust git
|
||||||
|
```
|
||||||
|
|
||||||
|
Notes:
|
||||||
|
- Telemt release installer (`install.sh`) is Linux-only.
|
||||||
|
- On OpenBSD, use source build with `cargo`.
|
||||||
|
|
||||||
|
## 2. Build from source
|
||||||
|
|
||||||
|
```sh
|
||||||
|
git clone https://github.com/telemt/telemt
|
||||||
|
cd telemt
|
||||||
|
cargo build --release
|
||||||
|
./target/release/telemt --version
|
||||||
|
```
|
||||||
|
|
||||||
|
For low-RAM systems, this repository already uses `lto = "thin"` in release profile.
|
||||||
|
|
||||||
|
## 3. Install binary and config
|
||||||
|
|
||||||
|
```sh
|
||||||
|
doas install -d -m 0755 /usr/local/bin
|
||||||
|
doas install -m 0755 ./target/release/telemt /usr/local/bin/telemt
|
||||||
|
|
||||||
|
doas install -d -m 0750 /etc/telemt
|
||||||
|
doas install -m 0640 ./config.toml /etc/telemt/config.toml
|
||||||
|
```
|
||||||
|
|
||||||
|
## 4. Create runtime user
|
||||||
|
|
||||||
|
```sh
|
||||||
|
doas useradd -L daemon -s /sbin/nologin -d /var/empty _telemt
|
||||||
|
```
|
||||||
|
|
||||||
|
If `_telemt` already exists, continue.
|
||||||
|
|
||||||
|
## 5. Install rc.d service
|
||||||
|
|
||||||
|
Install the provided script:
|
||||||
|
|
||||||
|
```sh
|
||||||
|
doas install -m 0555 ./contrib/openbsd/telemt.rcd /etc/rc.d/telemt
|
||||||
|
```
|
||||||
|
|
||||||
|
Enable and start:
|
||||||
|
|
||||||
|
```sh
|
||||||
|
doas rcctl enable telemt
|
||||||
|
# Optional: send daemon output to syslog
|
||||||
|
#doas rcctl set telemt logger daemon.info
|
||||||
|
|
||||||
|
doas rcctl start telemt
|
||||||
|
```
|
||||||
|
|
||||||
|
Service controls:
|
||||||
|
|
||||||
|
```sh
|
||||||
|
doas rcctl check telemt
|
||||||
|
doas rcctl restart telemt
|
||||||
|
doas rcctl stop telemt
|
||||||
|
```
|
||||||
|
|
||||||
|
## 6. Resource limits (recommended)
|
||||||
|
|
||||||
|
OpenBSD rc.d can apply limits via login class. Add class `telemt` and assign it to `_telemt`.
|
||||||
|
|
||||||
|
Example class entry:
|
||||||
|
|
||||||
|
```text
|
||||||
|
telemt:\
|
||||||
|
:openfiles-cur=8192:openfiles-max=16384:\
|
||||||
|
:datasize-cur=768M:datasize-max=1024M:\
|
||||||
|
:coredumpsize=0:\
|
||||||
|
:tc=daemon:
|
||||||
|
```
|
||||||
|
|
||||||
|
These values are conservative defaults for small and medium deployments.
|
||||||
|
Increase `openfiles-*` only if logs show descriptor exhaustion under load.
|
||||||
|
|
||||||
|
Then rebuild database and assign class:
|
||||||
|
|
||||||
|
```sh
|
||||||
|
doas cap_mkdb /etc/login.conf
|
||||||
|
#doas usermod -L telemt _telemt
|
||||||
|
```
|
||||||
|
|
||||||
|
Uncomment `usermod` if you want this class bound to the Telemt user.
|
||||||
|
|
||||||
|
## 7. Functional smoke test
|
||||||
|
|
||||||
|
1. Validate service state:
|
||||||
|
|
||||||
|
```sh
|
||||||
|
doas rcctl check telemt
|
||||||
|
```
|
||||||
|
|
||||||
|
2. Check listener is present (replace 443 if needed):
|
||||||
|
|
||||||
|
```sh
|
||||||
|
netstat -n -f inet -p tcp | grep LISTEN | grep '\.443'
|
||||||
|
```
|
||||||
|
|
||||||
|
3. Verify process user:
|
||||||
|
|
||||||
|
```sh
|
||||||
|
ps -o user,pid,command -ax | grep telemt | grep -v grep
|
||||||
|
```
|
||||||
|
|
||||||
|
4. If startup fails, debug in foreground:
|
||||||
|
|
||||||
|
```sh
|
||||||
|
RUST_LOG=debug /usr/local/bin/telemt /etc/telemt/config.toml
|
||||||
|
```
|
||||||
|
|
||||||
|
## 8. OpenBSD-specific caveats
|
||||||
|
|
||||||
|
- OpenBSD does not support per-socket keepalive retries/interval tuning in the same way as Linux.
|
||||||
|
- Telemt source already uses target-aware cfg gates for keepalive setup.
|
||||||
|
- Use rc.d/rcctl, not systemd.
|
||||||
187
docs/QUICK_START_GUIDE.en.md
Normal file
187
docs/QUICK_START_GUIDE.en.md
Normal file
@@ -0,0 +1,187 @@
|
|||||||
|
# Telemt via Systemd
|
||||||
|
|
||||||
|
## Installation
|
||||||
|
|
||||||
|
This software is designed for Debian-based OS: in addition to Debian, these are Ubuntu, Mint, Kali, MX and many other Linux distributions
|
||||||
|
|
||||||
|
**1. Download**
|
||||||
|
```bash
|
||||||
|
wget -qO- "https://github.com/telemt/telemt/releases/latest/download/telemt-$(uname -m)-linux-$(ldd --version 2>&1 | grep -iq musl && echo musl || echo gnu).tar.gz" | tar -xz
|
||||||
|
```
|
||||||
|
**2. Move to the Bin folder**
|
||||||
|
```bash
|
||||||
|
mv telemt /bin
|
||||||
|
```
|
||||||
|
**3. Make the file executable**
|
||||||
|
```bash
|
||||||
|
chmod +x /bin/telemt
|
||||||
|
```
|
||||||
|
|
||||||
|
## How to use?
|
||||||
|
|
||||||
|
**This guide "assumes" that you:**
|
||||||
|
- logged in as root or executed `su -` / `sudo su`
|
||||||
|
- Already have the "telemt" executable file in the /bin folder. Read the **[Installation](#Installation)** section.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
**0. Check port and generate secrets**
|
||||||
|
|
||||||
|
The port you have selected for use should be MISSING from the list when you run:
|
||||||
|
```bash
|
||||||
|
netstat -lnp
|
||||||
|
```
|
||||||
|
|
||||||
|
Generate 16 bytes/32 characters HEX with OpenSSL or another way:
|
||||||
|
```bash
|
||||||
|
openssl rand -hex 16
|
||||||
|
```
|
||||||
|
OR
|
||||||
|
```bash
|
||||||
|
xxd -l 16 -p /dev/urandom
|
||||||
|
```
|
||||||
|
OR
|
||||||
|
```bash
|
||||||
|
python3 -c 'import os; print(os.urandom(16).hex())'
|
||||||
|
```
|
||||||
|
Save the obtained result somewhere. You will need it later!
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
**1. Place your config to /etc/telemt/telemt.toml**
|
||||||
|
|
||||||
|
Create config directory:
|
||||||
|
```bash
|
||||||
|
mkdir /etc/telemt
|
||||||
|
```
|
||||||
|
|
||||||
|
Open nano
|
||||||
|
```bash
|
||||||
|
nano /etc/telemt/telemt.toml
|
||||||
|
```
|
||||||
|
paste your config
|
||||||
|
|
||||||
|
```toml
|
||||||
|
# === General Settings ===
|
||||||
|
[general]
|
||||||
|
# ad_tag = "00000000000000000000000000000000"
|
||||||
|
use_middle_proxy = false
|
||||||
|
|
||||||
|
[general.modes]
|
||||||
|
classic = false
|
||||||
|
secure = false
|
||||||
|
tls = true
|
||||||
|
|
||||||
|
[server.api]
|
||||||
|
enabled = true
|
||||||
|
# listen = "127.0.0.1:9091"
|
||||||
|
# whitelist = ["127.0.0.1/32"]
|
||||||
|
# read_only = true
|
||||||
|
|
||||||
|
# === Anti-Censorship & Masking ===
|
||||||
|
[censorship]
|
||||||
|
tls_domain = "petrovich.ru"
|
||||||
|
|
||||||
|
[access.users]
|
||||||
|
# format: "username" = "32_hex_chars_secret"
|
||||||
|
hello = "00000000000000000000000000000000"
|
||||||
|
```
|
||||||
|
|
||||||
|
then Ctrl+S -> Ctrl+X to save
|
||||||
|
|
||||||
|
> [!WARNING]
|
||||||
|
> Replace the value of the hello parameter with the value you obtained in step 0.
|
||||||
|
> Replace the value of the tls_domain parameter with another website.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
**2. Create telemt user**
|
||||||
|
|
||||||
|
```bash
|
||||||
|
useradd -d /opt/telemt -m -r -U telemt
|
||||||
|
chown -R telemt:telemt /etc/telemt
|
||||||
|
```
|
||||||
|
|
||||||
|
**3. Create service on /etc/systemd/system/telemt.service**
|
||||||
|
|
||||||
|
Open nano
|
||||||
|
```bash
|
||||||
|
nano /etc/systemd/system/telemt.service
|
||||||
|
```
|
||||||
|
|
||||||
|
paste this Systemd Module
|
||||||
|
```bash
|
||||||
|
[Unit]
|
||||||
|
Description=Telemt
|
||||||
|
After=network-online.target
|
||||||
|
Wants=network-online.target
|
||||||
|
|
||||||
|
[Service]
|
||||||
|
Type=simple
|
||||||
|
User=telemt
|
||||||
|
Group=telemt
|
||||||
|
WorkingDirectory=/opt/telemt
|
||||||
|
ExecStart=/bin/telemt /etc/telemt/telemt.toml
|
||||||
|
Restart=on-failure
|
||||||
|
LimitNOFILE=65536
|
||||||
|
AmbientCapabilities=CAP_NET_BIND_SERVICE
|
||||||
|
CapabilityBoundingSet=CAP_NET_BIND_SERVICE
|
||||||
|
NoNewPrivileges=true
|
||||||
|
|
||||||
|
[Install]
|
||||||
|
WantedBy=multi-user.target
|
||||||
|
```
|
||||||
|
then Ctrl+S -> Ctrl+X to save
|
||||||
|
|
||||||
|
reload systemd units
|
||||||
|
```bash
|
||||||
|
systemctl daemon-reload
|
||||||
|
```
|
||||||
|
|
||||||
|
**4.** To start it, enter the command `systemctl start telemt`
|
||||||
|
|
||||||
|
**5.** To get status information, enter `systemctl status telemt`
|
||||||
|
|
||||||
|
**6.** For automatic startup at system boot, enter `systemctl enable telemt`
|
||||||
|
|
||||||
|
**7.** To get the link(s), enter
|
||||||
|
```bash
|
||||||
|
curl -s http://127.0.0.1:9091/v1/users | jq
|
||||||
|
```
|
||||||
|
|
||||||
|
> Any number of people can use one link.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
# Telemt via Docker Compose
|
||||||
|
|
||||||
|
**1. Edit `config.toml` in repo root (at least: port, users secrets, tls_domain)**
|
||||||
|
**2. Start container:**
|
||||||
|
```bash
|
||||||
|
docker compose up -d --build
|
||||||
|
```
|
||||||
|
**3. Check logs:**
|
||||||
|
```bash
|
||||||
|
docker compose logs -f telemt
|
||||||
|
```
|
||||||
|
**4. Stop:**
|
||||||
|
```bash
|
||||||
|
docker compose down
|
||||||
|
```
|
||||||
|
> [!NOTE]
|
||||||
|
> - `docker-compose.yml` maps `./config.toml` to `/app/config.toml` (read-only)
|
||||||
|
> - By default it publishes `443:443` and runs with dropped capabilities (only `NET_BIND_SERVICE` is added)
|
||||||
|
> - If you really need host networking (usually only for some IPv6 setups) uncomment `network_mode: host`
|
||||||
|
|
||||||
|
**Run without Compose**
|
||||||
|
```bash
|
||||||
|
docker build -t telemt:local .
|
||||||
|
docker run --name telemt --restart unless-stopped \
|
||||||
|
-p 443:443 \
|
||||||
|
-e RUST_LOG=info \
|
||||||
|
-v "$PWD/config.toml:/app/config.toml:ro" \
|
||||||
|
--read-only \
|
||||||
|
--cap-drop ALL --cap-add NET_BIND_SERVICE \
|
||||||
|
--ulimit nofile=65536:65536 \
|
||||||
|
telemt:local
|
||||||
|
```
|
||||||
189
docs/QUICK_START_GUIDE.ru.md
Normal file
189
docs/QUICK_START_GUIDE.ru.md
Normal file
@@ -0,0 +1,189 @@
|
|||||||
|
# Telemt через Systemd
|
||||||
|
|
||||||
|
## Установка
|
||||||
|
|
||||||
|
Это программное обеспечение разработано для ОС на базе Debian: помимо Debian, это Ubuntu, Mint, Kali, MX и многие другие дистрибутивы Linux
|
||||||
|
|
||||||
|
**1. Скачать**
|
||||||
|
```bash
|
||||||
|
wget -qO- "https://github.com/telemt/telemt/releases/latest/download/telemt-$(uname -m)-linux-$(ldd --version 2>&1 | grep -iq musl && echo musl || echo gnu).tar.gz" | tar -xz
|
||||||
|
```
|
||||||
|
**2. Переместить в папку Bin**
|
||||||
|
```bash
|
||||||
|
mv telemt /bin
|
||||||
|
```
|
||||||
|
**3. Сделать файл исполняемым**
|
||||||
|
```bash
|
||||||
|
chmod +x /bin/telemt
|
||||||
|
```
|
||||||
|
|
||||||
|
## Как правильно использовать?
|
||||||
|
|
||||||
|
**Эта инструкция "предполагает", что вы:**
|
||||||
|
- Авторизовались как пользователь root или выполнили `su -` / `sudo su`
|
||||||
|
- У вас уже есть исполняемый файл "telemt" в папке /bin. Читайте раздел **[Установка](#установка)**
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
**0. Проверьте порт и сгенерируйте секреты**
|
||||||
|
|
||||||
|
Порт, который вы выбрали для использования, должен отсутствовать в списке:
|
||||||
|
```bash
|
||||||
|
netstat -lnp
|
||||||
|
```
|
||||||
|
|
||||||
|
Сгенерируйте 16 bytes/32 символа в шестнадцатеричном формате с помощью OpenSSL или другим способом:
|
||||||
|
```bash
|
||||||
|
openssl rand -hex 16
|
||||||
|
```
|
||||||
|
ИЛИ
|
||||||
|
```bash
|
||||||
|
xxd -l 16 -p /dev/urandom
|
||||||
|
```
|
||||||
|
ИЛИ
|
||||||
|
```bash
|
||||||
|
python3 -c 'import os; print(os.urandom(16).hex())'
|
||||||
|
```
|
||||||
|
Сохраните полученный результат где-нибудь. Он понадобится вам дальше!
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
**1. Поместите свою конфигурацию в файл /etc/telemt/telemt.toml**
|
||||||
|
|
||||||
|
Создаём директорию для конфига:
|
||||||
|
```bash
|
||||||
|
mkdir /etc/telemt
|
||||||
|
```
|
||||||
|
|
||||||
|
Открываем nano
|
||||||
|
```bash
|
||||||
|
nano /etc/telemt/telemt.toml
|
||||||
|
```
|
||||||
|
Вставьте свою конфигурацию
|
||||||
|
|
||||||
|
```toml
|
||||||
|
# === General Settings ===
|
||||||
|
[general]
|
||||||
|
# ad_tag = "00000000000000000000000000000000"
|
||||||
|
use_middle_proxy = false
|
||||||
|
|
||||||
|
[general.modes]
|
||||||
|
classic = false
|
||||||
|
secure = false
|
||||||
|
tls = true
|
||||||
|
|
||||||
|
[server.api]
|
||||||
|
enabled = true
|
||||||
|
# listen = "127.0.0.1:9091"
|
||||||
|
# whitelist = ["127.0.0.1/32"]
|
||||||
|
# read_only = true
|
||||||
|
|
||||||
|
# === Anti-Censorship & Masking ===
|
||||||
|
[censorship]
|
||||||
|
tls_domain = "petrovich.ru"
|
||||||
|
|
||||||
|
[access.users]
|
||||||
|
# format: "username" = "32_hex_chars_secret"
|
||||||
|
hello = "00000000000000000000000000000000"
|
||||||
|
```
|
||||||
|
|
||||||
|
Затем нажмите Ctrl+S -> Ctrl+X, чтобы сохранить
|
||||||
|
|
||||||
|
> [!WARNING]
|
||||||
|
> Замените значение параметра hello на значение, которое вы получили в пункте 0.
|
||||||
|
> Так же замените значение параметра tls_domain на другой сайт.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
**2. Создайте пользователя для telemt**
|
||||||
|
|
||||||
|
```bash
|
||||||
|
useradd -d /opt/telemt -m -r -U telemt
|
||||||
|
chown -R telemt:telemt /etc/telemt
|
||||||
|
```
|
||||||
|
|
||||||
|
**3. Создайте службу в /etc/systemd/system/telemt.service**
|
||||||
|
|
||||||
|
Открываем nano
|
||||||
|
```bash
|
||||||
|
nano /etc/systemd/system/telemt.service
|
||||||
|
```
|
||||||
|
|
||||||
|
Вставьте этот модуль Systemd
|
||||||
|
```bash
|
||||||
|
[Unit]
|
||||||
|
Description=Telemt
|
||||||
|
After=network-online.target
|
||||||
|
Wants=network-online.target
|
||||||
|
|
||||||
|
[Service]
|
||||||
|
Type=simple
|
||||||
|
User=telemt
|
||||||
|
Group=telemt
|
||||||
|
WorkingDirectory=/opt/telemt
|
||||||
|
ExecStart=/bin/telemt /etc/telemt/telemt.toml
|
||||||
|
Restart=on-failure
|
||||||
|
LimitNOFILE=65536
|
||||||
|
AmbientCapabilities=CAP_NET_BIND_SERVICE
|
||||||
|
CapabilityBoundingSet=CAP_NET_BIND_SERVICE
|
||||||
|
NoNewPrivileges=true
|
||||||
|
|
||||||
|
[Install]
|
||||||
|
WantedBy=multi-user.target
|
||||||
|
```
|
||||||
|
Затем нажмите Ctrl+S -> Ctrl+X, чтобы сохранить
|
||||||
|
|
||||||
|
перезагрузите конфигурацию systemd
|
||||||
|
```bash
|
||||||
|
systemctl daemon-reload
|
||||||
|
```
|
||||||
|
|
||||||
|
**4.** Для запуска введите команду `systemctl start telemt`
|
||||||
|
|
||||||
|
**5.** Для получения информации о статусе введите `systemctl status telemt`
|
||||||
|
|
||||||
|
**6.** Для автоматического запуска при запуске системы введите `systemctl enable telemt`
|
||||||
|
|
||||||
|
**7.** Для получения ссылки/ссылок введите
|
||||||
|
```bash
|
||||||
|
curl -s http://127.0.0.1:9091/v1/users | jq
|
||||||
|
```
|
||||||
|
> Одной ссылкой может пользоваться сколько угодно человек.
|
||||||
|
|
||||||
|
> [!WARNING]
|
||||||
|
> Рабочую ссылку может выдать только команда из пункта 7. Не пытайтесь делать ее самостоятельно или копировать откуда-либо, если вы не уверены в том, что делаете!
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
# Telemt через Docker Compose
|
||||||
|
|
||||||
|
**1. Отредактируйте `config.toml` в корневом каталоге репозитория (как минимум: порт, пользовательские секреты, tls_domain)**
|
||||||
|
**2. Запустите контейнер:**
|
||||||
|
```bash
|
||||||
|
docker compose up -d --build
|
||||||
|
```
|
||||||
|
**3. Проверьте логи:**
|
||||||
|
```bash
|
||||||
|
docker compose logs -f telemt
|
||||||
|
```
|
||||||
|
**4. Остановите контейнер:**
|
||||||
|
```bash
|
||||||
|
docker compose down
|
||||||
|
```
|
||||||
|
> [!NOTE]
|
||||||
|
> - В `docker-compose.yml` файл `./config.toml` монтируется в `/app/config.toml` (доступно только для чтения)
|
||||||
|
> - По умолчанию публикуются порты 443:443, а контейнер запускается со сброшенными привилегиями (добавлена только `NET_BIND_SERVICE`)
|
||||||
|
> - Если вам действительно нужна сеть хоста (обычно это требуется только для некоторых конфигураций IPv6), раскомментируйте `network_mode: host`
|
||||||
|
|
||||||
|
**Запуск без Compose**
|
||||||
|
```bash
|
||||||
|
docker build -t telemt:local .
|
||||||
|
docker run --name telemt --restart unless-stopped \
|
||||||
|
-p 443:443 \
|
||||||
|
-e RUST_LOG=info \
|
||||||
|
-v "$PWD/config.toml:/app/config.toml:ro" \
|
||||||
|
--read-only \
|
||||||
|
--cap-drop ALL --cap-add NET_BIND_SERVICE \
|
||||||
|
--ulimit nofile=65536:65536 \
|
||||||
|
telemt:local
|
||||||
|
```
|
||||||
219
docs/TUNING.de.md
Normal file
219
docs/TUNING.de.md
Normal file
@@ -0,0 +1,219 @@
|
|||||||
|
# Telemt Tuning-Leitfaden: Middle-End und Upstreams
|
||||||
|
|
||||||
|
Dieses Dokument beschreibt das aktuelle Laufzeitverhalten für Middle-End (ME) und Upstream-Routing basierend auf:
|
||||||
|
- `src/config/types.rs`
|
||||||
|
- `src/config/defaults.rs`
|
||||||
|
- `src/config/load.rs`
|
||||||
|
- `src/transport/upstream.rs`
|
||||||
|
|
||||||
|
Die unten angegebenen `Default`-Werte sind Code-Defaults (bei fehlendem Schlüssel), nicht zwingend die Werte aus `config.full.toml`.
|
||||||
|
|
||||||
|
## Middle-End-Parameter
|
||||||
|
|
||||||
|
### 1) ME-Grundmodus, NAT und STUN
|
||||||
|
|
||||||
|
| Parameter | Typ | Default | Einschränkungen / Validierung | Laufzeiteffekt | Beispiel |
|
||||||
|
|---|---|---:|---|---|---|
|
||||||
|
| `general.use_middle_proxy` | `bool` | `true` | keine | Aktiviert den ME-Transportmodus. Bei `false` wird Direct-Modus verwendet. | `use_middle_proxy = true` |
|
||||||
|
| `general.proxy_secret_path` | `Option<String>` | `"proxy-secret"` | Pfad kann `null` sein | Pfad zur Telegram-Infrastrukturdatei `proxy-secret`. | `proxy_secret_path = "proxy-secret"` |
|
||||||
|
| `general.middle_proxy_nat_ip` | `Option<IpAddr>` | `null` | gültige IP bei gesetztem Wert | Manueller Override der öffentlichen NAT-IP für ME-Adressmaterial. | `middle_proxy_nat_ip = "203.0.113.10"` |
|
||||||
|
| `general.middle_proxy_nat_probe` | `bool` | `true` | wird auf `true` erzwungen, wenn `use_middle_proxy=true` | Aktiviert NAT-Probing für ME. | `middle_proxy_nat_probe = true` |
|
||||||
|
| `general.stun_nat_probe_concurrency` | `usize` | `8` | muss `> 0` sein | Maximale parallele STUN-Probes während NAT-Erkennung. | `stun_nat_probe_concurrency = 16` |
|
||||||
|
| `network.stun_use` | `bool` | `true` | keine | Globaler STUN-Schalter. Bei `false` wird STUN deaktiviert. | `stun_use = true` |
|
||||||
|
| `network.stun_servers` | `Vec<String>` | integrierter öffentlicher Pool | Duplikate/leer werden entfernt | Primäre STUN-Serverliste für NAT/Public-Endpoint-Erkennung. | `stun_servers = ["stun1.l.google.com:19302"]` |
|
||||||
|
| `network.stun_tcp_fallback` | `bool` | `true` | keine | Aktiviert TCP-Fallback, wenn UDP-STUN blockiert ist. | `stun_tcp_fallback = true` |
|
||||||
|
| `network.http_ip_detect_urls` | `Vec<String>` | `ifconfig.me` + `api.ipify.org` | keine | HTTP-Fallback zur öffentlichen IPv4-Erkennung, falls STUN ausfällt. | `http_ip_detect_urls = ["https://api.ipify.org"]` |
|
||||||
|
| `general.stun_iface_mismatch_ignore` | `bool` | `false` | keine | Reserviertes Feld in der aktuellen Revision (derzeit kein aktiver Runtime-Verbrauch). | `stun_iface_mismatch_ignore = false` |
|
||||||
|
| `timeouts.me_one_retry` | `u8` | `12` | keine | Anzahl schneller Reconnect-Versuche bei Single-Endpoint-DC-Fällen. | `me_one_retry = 6` |
|
||||||
|
| `timeouts.me_one_timeout_ms` | `u64` | `1200` | keine | Timeout pro schnellem Einzelversuch (ms). | `me_one_timeout_ms = 1500` |
|
||||||
|
|
||||||
|
### 2) Poolgröße, Keepalive und Reconnect-Policy
|
||||||
|
|
||||||
|
| Parameter | Typ | Default | Einschränkungen / Validierung | Laufzeiteffekt | Beispiel |
|
||||||
|
|---|---|---:|---|---|---|
|
||||||
|
| `general.middle_proxy_pool_size` | `usize` | `8` | keine | Zielgröße des aktiven ME-Writer-Pools. | `middle_proxy_pool_size = 12` |
|
||||||
|
| `general.middle_proxy_warm_standby` | `usize` | `16` | keine | Reserviertes Kompatibilitätsfeld in der aktuellen Revision (kein aktiver Runtime-Consumer). | `middle_proxy_warm_standby = 16` |
|
||||||
|
| `general.me_keepalive_enabled` | `bool` | `true` | keine | Aktiviert periodischen ME-Keepalive/Ping-Traffic. | `me_keepalive_enabled = true` |
|
||||||
|
| `general.me_keepalive_interval_secs` | `u64` | `25` | keine | Basisintervall für Keepalive (Sekunden). | `me_keepalive_interval_secs = 20` |
|
||||||
|
| `general.me_keepalive_jitter_secs` | `u64` | `5` | keine | Keepalive-Jitter zur Vermeidung synchroner Peaks. | `me_keepalive_jitter_secs = 3` |
|
||||||
|
| `general.me_keepalive_payload_random` | `bool` | `true` | keine | Randomisiert Keepalive-Payload-Bytes. | `me_keepalive_payload_random = true` |
|
||||||
|
| `general.me_warmup_stagger_enabled` | `bool` | `true` | keine | Aktiviert gestaffeltes Warmup zusätzlicher ME-Verbindungen. | `me_warmup_stagger_enabled = true` |
|
||||||
|
| `general.me_warmup_step_delay_ms` | `u64` | `500` | keine | Basisverzögerung zwischen Warmup-Schritten (ms). | `me_warmup_step_delay_ms = 300` |
|
||||||
|
| `general.me_warmup_step_jitter_ms` | `u64` | `300` | keine | Zusätzlicher zufälliger Warmup-Jitter (ms). | `me_warmup_step_jitter_ms = 200` |
|
||||||
|
| `general.me_reconnect_max_concurrent_per_dc` | `u32` | `8` | keine | Begrenzung paralleler Reconnect-Worker pro DC. | `me_reconnect_max_concurrent_per_dc = 12` |
|
||||||
|
| `general.me_reconnect_backoff_base_ms` | `u64` | `500` | keine | Initiales Reconnect-Backoff (ms). | `me_reconnect_backoff_base_ms = 250` |
|
||||||
|
| `general.me_reconnect_backoff_cap_ms` | `u64` | `30000` | keine | Maximales Reconnect-Backoff (ms). | `me_reconnect_backoff_cap_ms = 10000` |
|
||||||
|
| `general.me_reconnect_fast_retry_count` | `u32` | `16` | keine | Budget für Sofort-Retries vor längerem Backoff. | `me_reconnect_fast_retry_count = 8` |
|
||||||
|
|
||||||
|
### 3) Reinit/Hardswap, Secret-Rotation und Degradation
|
||||||
|
|
||||||
|
| Parameter | Typ | Default | Einschränkungen / Validierung | Laufzeiteffekt | Beispiel |
|
||||||
|
|---|---|---:|---|---|---|
|
||||||
|
| `general.hardswap` | `bool` | `true` | keine | Aktiviert generation-basierte Hardswap-Strategie für den ME-Pool. | `hardswap = true` |
|
||||||
|
| `general.me_reinit_every_secs` | `u64` | `900` | muss `> 0` sein | Intervall für periodische ME-Reinitialisierung. | `me_reinit_every_secs = 600` |
|
||||||
|
| `general.me_hardswap_warmup_delay_min_ms` | `u64` | `1000` | muss `<= me_hardswap_warmup_delay_max_ms` sein | Untere Grenze für Warmup-Dial-Abstände. | `me_hardswap_warmup_delay_min_ms = 500` |
|
||||||
|
| `general.me_hardswap_warmup_delay_max_ms` | `u64` | `2000` | muss `> 0` sein | Obere Grenze für Warmup-Dial-Abstände. | `me_hardswap_warmup_delay_max_ms = 1200` |
|
||||||
|
| `general.me_hardswap_warmup_extra_passes` | `u8` | `3` | Bereich `[0,10]` | Zusätzliche Warmup-Pässe nach dem Basispass. | `me_hardswap_warmup_extra_passes = 2` |
|
||||||
|
| `general.me_hardswap_warmup_pass_backoff_base_ms` | `u64` | `500` | muss `> 0` sein | Basis-Backoff zwischen zusätzlichen Warmup-Pässen. | `me_hardswap_warmup_pass_backoff_base_ms = 400` |
|
||||||
|
| `general.me_config_stable_snapshots` | `u8` | `2` | muss `> 0` sein | Anzahl identischer ME-Config-Snapshots vor Apply. | `me_config_stable_snapshots = 3` |
|
||||||
|
| `general.me_config_apply_cooldown_secs` | `u64` | `300` | keine | Cooldown zwischen angewendeten ME-Map-Updates. | `me_config_apply_cooldown_secs = 120` |
|
||||||
|
| `general.proxy_secret_stable_snapshots` | `u8` | `2` | muss `> 0` sein | Anzahl identischer Secret-Snapshots vor Rotation. | `proxy_secret_stable_snapshots = 3` |
|
||||||
|
| `general.proxy_secret_rotate_runtime` | `bool` | `true` | keine | Aktiviert Runtime-Rotation des Proxy-Secrets. | `proxy_secret_rotate_runtime = true` |
|
||||||
|
| `general.proxy_secret_len_max` | `usize` | `256` | Bereich `[32,4096]` | Obergrenze für akzeptierte Secret-Länge. | `proxy_secret_len_max = 512` |
|
||||||
|
| `general.update_every` | `Option<u64>` | `300` | wenn gesetzt: `> 0`; bei `null`: Legacy-Min-Fallback | Einheitliches Refresh-Intervall für ME-Config + Secret-Updater. | `update_every = 300` |
|
||||||
|
| `general.me_pool_drain_ttl_secs` | `u64` | `90` | keine | Zeitraum, in dem stale Writer noch als Fallback zulässig sind. | `me_pool_drain_ttl_secs = 120` |
|
||||||
|
| `general.me_pool_min_fresh_ratio` | `f32` | `0.8` | Bereich `[0.0,1.0]` | Coverage-Schwelle vor Drain der alten Generation. | `me_pool_min_fresh_ratio = 0.9` |
|
||||||
|
| `general.me_reinit_drain_timeout_secs` | `u64` | `120` | `0` = kein Force-Close; wenn `>0 && < TTL`, dann auf TTL angehoben | Force-Close-Timeout für draining stale Writer. | `me_reinit_drain_timeout_secs = 0` |
|
||||||
|
| `general.auto_degradation_enabled` | `bool` | `true` | keine | Reserviertes Kompatibilitätsfeld in aktueller Revision (kein aktiver Runtime-Consumer). | `auto_degradation_enabled = true` |
|
||||||
|
| `general.degradation_min_unavailable_dc_groups` | `u8` | `2` | keine | Reservierter Kompatibilitäts-Schwellenwert in aktueller Revision (kein aktiver Runtime-Consumer). | `degradation_min_unavailable_dc_groups = 2` |
|
||||||
|
|
||||||
|
## Deprecated / Legacy Parameter
|
||||||
|
|
||||||
|
| Parameter | Status | Ersatz | Aktuelles Verhalten | Migrationshinweis |
|
||||||
|
|---|---|---|---|---|
|
||||||
|
| `general.middle_proxy_nat_stun` | Deprecated | `network.stun_servers` | Wird nur dann in `network.stun_servers` gemerged, wenn `network.stun_servers` nicht explizit gesetzt ist. | Wert nach `network.stun_servers` verschieben, Legacy-Key entfernen. |
|
||||||
|
| `general.middle_proxy_nat_stun_servers` | Deprecated | `network.stun_servers` | Wird nur dann in `network.stun_servers` gemerged, wenn `network.stun_servers` nicht explizit gesetzt ist. | Werte nach `network.stun_servers` verschieben, Legacy-Key entfernen. |
|
||||||
|
| `general.proxy_secret_auto_reload_secs` | Deprecated | `general.update_every` | Nur aktiv, wenn `update_every = null` (Legacy-Fallback). | `general.update_every` explizit setzen, Legacy-Key entfernen. |
|
||||||
|
| `general.proxy_config_auto_reload_secs` | Deprecated | `general.update_every` | Nur aktiv, wenn `update_every = null` (Legacy-Fallback). | `general.update_every` explizit setzen, Legacy-Key entfernen. |
|
||||||
|
|
||||||
|
## Wie Upstreams konfiguriert werden
|
||||||
|
|
||||||
|
### Upstream-Schema
|
||||||
|
|
||||||
|
| Feld | Gilt für | Typ | Pflicht | Default | Bedeutung |
|
||||||
|
|---|---|---|---|---|---|
|
||||||
|
| `[[upstreams]].type` | alle Upstreams | `"direct" \| "socks4" \| "socks5"` | ja | n/a | Upstream-Transporttyp. |
|
||||||
|
| `[[upstreams]].weight` | alle Upstreams | `u16` | nein | `1` | Basisgewicht für weighted-random Auswahl. |
|
||||||
|
| `[[upstreams]].enabled` | alle Upstreams | `bool` | nein | `true` | Deaktivierte Einträge werden beim Start ignoriert. |
|
||||||
|
| `[[upstreams]].scopes` | alle Upstreams | `String` | nein | `""` | Komma-separierte Scope-Tags für Request-Routing. |
|
||||||
|
| `interface` | `direct` | `Option<String>` | nein | `null` | Interface-Name (z. B. `eth0`) oder lokale Literal-IP. |
|
||||||
|
| `bind_addresses` | `direct` | `Option<Vec<IpAddr>>` | nein | `null` | Explizite Source-IP-Kandidaten (strikter Vorrang vor `interface`). |
|
||||||
|
| `address` | `socks4` | `String` | ja | n/a | SOCKS4-Server (`ip:port` oder `host:port`). |
|
||||||
|
| `interface` | `socks4` | `Option<String>` | nein | `null` | Wird nur genutzt, wenn `address` als `ip:port` angegeben ist. |
|
||||||
|
| `user_id` | `socks4` | `Option<String>` | nein | `null` | SOCKS4 User-ID für CONNECT. |
|
||||||
|
| `address` | `socks5` | `String` | ja | n/a | SOCKS5-Server (`ip:port` oder `host:port`). |
|
||||||
|
| `interface` | `socks5` | `Option<String>` | nein | `null` | Wird nur genutzt, wenn `address` als `ip:port` angegeben ist. |
|
||||||
|
| `username` | `socks5` | `Option<String>` | nein | `null` | SOCKS5 Benutzername. |
|
||||||
|
| `password` | `socks5` | `Option<String>` | nein | `null` | SOCKS5 Passwort. |
|
||||||
|
|
||||||
|
### Runtime-Regeln (wichtig)
|
||||||
|
|
||||||
|
1. Wenn `[[upstreams]]` fehlt, injiziert der Loader einen Default-`direct`-Upstream.
|
||||||
|
2. Scope-Filterung basiert auf exaktem Token-Match:
|
||||||
|
- mit Request-Scope -> nur Einträge, deren `scopes` genau dieses Token enthält;
|
||||||
|
- ohne Request-Scope -> nur Einträge mit leerem `scopes`.
|
||||||
|
3. Unter healthy Upstreams erfolgt die Auswahl per weighted random: `weight * latency_factor`.
|
||||||
|
4. Gibt es im gefilterten Set keinen healthy Upstream, wird zufällig aus dem gefilterten Set gewählt.
|
||||||
|
5. `direct`-Bind-Auflösung:
|
||||||
|
- zuerst `bind_addresses` (nur gleiche IP-Familie wie Target);
|
||||||
|
- bei `interface` (Name) + `bind_addresses` wird jede Candidate-IP gegen Interface-Adressen validiert;
|
||||||
|
- ungültige Kandidaten werden mit `WARN` verworfen;
|
||||||
|
- bleiben keine gültigen Kandidaten übrig, erfolgt unbound direct connect (`bind_ip=None`);
|
||||||
|
- wenn `bind_addresses` nicht passt, wird `interface` verwendet (Literal-IP oder Interface-Primäradresse).
|
||||||
|
6. Für `socks4/socks5` mit Hostname-`address` ist Interface-Binding nicht unterstützt und wird mit Warnung ignoriert.
|
||||||
|
7. Runtime DNS Overrides werden für Hostname-Auflösung bei Upstream-Verbindungen genutzt.
|
||||||
|
8. Im ME-Modus wird der gewählte Upstream auch für den ME-TCP-Dial-Pfad verwendet.
|
||||||
|
9. Im ME-Modus ist bei `direct` mit bind/interface die STUN-Reflection bind-aware für KDF-Adressmaterial.
|
||||||
|
10. Im ME-Modus werden bei SOCKS-Upstream `BND.ADDR/BND.PORT` für KDF verwendet, wenn gültig/öffentlich und gleiche IP-Familie.
|
||||||
|
|
||||||
|
## Upstream-Konfigurationsbeispiele
|
||||||
|
|
||||||
|
### Beispiel 1: Minimaler direct Upstream
|
||||||
|
|
||||||
|
```toml
|
||||||
|
[[upstreams]]
|
||||||
|
type = "direct"
|
||||||
|
weight = 1
|
||||||
|
enabled = true
|
||||||
|
```
|
||||||
|
|
||||||
|
### Beispiel 2: direct mit Interface + expliziten bind IPs
|
||||||
|
|
||||||
|
```toml
|
||||||
|
[[upstreams]]
|
||||||
|
type = "direct"
|
||||||
|
interface = "eth0"
|
||||||
|
bind_addresses = ["192.168.1.100", "192.168.1.101"]
|
||||||
|
weight = 3
|
||||||
|
enabled = true
|
||||||
|
```
|
||||||
|
|
||||||
|
### Beispiel 3: SOCKS5 Upstream mit Authentifizierung
|
||||||
|
|
||||||
|
```toml
|
||||||
|
[[upstreams]]
|
||||||
|
type = "socks5"
|
||||||
|
address = "198.51.100.30:1080"
|
||||||
|
username = "proxy-user"
|
||||||
|
password = "proxy-pass"
|
||||||
|
weight = 2
|
||||||
|
enabled = true
|
||||||
|
```
|
||||||
|
|
||||||
|
### Beispiel 4: Gemischte Upstreams mit Scopes
|
||||||
|
|
||||||
|
```toml
|
||||||
|
[[upstreams]]
|
||||||
|
type = "direct"
|
||||||
|
weight = 5
|
||||||
|
enabled = true
|
||||||
|
scopes = ""
|
||||||
|
|
||||||
|
[[upstreams]]
|
||||||
|
type = "socks5"
|
||||||
|
address = "203.0.113.40:1080"
|
||||||
|
username = "edge"
|
||||||
|
password = "edgepass"
|
||||||
|
weight = 3
|
||||||
|
enabled = true
|
||||||
|
scopes = "premium,me"
|
||||||
|
```
|
||||||
|
|
||||||
|
### Beispiel 5: ME-orientiertes Tuning-Profil
|
||||||
|
|
||||||
|
```toml
|
||||||
|
[general]
|
||||||
|
use_middle_proxy = true
|
||||||
|
proxy_secret_path = "proxy-secret"
|
||||||
|
middle_proxy_nat_probe = true
|
||||||
|
stun_nat_probe_concurrency = 16
|
||||||
|
middle_proxy_pool_size = 12
|
||||||
|
me_keepalive_enabled = true
|
||||||
|
me_keepalive_interval_secs = 20
|
||||||
|
me_keepalive_jitter_secs = 4
|
||||||
|
me_reconnect_max_concurrent_per_dc = 12
|
||||||
|
me_reconnect_backoff_base_ms = 300
|
||||||
|
me_reconnect_backoff_cap_ms = 10000
|
||||||
|
me_reconnect_fast_retry_count = 10
|
||||||
|
hardswap = true
|
||||||
|
me_reinit_every_secs = 600
|
||||||
|
me_hardswap_warmup_delay_min_ms = 500
|
||||||
|
me_hardswap_warmup_delay_max_ms = 1200
|
||||||
|
me_hardswap_warmup_extra_passes = 2
|
||||||
|
me_hardswap_warmup_pass_backoff_base_ms = 400
|
||||||
|
me_config_stable_snapshots = 3
|
||||||
|
me_config_apply_cooldown_secs = 120
|
||||||
|
proxy_secret_stable_snapshots = 3
|
||||||
|
proxy_secret_rotate_runtime = true
|
||||||
|
proxy_secret_len_max = 512
|
||||||
|
update_every = 300
|
||||||
|
me_pool_drain_ttl_secs = 120
|
||||||
|
me_pool_min_fresh_ratio = 0.9
|
||||||
|
me_reinit_drain_timeout_secs = 180
|
||||||
|
|
||||||
|
[timeouts]
|
||||||
|
me_one_retry = 8
|
||||||
|
me_one_timeout_ms = 1200
|
||||||
|
|
||||||
|
[network]
|
||||||
|
stun_use = true
|
||||||
|
stun_tcp_fallback = true
|
||||||
|
stun_servers = [
|
||||||
|
"stun1.l.google.com:19302",
|
||||||
|
"stun2.l.google.com:19302"
|
||||||
|
]
|
||||||
|
http_ip_detect_urls = [
|
||||||
|
"https://api.ipify.org",
|
||||||
|
"https://ifconfig.me/ip"
|
||||||
|
]
|
||||||
|
```
|
||||||
219
docs/TUNING.en.md
Normal file
219
docs/TUNING.en.md
Normal file
@@ -0,0 +1,219 @@
|
|||||||
|
# Telemt Tuning Guide: Middle-End and Upstreams
|
||||||
|
|
||||||
|
This document describes the current runtime behavior for Middle-End (ME) and upstream routing based on:
|
||||||
|
- `src/config/types.rs`
|
||||||
|
- `src/config/defaults.rs`
|
||||||
|
- `src/config/load.rs`
|
||||||
|
- `src/transport/upstream.rs`
|
||||||
|
|
||||||
|
Defaults below are code defaults (used when a key is omitted), not necessarily values from `config.full.toml` examples.
|
||||||
|
|
||||||
|
## Middle-End Parameters
|
||||||
|
|
||||||
|
### 1) Core ME mode, NAT, and STUN
|
||||||
|
|
||||||
|
| Parameter | Type | Default | Constraints / validation | Runtime effect | Example |
|
||||||
|
|---|---|---:|---|---|---|
|
||||||
|
| `general.use_middle_proxy` | `bool` | `true` | none | Enables ME transport mode. If `false`, Direct mode is used. | `use_middle_proxy = true` |
|
||||||
|
| `general.proxy_secret_path` | `Option<String>` | `"proxy-secret"` | path may be `null` | Path to Telegram infrastructure proxy-secret file. | `proxy_secret_path = "proxy-secret"` |
|
||||||
|
| `general.middle_proxy_nat_ip` | `Option<IpAddr>` | `null` | valid IP when set | Manual public NAT IP override for ME address material. | `middle_proxy_nat_ip = "203.0.113.10"` |
|
||||||
|
| `general.middle_proxy_nat_probe` | `bool` | `true` | auto-forced to `true` when `use_middle_proxy=true` | Enables ME NAT probing. | `middle_proxy_nat_probe = true` |
|
||||||
|
| `general.stun_nat_probe_concurrency` | `usize` | `8` | must be `> 0` | Max parallel STUN probes during NAT discovery. | `stun_nat_probe_concurrency = 16` |
|
||||||
|
| `network.stun_use` | `bool` | `true` | none | Global STUN switch. If `false`, STUN probing is disabled. | `stun_use = true` |
|
||||||
|
| `network.stun_servers` | `Vec<String>` | built-in public pool | deduplicated + empty values removed | Primary STUN server list for NAT/public endpoint discovery. | `stun_servers = ["stun1.l.google.com:19302"]` |
|
||||||
|
| `network.stun_tcp_fallback` | `bool` | `true` | none | Enables TCP fallback path when UDP STUN is blocked. | `stun_tcp_fallback = true` |
|
||||||
|
| `network.http_ip_detect_urls` | `Vec<String>` | `ifconfig.me` + `api.ipify.org` | none | HTTP fallback for public IPv4 detection if STUN is unavailable. | `http_ip_detect_urls = ["https://api.ipify.org"]` |
|
||||||
|
| `general.stun_iface_mismatch_ignore` | `bool` | `false` | none | Reserved flag in current revision (not consumed by runtime path). | `stun_iface_mismatch_ignore = false` |
|
||||||
|
| `timeouts.me_one_retry` | `u8` | `12` | none | Fast reconnect attempts for single-endpoint DC cases. | `me_one_retry = 6` |
|
||||||
|
| `timeouts.me_one_timeout_ms` | `u64` | `1200` | none | Timeout per quick single-endpoint attempt (ms). | `me_one_timeout_ms = 1500` |
|
||||||
|
|
||||||
|
### 2) Pool size, keepalive, and reconnect policy
|
||||||
|
|
||||||
|
| Parameter | Type | Default | Constraints / validation | Runtime effect | Example |
|
||||||
|
|---|---|---:|---|---|---|
|
||||||
|
| `general.middle_proxy_pool_size` | `usize` | `8` | none | Target active ME writer pool size. | `middle_proxy_pool_size = 12` |
|
||||||
|
| `general.middle_proxy_warm_standby` | `usize` | `16` | none | Reserved compatibility field in current revision (no active runtime consumer). | `middle_proxy_warm_standby = 16` |
|
||||||
|
| `general.me_keepalive_enabled` | `bool` | `true` | none | Enables periodic ME keepalive/ping traffic. | `me_keepalive_enabled = true` |
|
||||||
|
| `general.me_keepalive_interval_secs` | `u64` | `25` | none | Base keepalive interval (seconds). | `me_keepalive_interval_secs = 20` |
|
||||||
|
| `general.me_keepalive_jitter_secs` | `u64` | `5` | none | Keepalive jitter to avoid synchronization bursts. | `me_keepalive_jitter_secs = 3` |
|
||||||
|
| `general.me_keepalive_payload_random` | `bool` | `true` | none | Randomizes keepalive payload bytes. | `me_keepalive_payload_random = true` |
|
||||||
|
| `general.me_warmup_stagger_enabled` | `bool` | `true` | none | Staggers extra ME warmup dials to avoid spikes. | `me_warmup_stagger_enabled = true` |
|
||||||
|
| `general.me_warmup_step_delay_ms` | `u64` | `500` | none | Base delay between warmup dial steps (ms). | `me_warmup_step_delay_ms = 300` |
|
||||||
|
| `general.me_warmup_step_jitter_ms` | `u64` | `300` | none | Additional random delay for warmup steps (ms). | `me_warmup_step_jitter_ms = 200` |
|
||||||
|
| `general.me_reconnect_max_concurrent_per_dc` | `u32` | `8` | none | Limits concurrent reconnect workers per DC in health recovery. | `me_reconnect_max_concurrent_per_dc = 12` |
|
||||||
|
| `general.me_reconnect_backoff_base_ms` | `u64` | `500` | none | Initial reconnect backoff (ms). | `me_reconnect_backoff_base_ms = 250` |
|
||||||
|
| `general.me_reconnect_backoff_cap_ms` | `u64` | `30000` | none | Maximum reconnect backoff (ms). | `me_reconnect_backoff_cap_ms = 10000` |
|
||||||
|
| `general.me_reconnect_fast_retry_count` | `u32` | `16` | none | Immediate retry budget before long backoff behavior. | `me_reconnect_fast_retry_count = 8` |
|
||||||
|
|
||||||
|
### 3) Reinit/hardswap, secret rotation, and degradation
|
||||||
|
|
||||||
|
| Parameter | Type | Default | Constraints / validation | Runtime effect | Example |
|
||||||
|
|---|---|---:|---|---|---|
|
||||||
|
| `general.hardswap` | `bool` | `true` | none | Enables generation-based ME hardswap strategy. | `hardswap = true` |
|
||||||
|
| `general.me_reinit_every_secs` | `u64` | `900` | must be `> 0` | Periodic ME reinit interval. | `me_reinit_every_secs = 600` |
|
||||||
|
| `general.me_hardswap_warmup_delay_min_ms` | `u64` | `1000` | must be `<= me_hardswap_warmup_delay_max_ms` | Lower bound for hardswap warmup dial spacing. | `me_hardswap_warmup_delay_min_ms = 500` |
|
||||||
|
| `general.me_hardswap_warmup_delay_max_ms` | `u64` | `2000` | must be `> 0` | Upper bound for hardswap warmup dial spacing. | `me_hardswap_warmup_delay_max_ms = 1200` |
|
||||||
|
| `general.me_hardswap_warmup_extra_passes` | `u8` | `3` | must be within `[0,10]` | Additional warmup passes after base pass. | `me_hardswap_warmup_extra_passes = 2` |
|
||||||
|
| `general.me_hardswap_warmup_pass_backoff_base_ms` | `u64` | `500` | must be `> 0` | Base backoff between extra warmup passes. | `me_hardswap_warmup_pass_backoff_base_ms = 400` |
|
||||||
|
| `general.me_config_stable_snapshots` | `u8` | `2` | must be `> 0` | Number of identical ME config snapshots required before apply. | `me_config_stable_snapshots = 3` |
|
||||||
|
| `general.me_config_apply_cooldown_secs` | `u64` | `300` | none | Cooldown between applied ME map updates. | `me_config_apply_cooldown_secs = 120` |
|
||||||
|
| `general.proxy_secret_stable_snapshots` | `u8` | `2` | must be `> 0` | Number of identical proxy-secret snapshots required before rotation. | `proxy_secret_stable_snapshots = 3` |
|
||||||
|
| `general.proxy_secret_rotate_runtime` | `bool` | `true` | none | Enables runtime proxy-secret rotation. | `proxy_secret_rotate_runtime = true` |
|
||||||
|
| `general.proxy_secret_len_max` | `usize` | `256` | must be within `[32,4096]` | Upper limit for accepted proxy-secret length. | `proxy_secret_len_max = 512` |
|
||||||
|
| `general.update_every` | `Option<u64>` | `300` | if set: must be `> 0`; if `null`: legacy min fallback | Unified refresh interval for ME config + secret updater. | `update_every = 300` |
|
||||||
|
| `general.me_pool_drain_ttl_secs` | `u64` | `90` | none | Time window where stale writers remain fallback-eligible. | `me_pool_drain_ttl_secs = 120` |
|
||||||
|
| `general.me_pool_min_fresh_ratio` | `f32` | `0.8` | must be within `[0.0,1.0]` | Coverage threshold before stale generation can be drained. | `me_pool_min_fresh_ratio = 0.9` |
|
||||||
|
| `general.me_reinit_drain_timeout_secs` | `u64` | `120` | `0` means no force-close; if `>0 && < TTL` it is bumped to TTL | Force-close timeout for draining stale writers. | `me_reinit_drain_timeout_secs = 0` |
|
||||||
|
| `general.auto_degradation_enabled` | `bool` | `true` | none | Reserved compatibility flag in current revision (no active runtime consumer). | `auto_degradation_enabled = true` |
|
||||||
|
| `general.degradation_min_unavailable_dc_groups` | `u8` | `2` | none | Reserved compatibility threshold in current revision (no active runtime consumer). | `degradation_min_unavailable_dc_groups = 2` |
|
||||||
|
|
||||||
|
## Deprecated / Legacy Parameters
|
||||||
|
|
||||||
|
| Parameter | Status | Replacement | Current behavior | Migration recommendation |
|
||||||
|
|---|---|---|---|---|
|
||||||
|
| `general.middle_proxy_nat_stun` | Deprecated | `network.stun_servers` | Merged into `network.stun_servers` only when `network.stun_servers` is not explicitly set. | Move value into `network.stun_servers` and remove legacy key. |
|
||||||
|
| `general.middle_proxy_nat_stun_servers` | Deprecated | `network.stun_servers` | Merged into `network.stun_servers` only when `network.stun_servers` is not explicitly set. | Move values into `network.stun_servers` and remove legacy key. |
|
||||||
|
| `general.proxy_secret_auto_reload_secs` | Deprecated | `general.update_every` | Used only when `update_every = null` (legacy fallback path). | Set `general.update_every` explicitly and remove legacy key. |
|
||||||
|
| `general.proxy_config_auto_reload_secs` | Deprecated | `general.update_every` | Used only when `update_every = null` (legacy fallback path). | Set `general.update_every` explicitly and remove legacy key. |
|
||||||
|
|
||||||
|
## How Upstreams Are Configured
|
||||||
|
|
||||||
|
### Upstream schema
|
||||||
|
|
||||||
|
| Field | Applies to | Type | Required | Default | Meaning |
|
||||||
|
|---|---|---|---|---|---|
|
||||||
|
| `[[upstreams]].type` | all upstreams | `"direct" \| "socks4" \| "socks5"` | yes | n/a | Upstream transport type. |
|
||||||
|
| `[[upstreams]].weight` | all upstreams | `u16` | no | `1` | Base weight for weighted-random selection. |
|
||||||
|
| `[[upstreams]].enabled` | all upstreams | `bool` | no | `true` | Disabled entries are ignored at startup. |
|
||||||
|
| `[[upstreams]].scopes` | all upstreams | `String` | no | `""` | Comma-separated scope tags for request-level routing. |
|
||||||
|
| `interface` | `direct` | `Option<String>` | no | `null` | Interface name (e.g. `eth0`) or literal local IP for bind selection. |
|
||||||
|
| `bind_addresses` | `direct` | `Option<Vec<IpAddr>>` | no | `null` | Explicit local source IP candidates (strict priority over `interface`). |
|
||||||
|
| `address` | `socks4` | `String` | yes | n/a | SOCKS4 server endpoint (`ip:port` or `host:port`). |
|
||||||
|
| `interface` | `socks4` | `Option<String>` | no | `null` | Used only for SOCKS server `ip:port` dial path. |
|
||||||
|
| `user_id` | `socks4` | `Option<String>` | no | `null` | SOCKS4 user ID for CONNECT request. |
|
||||||
|
| `address` | `socks5` | `String` | yes | n/a | SOCKS5 server endpoint (`ip:port` or `host:port`). |
|
||||||
|
| `interface` | `socks5` | `Option<String>` | no | `null` | Used only for SOCKS server `ip:port` dial path. |
|
||||||
|
| `username` | `socks5` | `Option<String>` | no | `null` | SOCKS5 username auth. |
|
||||||
|
| `password` | `socks5` | `Option<String>` | no | `null` | SOCKS5 password auth. |
|
||||||
|
|
||||||
|
### Runtime rules (important)
|
||||||
|
|
||||||
|
1. If `[[upstreams]]` is omitted, loader injects one default `direct` upstream.
|
||||||
|
2. Scope filtering is exact-token based:
|
||||||
|
- when request scope is set -> only entries whose `scopes` contains that exact token;
|
||||||
|
- when request scope is not set -> only entries with empty `scopes`.
|
||||||
|
3. Healthy upstreams are selected by weighted random using: `weight * latency_factor`.
|
||||||
|
4. If no healthy upstream exists in filtered set, random selection is used among filtered entries.
|
||||||
|
5. `direct` bind resolution order:
|
||||||
|
- `bind_addresses` candidates (same IP family as target) first;
|
||||||
|
- if `interface` is an interface name and `bind_addresses` is set, each candidate IP is validated against addresses currently assigned to that interface;
|
||||||
|
- invalid candidates are dropped with `WARN`;
|
||||||
|
- if no valid candidate remains, connection falls back to unbound direct connect (`bind_ip=None`);
|
||||||
|
- if no `bind_addresses` candidate, `interface` is used (literal IP or resolved interface primary IP).
|
||||||
|
6. For `socks4/socks5` with `address` as hostname, interface binding is not supported and is ignored with warning.
|
||||||
|
7. Runtime DNS overrides are used for upstream hostname resolution.
|
||||||
|
8. In ME mode, the selected upstream is also used for ME TCP dial path.
|
||||||
|
9. In ME mode for `direct` upstream with bind/interface, STUN reflection logic is bind-aware for KDF source material.
|
||||||
|
10. In ME mode for SOCKS upstream, SOCKS `BND.ADDR/BND.PORT` is used for KDF when it is valid/public for the same family.
|
||||||
|
|
||||||
|
## Upstream Configuration Examples
|
||||||
|
|
||||||
|
### Example 1: Minimal direct upstream
|
||||||
|
|
||||||
|
```toml
|
||||||
|
[[upstreams]]
|
||||||
|
type = "direct"
|
||||||
|
weight = 1
|
||||||
|
enabled = true
|
||||||
|
```
|
||||||
|
|
||||||
|
### Example 2: Direct with interface + explicit bind addresses
|
||||||
|
|
||||||
|
```toml
|
||||||
|
[[upstreams]]
|
||||||
|
type = "direct"
|
||||||
|
interface = "eth0"
|
||||||
|
bind_addresses = ["192.168.1.100", "192.168.1.101"]
|
||||||
|
weight = 3
|
||||||
|
enabled = true
|
||||||
|
```
|
||||||
|
|
||||||
|
### Example 3: SOCKS5 upstream with authentication
|
||||||
|
|
||||||
|
```toml
|
||||||
|
[[upstreams]]
|
||||||
|
type = "socks5"
|
||||||
|
address = "198.51.100.30:1080"
|
||||||
|
username = "proxy-user"
|
||||||
|
password = "proxy-pass"
|
||||||
|
weight = 2
|
||||||
|
enabled = true
|
||||||
|
```
|
||||||
|
|
||||||
|
### Example 4: Mixed upstreams with scopes
|
||||||
|
|
||||||
|
```toml
|
||||||
|
[[upstreams]]
|
||||||
|
type = "direct"
|
||||||
|
weight = 5
|
||||||
|
enabled = true
|
||||||
|
scopes = ""
|
||||||
|
|
||||||
|
[[upstreams]]
|
||||||
|
type = "socks5"
|
||||||
|
address = "203.0.113.40:1080"
|
||||||
|
username = "edge"
|
||||||
|
password = "edgepass"
|
||||||
|
weight = 3
|
||||||
|
enabled = true
|
||||||
|
scopes = "premium,me"
|
||||||
|
```
|
||||||
|
|
||||||
|
### Example 5: ME-focused tuning profile
|
||||||
|
|
||||||
|
```toml
|
||||||
|
[general]
|
||||||
|
use_middle_proxy = true
|
||||||
|
proxy_secret_path = "proxy-secret"
|
||||||
|
middle_proxy_nat_probe = true
|
||||||
|
stun_nat_probe_concurrency = 16
|
||||||
|
middle_proxy_pool_size = 12
|
||||||
|
me_keepalive_enabled = true
|
||||||
|
me_keepalive_interval_secs = 20
|
||||||
|
me_keepalive_jitter_secs = 4
|
||||||
|
me_reconnect_max_concurrent_per_dc = 12
|
||||||
|
me_reconnect_backoff_base_ms = 300
|
||||||
|
me_reconnect_backoff_cap_ms = 10000
|
||||||
|
me_reconnect_fast_retry_count = 10
|
||||||
|
hardswap = true
|
||||||
|
me_reinit_every_secs = 600
|
||||||
|
me_hardswap_warmup_delay_min_ms = 500
|
||||||
|
me_hardswap_warmup_delay_max_ms = 1200
|
||||||
|
me_hardswap_warmup_extra_passes = 2
|
||||||
|
me_hardswap_warmup_pass_backoff_base_ms = 400
|
||||||
|
me_config_stable_snapshots = 3
|
||||||
|
me_config_apply_cooldown_secs = 120
|
||||||
|
proxy_secret_stable_snapshots = 3
|
||||||
|
proxy_secret_rotate_runtime = true
|
||||||
|
proxy_secret_len_max = 512
|
||||||
|
update_every = 300
|
||||||
|
me_pool_drain_ttl_secs = 120
|
||||||
|
me_pool_min_fresh_ratio = 0.9
|
||||||
|
me_reinit_drain_timeout_secs = 180
|
||||||
|
|
||||||
|
[timeouts]
|
||||||
|
me_one_retry = 8
|
||||||
|
me_one_timeout_ms = 1200
|
||||||
|
|
||||||
|
[network]
|
||||||
|
stun_use = true
|
||||||
|
stun_tcp_fallback = true
|
||||||
|
stun_servers = [
|
||||||
|
"stun1.l.google.com:19302",
|
||||||
|
"stun2.l.google.com:19302"
|
||||||
|
]
|
||||||
|
http_ip_detect_urls = [
|
||||||
|
"https://api.ipify.org",
|
||||||
|
"https://ifconfig.me/ip"
|
||||||
|
]
|
||||||
|
```
|
||||||
219
docs/TUNING.ru.md
Normal file
219
docs/TUNING.ru.md
Normal file
@@ -0,0 +1,219 @@
|
|||||||
|
# Руководство по тюнингу Telemt: Middle-End и Upstreams
|
||||||
|
|
||||||
|
Документ описывает актуальное поведение Middle-End (ME) и маршрутизации через upstream на основе:
|
||||||
|
- `src/config/types.rs`
|
||||||
|
- `src/config/defaults.rs`
|
||||||
|
- `src/config/load.rs`
|
||||||
|
- `src/transport/upstream.rs`
|
||||||
|
|
||||||
|
Значения `Default` ниже — это значения из кода при отсутствии ключа в конфиге, а не обязательно значения из примеров `config.full.toml`.
|
||||||
|
|
||||||
|
## Параметры Middle-End
|
||||||
|
|
||||||
|
### 1) Базовый режим ME, NAT и STUN
|
||||||
|
|
||||||
|
| Параметр | Тип | Default | Ограничения / валидация | Влияние на runtime | Пример |
|
||||||
|
|---|---|---:|---|---|---|
|
||||||
|
| `general.use_middle_proxy` | `bool` | `true` | нет | Включает транспорт ME. При `false` используется Direct-режим. | `use_middle_proxy = true` |
|
||||||
|
| `general.proxy_secret_path` | `Option<String>` | `"proxy-secret"` | путь может быть `null` | Путь к инфраструктурному proxy-secret Telegram. | `proxy_secret_path = "proxy-secret"` |
|
||||||
|
| `general.middle_proxy_nat_ip` | `Option<IpAddr>` | `null` | валидный IP при задании | Ручной override публичного NAT IP для адресного материала ME. | `middle_proxy_nat_ip = "203.0.113.10"` |
|
||||||
|
| `general.middle_proxy_nat_probe` | `bool` | `true` | авто-принудительно `true`, если `use_middle_proxy=true` | Включает NAT probing для ME. | `middle_proxy_nat_probe = true` |
|
||||||
|
| `general.stun_nat_probe_concurrency` | `usize` | `8` | должно быть `> 0` | Максимум параллельных STUN-проб при NAT-детекте. | `stun_nat_probe_concurrency = 16` |
|
||||||
|
| `network.stun_use` | `bool` | `true` | нет | Глобальный переключатель STUN. При `false` STUN отключен. | `stun_use = true` |
|
||||||
|
| `network.stun_servers` | `Vec<String>` | встроенный публичный пул | удаляются дубликаты и пустые значения | Основной список STUN-серверов для NAT/public endpoint discovery. | `stun_servers = ["stun1.l.google.com:19302"]` |
|
||||||
|
| `network.stun_tcp_fallback` | `bool` | `true` | нет | Включает TCP fallback, если UDP STUN недоступен. | `stun_tcp_fallback = true` |
|
||||||
|
| `network.http_ip_detect_urls` | `Vec<String>` | `ifconfig.me` + `api.ipify.org` | нет | HTTP fallback для определения публичного IPv4 при недоступности STUN. | `http_ip_detect_urls = ["https://api.ipify.org"]` |
|
||||||
|
| `general.stun_iface_mismatch_ignore` | `bool` | `false` | нет | Зарезервированный флаг в текущей ревизии (runtime его не использует). | `stun_iface_mismatch_ignore = false` |
|
||||||
|
| `timeouts.me_one_retry` | `u8` | `12` | нет | Количество быстрых reconnect-попыток для DC с одним endpoint. | `me_one_retry = 6` |
|
||||||
|
| `timeouts.me_one_timeout_ms` | `u64` | `1200` | нет | Таймаут одной быстрой попытки (мс). | `me_one_timeout_ms = 1500` |
|
||||||
|
|
||||||
|
### 2) Размер пула, keepalive и reconnect-политика
|
||||||
|
|
||||||
|
| Параметр | Тип | Default | Ограничения / валидация | Влияние на runtime | Пример |
|
||||||
|
|---|---|---:|---|---|---|
|
||||||
|
| `general.middle_proxy_pool_size` | `usize` | `8` | нет | Целевой размер активного пула ME-writer соединений. | `middle_proxy_pool_size = 12` |
|
||||||
|
| `general.middle_proxy_warm_standby` | `usize` | `16` | нет | Зарезервированное поле совместимости в текущей ревизии (активного runtime-consumer нет). | `middle_proxy_warm_standby = 16` |
|
||||||
|
| `general.me_keepalive_enabled` | `bool` | `true` | нет | Включает периодические keepalive/ping кадры ME. | `me_keepalive_enabled = true` |
|
||||||
|
| `general.me_keepalive_interval_secs` | `u64` | `25` | нет | Базовый интервал keepalive (сек). | `me_keepalive_interval_secs = 20` |
|
||||||
|
| `general.me_keepalive_jitter_secs` | `u64` | `5` | нет | Джиттер keepalive для предотвращения синхронных всплесков. | `me_keepalive_jitter_secs = 3` |
|
||||||
|
| `general.me_keepalive_payload_random` | `bool` | `true` | нет | Рандомизирует payload keepalive-кадров. | `me_keepalive_payload_random = true` |
|
||||||
|
| `general.me_warmup_stagger_enabled` | `bool` | `true` | нет | Включает staggered warmup дополнительных ME-коннектов. | `me_warmup_stagger_enabled = true` |
|
||||||
|
| `general.me_warmup_step_delay_ms` | `u64` | `500` | нет | Базовая задержка между шагами warmup (мс). | `me_warmup_step_delay_ms = 300` |
|
||||||
|
| `general.me_warmup_step_jitter_ms` | `u64` | `300` | нет | Дополнительный случайный warmup-джиттер (мс). | `me_warmup_step_jitter_ms = 200` |
|
||||||
|
| `general.me_reconnect_max_concurrent_per_dc` | `u32` | `8` | нет | Ограничивает параллельные reconnect worker'ы на один DC. | `me_reconnect_max_concurrent_per_dc = 12` |
|
||||||
|
| `general.me_reconnect_backoff_base_ms` | `u64` | `500` | нет | Начальный backoff reconnect (мс). | `me_reconnect_backoff_base_ms = 250` |
|
||||||
|
| `general.me_reconnect_backoff_cap_ms` | `u64` | `30000` | нет | Верхняя граница backoff reconnect (мс). | `me_reconnect_backoff_cap_ms = 10000` |
|
||||||
|
| `general.me_reconnect_fast_retry_count` | `u32` | `16` | нет | Бюджет быстрых retry до длинного backoff. | `me_reconnect_fast_retry_count = 8` |
|
||||||
|
|
||||||
|
### 3) Reinit/hardswap, ротация секрета и деградация
|
||||||
|
|
||||||
|
| Параметр | Тип | Default | Ограничения / валидация | Влияние на runtime | Пример |
|
||||||
|
|---|---|---:|---|---|---|
|
||||||
|
| `general.hardswap` | `bool` | `true` | нет | Включает generation-based стратегию hardswap для ME-пула. | `hardswap = true` |
|
||||||
|
| `general.me_reinit_every_secs` | `u64` | `900` | должно быть `> 0` | Интервал периодического reinit ME-пула. | `me_reinit_every_secs = 600` |
|
||||||
|
| `general.me_hardswap_warmup_delay_min_ms` | `u64` | `1000` | должно быть `<= me_hardswap_warmup_delay_max_ms` | Нижняя граница пауз между warmup dial попытками. | `me_hardswap_warmup_delay_min_ms = 500` |
|
||||||
|
| `general.me_hardswap_warmup_delay_max_ms` | `u64` | `2000` | должно быть `> 0` | Верхняя граница пауз между warmup dial попытками. | `me_hardswap_warmup_delay_max_ms = 1200` |
|
||||||
|
| `general.me_hardswap_warmup_extra_passes` | `u8` | `3` | диапазон `[0,10]` | Дополнительные warmup-проходы после базового. | `me_hardswap_warmup_extra_passes = 2` |
|
||||||
|
| `general.me_hardswap_warmup_pass_backoff_base_ms` | `u64` | `500` | должно быть `> 0` | Базовый backoff между extra-pass в warmup. | `me_hardswap_warmup_pass_backoff_base_ms = 400` |
|
||||||
|
| `general.me_config_stable_snapshots` | `u8` | `2` | должно быть `> 0` | Количество одинаковых snapshot перед применением ME map update. | `me_config_stable_snapshots = 3` |
|
||||||
|
| `general.me_config_apply_cooldown_secs` | `u64` | `300` | нет | Cooldown между применёнными обновлениями ME map. | `me_config_apply_cooldown_secs = 120` |
|
||||||
|
| `general.proxy_secret_stable_snapshots` | `u8` | `2` | должно быть `> 0` | Количество одинаковых snapshot перед runtime-rotation proxy-secret. | `proxy_secret_stable_snapshots = 3` |
|
||||||
|
| `general.proxy_secret_rotate_runtime` | `bool` | `true` | нет | Включает runtime-ротацию proxy-secret. | `proxy_secret_rotate_runtime = true` |
|
||||||
|
| `general.proxy_secret_len_max` | `usize` | `256` | диапазон `[32,4096]` | Верхний лимит длины принимаемого proxy-secret. | `proxy_secret_len_max = 512` |
|
||||||
|
| `general.update_every` | `Option<u64>` | `300` | если задано: `> 0`; если `null`: fallback на legacy минимум | Единый интервал refresh для ME config + secret updater. | `update_every = 300` |
|
||||||
|
| `general.me_pool_drain_ttl_secs` | `u64` | `90` | нет | Время, когда stale writer ещё может использоваться как fallback. | `me_pool_drain_ttl_secs = 120` |
|
||||||
|
| `general.me_pool_min_fresh_ratio` | `f32` | `0.8` | диапазон `[0.0,1.0]` | Порог покрытия fresh-поколения перед drain старого поколения. | `me_pool_min_fresh_ratio = 0.9` |
|
||||||
|
| `general.me_reinit_drain_timeout_secs` | `u64` | `120` | `0` = без force-close; если `>0 && < TTL`, поднимается до TTL | Таймаут force-close для draining stale writer. | `me_reinit_drain_timeout_secs = 0` |
|
||||||
|
| `general.auto_degradation_enabled` | `bool` | `true` | нет | Зарезервированный флаг совместимости в текущей ревизии (активного runtime-consumer нет). | `auto_degradation_enabled = true` |
|
||||||
|
| `general.degradation_min_unavailable_dc_groups` | `u8` | `2` | нет | Зарезервированный порог совместимости в текущей ревизии (активного runtime-consumer нет). | `degradation_min_unavailable_dc_groups = 2` |
|
||||||
|
|
||||||
|
## Устаревшие / legacy параметры
|
||||||
|
|
||||||
|
| Параметр | Статус | Замена | Текущее поведение | Рекомендация миграции |
|
||||||
|
|---|---|---|---|---|
|
||||||
|
| `general.middle_proxy_nat_stun` | Deprecated | `network.stun_servers` | Добавляется в `network.stun_servers`, только если `network.stun_servers` не задан явно. | Перенести значение в `network.stun_servers`, legacy-ключ удалить. |
|
||||||
|
| `general.middle_proxy_nat_stun_servers` | Deprecated | `network.stun_servers` | Добавляется в `network.stun_servers`, только если `network.stun_servers` не задан явно. | Перенести значения в `network.stun_servers`, legacy-ключ удалить. |
|
||||||
|
| `general.proxy_secret_auto_reload_secs` | Deprecated | `general.update_every` | Используется только если `update_every = null` (legacy fallback). | Явно задать `general.update_every`, legacy-ключ удалить. |
|
||||||
|
| `general.proxy_config_auto_reload_secs` | Deprecated | `general.update_every` | Используется только если `update_every = null` (legacy fallback). | Явно задать `general.update_every`, legacy-ключ удалить. |
|
||||||
|
|
||||||
|
## Как конфигурируются Upstreams
|
||||||
|
|
||||||
|
### Схема upstream
|
||||||
|
|
||||||
|
| Поле | Применимость | Тип | Обязательно | Default | Назначение |
|
||||||
|
|---|---|---|---|---|---|
|
||||||
|
| `[[upstreams]].type` | все upstream | `"direct" \| "socks4" \| "socks5"` | да | n/a | Тип upstream транспорта. |
|
||||||
|
| `[[upstreams]].weight` | все upstream | `u16` | нет | `1` | Базовый вес в weighted-random выборе. |
|
||||||
|
| `[[upstreams]].enabled` | все upstream | `bool` | нет | `true` | Выключенные записи игнорируются на старте. |
|
||||||
|
| `[[upstreams]].scopes` | все upstream | `String` | нет | `""` | Список scope-токенов через запятую для маршрутизации. |
|
||||||
|
| `interface` | `direct` | `Option<String>` | нет | `null` | Имя интерфейса (например `eth0`) или literal локальный IP. |
|
||||||
|
| `bind_addresses` | `direct` | `Option<Vec<IpAddr>>` | нет | `null` | Явные кандидаты source IP (имеют приоритет над `interface`). |
|
||||||
|
| `address` | `socks4` | `String` | да | n/a | Адрес SOCKS4 сервера (`ip:port` или `host:port`). |
|
||||||
|
| `interface` | `socks4` | `Option<String>` | нет | `null` | Используется только если `address` задан как `ip:port`. |
|
||||||
|
| `user_id` | `socks4` | `Option<String>` | нет | `null` | SOCKS4 user ID в CONNECT-запросе. |
|
||||||
|
| `address` | `socks5` | `String` | да | n/a | Адрес SOCKS5 сервера (`ip:port` или `host:port`). |
|
||||||
|
| `interface` | `socks5` | `Option<String>` | нет | `null` | Используется только если `address` задан как `ip:port`. |
|
||||||
|
| `username` | `socks5` | `Option<String>` | нет | `null` | Логин SOCKS5 auth. |
|
||||||
|
| `password` | `socks5` | `Option<String>` | нет | `null` | Пароль SOCKS5 auth. |
|
||||||
|
|
||||||
|
### Runtime-правила
|
||||||
|
|
||||||
|
1. Если `[[upstreams]]` отсутствует, loader добавляет один upstream `direct` по умолчанию.
|
||||||
|
2. Scope-фильтрация — по точному совпадению токена:
|
||||||
|
- если scope запроса задан -> используются только записи, где `scopes` содержит такой же токен;
|
||||||
|
- если scope запроса не задан -> используются только записи с пустым `scopes`.
|
||||||
|
3. Среди healthy upstream используется weighted-random выбор: `weight * latency_factor`.
|
||||||
|
4. Если в отфильтрованном наборе нет healthy upstream, выбирается случайный из отфильтрованных.
|
||||||
|
5. Порядок выбора bind для `direct`:
|
||||||
|
- сначала `bind_addresses` (только IP нужного семейства);
|
||||||
|
- если одновременно заданы `interface` (имя) и `bind_addresses`, каждый IP проверяется на принадлежность интерфейсу;
|
||||||
|
- несовпадающие IP отбрасываются с `WARN`;
|
||||||
|
- если валидных IP не осталось, используется unbound direct connect (`bind_ip=None`);
|
||||||
|
- если `bind_addresses` не подходит, применяется `interface` (literal IP или адрес интерфейса).
|
||||||
|
6. Для `socks4/socks5` с `address` в виде hostname интерфейсный bind не поддерживается и игнорируется с предупреждением.
|
||||||
|
7. Runtime DNS overrides применяются к резолвингу hostname в upstream-подключениях.
|
||||||
|
8. В ME-режиме выбранный upstream также используется для ME TCP dial path.
|
||||||
|
9. В ME-режиме для `direct` upstream с bind/interface STUN-рефлексия выполняется bind-aware для KDF материала.
|
||||||
|
10. В ME-режиме для SOCKS upstream используются `BND.ADDR/BND.PORT` для KDF, если адрес валиден/публичен и соответствует IP family.
|
||||||
|
|
||||||
|
## Примеры конфигурации Upstreams
|
||||||
|
|
||||||
|
### Пример 1: минимальный direct upstream
|
||||||
|
|
||||||
|
```toml
|
||||||
|
[[upstreams]]
|
||||||
|
type = "direct"
|
||||||
|
weight = 1
|
||||||
|
enabled = true
|
||||||
|
```
|
||||||
|
|
||||||
|
### Пример 2: direct с interface + явными bind IP
|
||||||
|
|
||||||
|
```toml
|
||||||
|
[[upstreams]]
|
||||||
|
type = "direct"
|
||||||
|
interface = "eth0"
|
||||||
|
bind_addresses = ["192.168.1.100", "192.168.1.101"]
|
||||||
|
weight = 3
|
||||||
|
enabled = true
|
||||||
|
```
|
||||||
|
|
||||||
|
### Пример 3: SOCKS5 upstream с аутентификацией
|
||||||
|
|
||||||
|
```toml
|
||||||
|
[[upstreams]]
|
||||||
|
type = "socks5"
|
||||||
|
address = "198.51.100.30:1080"
|
||||||
|
username = "proxy-user"
|
||||||
|
password = "proxy-pass"
|
||||||
|
weight = 2
|
||||||
|
enabled = true
|
||||||
|
```
|
||||||
|
|
||||||
|
### Пример 4: смешанные upstream с scopes
|
||||||
|
|
||||||
|
```toml
|
||||||
|
[[upstreams]]
|
||||||
|
type = "direct"
|
||||||
|
weight = 5
|
||||||
|
enabled = true
|
||||||
|
scopes = ""
|
||||||
|
|
||||||
|
[[upstreams]]
|
||||||
|
type = "socks5"
|
||||||
|
address = "203.0.113.40:1080"
|
||||||
|
username = "edge"
|
||||||
|
password = "edgepass"
|
||||||
|
weight = 3
|
||||||
|
enabled = true
|
||||||
|
scopes = "premium,me"
|
||||||
|
```
|
||||||
|
|
||||||
|
### Пример 5: профиль тюнинга под ME
|
||||||
|
|
||||||
|
```toml
|
||||||
|
[general]
|
||||||
|
use_middle_proxy = true
|
||||||
|
proxy_secret_path = "proxy-secret"
|
||||||
|
middle_proxy_nat_probe = true
|
||||||
|
stun_nat_probe_concurrency = 16
|
||||||
|
middle_proxy_pool_size = 12
|
||||||
|
me_keepalive_enabled = true
|
||||||
|
me_keepalive_interval_secs = 20
|
||||||
|
me_keepalive_jitter_secs = 4
|
||||||
|
me_reconnect_max_concurrent_per_dc = 12
|
||||||
|
me_reconnect_backoff_base_ms = 300
|
||||||
|
me_reconnect_backoff_cap_ms = 10000
|
||||||
|
me_reconnect_fast_retry_count = 10
|
||||||
|
hardswap = true
|
||||||
|
me_reinit_every_secs = 600
|
||||||
|
me_hardswap_warmup_delay_min_ms = 500
|
||||||
|
me_hardswap_warmup_delay_max_ms = 1200
|
||||||
|
me_hardswap_warmup_extra_passes = 2
|
||||||
|
me_hardswap_warmup_pass_backoff_base_ms = 400
|
||||||
|
me_config_stable_snapshots = 3
|
||||||
|
me_config_apply_cooldown_secs = 120
|
||||||
|
proxy_secret_stable_snapshots = 3
|
||||||
|
proxy_secret_rotate_runtime = true
|
||||||
|
proxy_secret_len_max = 512
|
||||||
|
update_every = 300
|
||||||
|
me_pool_drain_ttl_secs = 120
|
||||||
|
me_pool_min_fresh_ratio = 0.9
|
||||||
|
me_reinit_drain_timeout_secs = 180
|
||||||
|
|
||||||
|
[timeouts]
|
||||||
|
me_one_retry = 8
|
||||||
|
me_one_timeout_ms = 1200
|
||||||
|
|
||||||
|
[network]
|
||||||
|
stun_use = true
|
||||||
|
stun_tcp_fallback = true
|
||||||
|
stun_servers = [
|
||||||
|
"stun1.l.google.com:19302",
|
||||||
|
"stun2.l.google.com:19302"
|
||||||
|
]
|
||||||
|
http_ip_detect_urls = [
|
||||||
|
"https://api.ipify.org",
|
||||||
|
"https://ifconfig.me/ip"
|
||||||
|
]
|
||||||
|
```
|
||||||
321
docs/XRAY-SINGBOX-ROUTING.ru.md
Normal file
321
docs/XRAY-SINGBOX-ROUTING.ru.md
Normal file
@@ -0,0 +1,321 @@
|
|||||||
|
# SNI-маршрутизация в xray-core / sing-box + TLS-fronting
|
||||||
|
|
||||||
|
## Термины (в контексте этого кейса)
|
||||||
|
|
||||||
|
- **TLS-fronting домен** — домен, который фигурирует в TLS ClientHello как **SNI** (например, `petrovich.ru`): он используется как "маска" на L7 и как ключ маршрутизации в прокси-роутере.
|
||||||
|
- **xray-core / sing-box** — локальный или удалённый L7/TLS-роутер (прокси), который:
|
||||||
|
1) принимает входящее TCP/TLS-соединение,
|
||||||
|
2) читает TLS ClientHello,
|
||||||
|
3) извлекает SNI,
|
||||||
|
4) по SNI выбирает outbound/апстрим,
|
||||||
|
5) устанавливает новое TCP-соединение к целевому хосту уже **от себя**.
|
||||||
|
- **SNI (Server Name Indication)** — поле в TLS ClientHello, где клиент Telegram сообщает доменное имя для "маскировки"
|
||||||
|
- **DNS-resolve на стороне L7-роутера** — если выходной адрес задан доменом (или роутер решил "всё равно идти по SNI"), то DNS резолвится **на стороне xray/sing-box**, а не на стороне Telegram-клиента
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Ключевая идея: куда на самом деле идёт соединение, решает не то, что вы указали клиенту, а то, как L7-роутер трактует SNI
|
||||||
|
|
||||||
|
Механика:
|
||||||
|
|
||||||
|
1) Telegram-клиенту вы можете указать **IP/домен telemt**, как "сервер".
|
||||||
|
2) Между клиентом и telemt стоит xray-core/sing-box, который принимает TCP, читает TLS ClientHello и видит **SNI=petrovich.ru**
|
||||||
|
3) Дальше роутер говорит: "Вижу SNI - направить на апстрим/маршрут N"
|
||||||
|
4) И устанавливает исходящее соединение не "по тому IP, который пользователь подразумевал", а **по домену из SNI** (или по сопоставлению SNI→outbound), используя для определения его IP собственный DNS-кеш или резолвер
|
||||||
|
5) `petrovich.ru` по A-записи указывает **не на IP telemt**, а значит при L7-маршрутизации трафик уйдёт на "оригинальный" сайт за этим доменом, а не в telemt: Telegram-клиент, естественно, не сможет получить ожидаемое поведение, потому что ответить с handshake на той стороне некому
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Схема №1 "Как это НЕ работает"
|
||||||
|
|
||||||
|
```text
|
||||||
|
Telegram Client
|
||||||
|
|
|
||||||
|
| (указан IP/домен telemt)
|
||||||
|
v
|
||||||
|
telemt instance
|
||||||
|
```
|
||||||
|
|
||||||
|
Ожидание: "я указал telemt -> значит трафик попадёт в telemt" - **нет!**
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Схема №2. "Как это реально работает с TLS/L7-роутером и SNI"
|
||||||
|
|
||||||
|
```text
|
||||||
|
Telegram Client
|
||||||
|
|
|
||||||
|
| 1) TCP/TLS connection:
|
||||||
|
| - ClientHello:
|
||||||
|
| - SNI=petrovich.ru
|
||||||
|
v
|
||||||
|
xray-core / sing-box / любой L7 router
|
||||||
|
|
|
||||||
|
| 2) читает ClientHello -> вытаскивает SNI
|
||||||
|
| 3) выбирает маршрут по SNI
|
||||||
|
| 4) делает DNS для petrovich.ru
|
||||||
|
| 5) подключается к полученному IP по TLS с этим SNI
|
||||||
|
v
|
||||||
|
"Оригинальный" сайт, A-запись которого не на telemt
|
||||||
|
|
|
||||||
|
X не telemt -> Telegram-клиент не коннектится как ожидалось
|
||||||
|
```
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Почему указанный в клиенте IP/домен telemt "не спасает"
|
||||||
|
|
||||||
|
Потому что в таком режиме xray/sing-box выступает как **точка терминации TCP/TLS**, можно сказать - TLS-инспектор на уровне ClientHello, это означает:
|
||||||
|
|
||||||
|
* TCP-сессия от Telegram-клиента заканчивается на xray/sing-box
|
||||||
|
* Дальше создаётся **новая** TCP-сессия "от имени" xray/sing-box к апстриму
|
||||||
|
* Выбор апстрима делается правилами роутинга, а в TLS-сценариях самый удобный и распространённый ключ — **SNI**
|
||||||
|
|
||||||
|
То есть, "куда идти дальше" определяется логикой L7-роутера:
|
||||||
|
|
||||||
|
* либо правилами вида `if SNI == petrovich.ru -> outbound X`,
|
||||||
|
* либо более "автоматическим" поведением: `подключаться к тому хосту, который указан в SNI`,
|
||||||
|
* плюс кэш DNS и собственные резолверы роутера
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Что именно извлекается из TLS ClientHello и почему этого достаточно
|
||||||
|
|
||||||
|
TLS ClientHello отправляется **в начале** TLS-сессии и, в классическом TLS без ECH, содержит SNI в открытом виде.
|
||||||
|
|
||||||
|
Упрощённо:
|
||||||
|
|
||||||
|
```text
|
||||||
|
ClientHello:
|
||||||
|
- supported_versions
|
||||||
|
- cipher_suites
|
||||||
|
- extensions:
|
||||||
|
- server_name: petrovich.ru <-- SNI
|
||||||
|
- alpn: h2/http1.1/...
|
||||||
|
- ...
|
||||||
|
```
|
||||||
|
|
||||||
|
Роутеру не нужно расшифровывать трафик и завершать TLS "как сервер" — часто достаточно просто прочитать первые пакеты и распарсить ClientHello, чтобы получить SNI и принять решение
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Типовой алгоритм SNI-роутинга
|
||||||
|
|
||||||
|
1. Принять входящий TCP.
|
||||||
|
2. Подождать первые байты.
|
||||||
|
3. Определить протокол:
|
||||||
|
|
||||||
|
* если видим TLS ClientHello → парсим SNI/ALPN
|
||||||
|
4. Применить route rules:
|
||||||
|
|
||||||
|
* match по `server_name` / `domain` / `tls.sni`
|
||||||
|
5. Выбрать outbound:
|
||||||
|
|
||||||
|
* direct / proxy / specific upstream / detour
|
||||||
|
6. Установить исходящее соединение:
|
||||||
|
|
||||||
|
* либо на фиксированный IP:порт,
|
||||||
|
* либо на домен через DNS-resolve на стороне роутера
|
||||||
|
7. Начать проксирование данных между входом и выходом
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Почему "A-запись фронтинг-домена не на telemt" ломает кейс
|
||||||
|
|
||||||
|
### Ситуация
|
||||||
|
|
||||||
|
* В ClientHello: `SNI = petrovich.ru`
|
||||||
|
* DNS: `petrovich.ru -> 203.0.113.77` - "оригинальный" сайт
|
||||||
|
* telemt живёт на: `198.51.100.10`
|
||||||
|
|
||||||
|
### Что делает роутер
|
||||||
|
|
||||||
|
* Видит SNI `petrovich.ru`
|
||||||
|
* Либо:
|
||||||
|
|
||||||
|
* (а) напрямую коннектится к `petrovich.ru:443`, резолвя A-запись в `203.0.113.77`,
|
||||||
|
* либо:
|
||||||
|
* (б) выбирает outbound, который указывает на `petrovich.ru` как destination,
|
||||||
|
* либо:
|
||||||
|
* (в) делает sniffing/override destination по SNI
|
||||||
|
|
||||||
|
В итоге исходящий коннект идёт на `203.0.113.77:443`, а не на telemt!
|
||||||
|
Другой сервер, другой протокол, другая логика, где telemt не участвует
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## "Где именно происходит подмена destination на SNI"
|
||||||
|
|
||||||
|
Это зависит от конфигурации, но типовые варианты:
|
||||||
|
|
||||||
|
### Вариант A: outbound задан доменом (и он совпадает с SNI)
|
||||||
|
|
||||||
|
Правило по SNI выбирает outbound, у которого destination задан доменом фронтинга,
|
||||||
|
тогда DNS резолвится на стороне роутера и вы уходите на "оригинальный" хост
|
||||||
|
|
||||||
|
### Вариант B: destination override / sniffing
|
||||||
|
|
||||||
|
Роутер "снифает" SNI и **перезаписывает** destination на домен из SNI (даже если вход изначально был на IP telemt),
|
||||||
|
это особенно коварно: пользователь видит "я подключаюсь к IP telemt", но роутер после sniffing решает иначе
|
||||||
|
|
||||||
|
### Вариант C: split DNS / кеш / независимый резолвер
|
||||||
|
|
||||||
|
Даже если клиент "где-то" резолвит иначе, это не важно: конечный DNS для исходящего коннекта — на стороне xray/sing-box,
|
||||||
|
который может иметь:
|
||||||
|
|
||||||
|
* свой DoH/DoT,
|
||||||
|
* свой кеш,
|
||||||
|
* свои правила fake-ip / system resolver,
|
||||||
|
* и, как следствие, своя "карта" **домен/SNI -> IP**
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Признаки того, что трафик "утёк на оригинал", а не попал в telemt
|
||||||
|
|
||||||
|
* На стороне telemt отсутствуют входящие соединения/логи
|
||||||
|
* На стороне роутера видно, что destination — домен фронтинга, а IP соответствует публичному сайту
|
||||||
|
* TLS-метрики/сертификат на выходе соответствует "оригинальному" сайту в записах трафика
|
||||||
|
* Telegram-клиент получает неожиданный тип ответов/ошибку handshaking/timeout в debug-режиме
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Best-practice решение для этого кейса: свой домен фронтинга + заглушка на telemt + Let's Encrypt
|
||||||
|
|
||||||
|
### Цель
|
||||||
|
|
||||||
|
Сделать так, чтобы:
|
||||||
|
|
||||||
|
* SNI (фронтинг-домен) **резолвился в IP telemt**,
|
||||||
|
* на IP telemt реально был TLS-сервис с валидным сертификатом под этот домен,
|
||||||
|
* даже если кто-то "попробует открыть домен как сайт", он увидит нормальную заглушку, а не "пустоту"
|
||||||
|
|
||||||
|
### Что это даёт
|
||||||
|
|
||||||
|
* xray/sing-box, маршрутизируя по SNI, будет неизбежно приходить на telemt, потому что DNS(SNI-домен) → IP telemt
|
||||||
|
* Внешний вид будет правдоподобным: обычный домен с обычным сертификатом
|
||||||
|
* Устойчивость: меньше сюрпризов от DNS-кеша/перерезолва/"умных" правил роутера
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Рекомендуемая схема (целевое состояние)
|
||||||
|
|
||||||
|
```text
|
||||||
|
Telegram Client
|
||||||
|
|
|
||||||
|
| TLS ClientHello: SNI = hello.example.com
|
||||||
|
v
|
||||||
|
xray-core / sing-box
|
||||||
|
|
|
||||||
|
| Route by SNI -> outbound -> connect to hello.example.com:443
|
||||||
|
| DNS(hello.example.com) = IP telemt
|
||||||
|
v
|
||||||
|
telemt instance (IP telemt)
|
||||||
|
|
|
||||||
|
| TLS cert for hello.example.com (Let's Encrypt)
|
||||||
|
| + сайт-заглушка / health endpoint
|
||||||
|
v
|
||||||
|
OK
|
||||||
|
```
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Практический чеклист (минимальный)
|
||||||
|
|
||||||
|
1. Купить/иметь домен: `hello.example.com`
|
||||||
|
2. В DNS:
|
||||||
|
|
||||||
|
* `A hello.example.com -> <IP telemt>`
|
||||||
|
* (опционально) AAAA, если используете IPv6 и он стабилен
|
||||||
|
3. На telemt-хосте:
|
||||||
|
|
||||||
|
* поднять TLS endpoint на 443 с валидным сертификатом LE под `hello.example.com`
|
||||||
|
* отдать "заглушку" (например, статический сайт), чтобы домен выглядел как обычный веб-сервис
|
||||||
|
4. В xray/sing-box правилах:
|
||||||
|
|
||||||
|
* маршрутизировать нужный трафик по SNI = `hello.example.com` в "правильный" outbound (к telemt)
|
||||||
|
* избегать конфигураций, где destination override уводит на чужой домен
|
||||||
|
5. Важно:
|
||||||
|
|
||||||
|
* если вы используете кеш DNS на роутере — сбросить/обновить его после смены A-записи
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Пояснение про сайт-заглушку
|
||||||
|
|
||||||
|
Для эмуляции TLS, telemt имеет подсистему TLS-F в `src/tls_front`:
|
||||||
|
- её модуль — fetcher — собирает TLS-профили, чтобы максимально поведенчески корректно повторять TLS конкретно указанного сайта
|
||||||
|
|
||||||
|
Когда вы указываете сайт, который не отвечает по TLS:
|
||||||
|
- fetcher не может собрать TLS-профиль и происходит fallback на `fake_cert_len` - примитивный алгоритм,
|
||||||
|
- он забивает служебную информацию TLS рандомными байтами,
|
||||||
|
- простые системы DPI не распознают это
|
||||||
|
- однако, продвинутые системы, такие как nEdge или Fraud Control в сетях мобильной связи легко заблокируют или замедлят такой трафик
|
||||||
|
|
||||||
|
Создав сайт-заглушку с Let's Encrypt сертификатом, вы даёте TLS-F возможность получить данные сертификата и корректно его "повторять" в дальнейшем
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Вариант конфиг-подхода: "SNI строго привязываем к telemt - фиксированный IP"
|
||||||
|
|
||||||
|
Чтобы полностью исключить зависимость от DNS, если вам это нужно, можно сделать outbound, который ходит на **фиксированный IP telemt**, но при этом выставляет SNI/Host как `hello.example.com`.
|
||||||
|
|
||||||
|
Идея:
|
||||||
|
|
||||||
|
* destination: `IP:443`
|
||||||
|
* SNI: `hello.example.com`
|
||||||
|
* сертификат на telemt именно под `hello.example.com`
|
||||||
|
|
||||||
|
Так вы получаете:
|
||||||
|
|
||||||
|
* TLS выглядит корректно, ведь SNI совпадает с сертификатом,
|
||||||
|
* а routing никогда не уйдёт на "оригинал", потому что A-запись указывает на telemt и контролируется вами!
|
||||||
|
|
||||||
|
Но в вашем описании проблема как раз в том, что роутер "сам решает по SNI и резолвит домен", поэтому самый универсальный вариант — сделать так, чтобы DNS всегда приводил в telemt
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Пример логики правил на псевдоконфиге L7-роутера
|
||||||
|
|
||||||
|
```text
|
||||||
|
if inbound is TLS and sni == "hello.example.com":
|
||||||
|
route -> outbound "telemt"
|
||||||
|
else:
|
||||||
|
route -> outbound "default"
|
||||||
|
```
|
||||||
|
|
||||||
|
Outbound `telemt`:
|
||||||
|
|
||||||
|
* destination: `hello.example.com:443`
|
||||||
|
* TLS enabled
|
||||||
|
* SNI: `hello.example.com`
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Отдельно: что может неожиданно сломать даже "правильный" DNS
|
||||||
|
|
||||||
|
* **Кеширование DNS** на xray/sing-box или на системном резолвере, особенно при смене A-записи
|
||||||
|
* **Split-horizon DNS**: разные ответы внутри/снаружи, попытки подмены/терминирования в других точках
|
||||||
|
* **IPv6**: если есть AAAA и он указывает не туда, роутер может предпочесть IPv6: помните, что поддержка v6 нестабильна и не рекомендуется в prod
|
||||||
|
* **DoH/DoT** на роутере: он может резолвить не тем резолвером, которым вы проверяли
|
||||||
|
|
||||||
|
Минимальная гигиена:
|
||||||
|
|
||||||
|
* контролировать A/AAAA,
|
||||||
|
* держать TTL разумным,
|
||||||
|
* проверять, каким резолвером пользуется именно роутер,
|
||||||
|
* при необходимости отключить/ограничить destination override
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Итог
|
||||||
|
|
||||||
|
В режиме TLS-fronting с xray-core/sing-box как L7/TLS-роутером **SNI становится приоритетным "source-of-truth" для маршрутизации**
|
||||||
|
|
||||||
|
Если фронтинг-домен по DNS указывает не на IP telemt, роутер честно уводит трафик на "оригинальный" сайт, потому что он строит исходящее соединение "по SNI"
|
||||||
|
|
||||||
|
Надёжное решение для этого кейса:
|
||||||
|
|
||||||
|
* использовать **свой домен** для фронтинга,
|
||||||
|
* направить его **A/AAAA** на IP telemt,
|
||||||
|
* поднять на telemt **TLS-сервис с Let’s Encrypt сертификатом** под этот домен,
|
||||||
|
* (желательно) держать **сайт-заглушку**, чтобы 443 выглядел как обычный HTTPS
|
||||||
285
docs/model/MODEL.en.md
Normal file
285
docs/model/MODEL.en.md
Normal file
@@ -0,0 +1,285 @@
|
|||||||
|
# Telemt Runtime Model
|
||||||
|
|
||||||
|
## Scope
|
||||||
|
This document defines runtime concepts used by the Middle-End (ME) transport pipeline and the orchestration logic around it.
|
||||||
|
|
||||||
|
It focuses on:
|
||||||
|
- `ME Pool / Reader / Writer / Refill / Registry`
|
||||||
|
- `Adaptive Floor`
|
||||||
|
- `Trio-State`
|
||||||
|
- `Generation Lifecycle`
|
||||||
|
|
||||||
|
## Core Entities
|
||||||
|
|
||||||
|
### ME Pool
|
||||||
|
`ME Pool` is the runtime orchestrator for all Middle-End writers.
|
||||||
|
|
||||||
|
Responsibilities:
|
||||||
|
- Holds writer inventory by DC/family/endpoint.
|
||||||
|
- Maintains routing primitives and writer selection policy.
|
||||||
|
- Tracks generation state (`active`, `warm`, `draining` context).
|
||||||
|
- Applies runtime policies (floor mode, refill, reconnect, reinit, fallback behavior).
|
||||||
|
- Exposes readiness gates used by admission logic (for conditional accept/cast behavior).
|
||||||
|
|
||||||
|
Non-goals:
|
||||||
|
- It does not own client protocol decoding.
|
||||||
|
- It does not own per-client business policy (quotas/limits).
|
||||||
|
|
||||||
|
### ME Writer
|
||||||
|
`ME Writer` is a long-lived ME RPC tunnel bound to one concrete ME endpoint (`ip:port`), with:
|
||||||
|
- Outbound command channel (send path).
|
||||||
|
- Associated reader loop (inbound path).
|
||||||
|
- Health/degraded flags.
|
||||||
|
- Contour/state and generation metadata.
|
||||||
|
|
||||||
|
A writer is the actual data plane carrier for client sessions once bound.
|
||||||
|
|
||||||
|
### ME Reader
|
||||||
|
`ME Reader` is the inbound parser/dispatcher for one writer:
|
||||||
|
- Reads/decrypts ME RPC frames.
|
||||||
|
- Validates sequence/checksum.
|
||||||
|
- Routes payloads to client-connection channels via `Registry`.
|
||||||
|
- Emits close/ack/data events and updates telemetry.
|
||||||
|
|
||||||
|
Design intent:
|
||||||
|
- Reader must stay non-blocking as much as possible.
|
||||||
|
- Backpressure on a single client route must not stall the whole writer stream.
|
||||||
|
|
||||||
|
### Refill
|
||||||
|
`Refill` is the recovery mechanism that restores writer coverage when capacity drops:
|
||||||
|
- Per-endpoint restore (same endpoint first).
|
||||||
|
- Per-DC restore to satisfy required floor.
|
||||||
|
- Optional outage-mode/shadow behavior for fragile single-endpoint DCs.
|
||||||
|
|
||||||
|
Refill works asynchronously and should not block hot routing paths.
|
||||||
|
|
||||||
|
### Registry
|
||||||
|
`Registry` is the routing index between ME and client sessions:
|
||||||
|
- `conn_id -> client response channel`
|
||||||
|
- `conn_id <-> writer_id` binding map
|
||||||
|
- writer activity snapshots and idle tracking
|
||||||
|
|
||||||
|
Main invariants:
|
||||||
|
- A `conn_id` routes to at most one active response channel.
|
||||||
|
- Writer loss triggers safe unbind/cleanup and close propagation.
|
||||||
|
- Registry state is the source of truth for active ME-bound session mapping.
|
||||||
|
|
||||||
|
## Adaptive Floor
|
||||||
|
|
||||||
|
### What it is
|
||||||
|
`Adaptive Floor` is a runtime policy that changes target writer count per DC based on observed activity, instead of always holding static peak floor.
|
||||||
|
|
||||||
|
### Why it exists
|
||||||
|
Goals:
|
||||||
|
- Reduce idle writer churn under low traffic.
|
||||||
|
- Keep enough warm capacity to avoid client-visible stalls on burst recovery.
|
||||||
|
- Limit needless reconnect storms on unstable endpoints.
|
||||||
|
|
||||||
|
### Behavioral model
|
||||||
|
- Under activity: floor converges toward configured static requirement.
|
||||||
|
- Under prolonged idle: floor can shrink to a safe minimum.
|
||||||
|
- Recovery/grace windows prevent aggressive oscillation.
|
||||||
|
|
||||||
|
### Safety constraints
|
||||||
|
- Never violate minimal survivability floor for a DC group.
|
||||||
|
- Refill must still restore quickly on demand.
|
||||||
|
- Floor adaptation must not force-drop already bound healthy sessions.
|
||||||
|
|
||||||
|
## Trio-State
|
||||||
|
|
||||||
|
`Trio-State` is writer contouring:
|
||||||
|
- `Warm`
|
||||||
|
- `Active`
|
||||||
|
- `Draining`
|
||||||
|
|
||||||
|
### State semantics
|
||||||
|
- `Warm`: connected and validated, not primary for new binds.
|
||||||
|
- `Active`: preferred for new binds and normal traffic.
|
||||||
|
- `Draining`: no new regular binds; existing sessions continue until graceful retirement rules apply.
|
||||||
|
|
||||||
|
### Transition intent
|
||||||
|
- `Warm -> Active`: when coverage/readiness conditions are satisfied.
|
||||||
|
- `Active -> Draining`: on generation swap, endpoint replacement, or controlled retirement.
|
||||||
|
- `Draining -> removed`: after drain TTL/force-close policy (or when naturally empty).
|
||||||
|
|
||||||
|
This separation reduces SPOF and keeps cutovers predictable.
|
||||||
|
|
||||||
|
## Generation Lifecycle
|
||||||
|
|
||||||
|
Generation isolates pool epochs during reinit/reconfiguration.
|
||||||
|
|
||||||
|
### Lifecycle phases
|
||||||
|
1. `Bootstrap`: initial writers are established.
|
||||||
|
2. `Warmup`: next generation writers are created and validated.
|
||||||
|
3. `Activation`: generation promoted to active when coverage gate passes.
|
||||||
|
4. `Drain`: previous generation becomes draining, existing sessions are allowed to finish.
|
||||||
|
5. `Retire`: old generation writers are removed after graceful rules.
|
||||||
|
|
||||||
|
### Operational guarantees
|
||||||
|
- No partial generation activation without minimum coverage.
|
||||||
|
- Existing healthy client sessions should not be dropped just because a new generation appears.
|
||||||
|
- Draining generation exists to absorb in-flight traffic during swap.
|
||||||
|
|
||||||
|
### Readiness and admission
|
||||||
|
Pool readiness is not equivalent to “all endpoints fully saturated”.
|
||||||
|
Typical gating strategy:
|
||||||
|
- Open admission when per-DC minimal alive coverage exists.
|
||||||
|
- Continue background saturation for multi-endpoint DCs.
|
||||||
|
|
||||||
|
This keeps startup latency low while preserving eventual full capacity.
|
||||||
|
|
||||||
|
## Interactions Between Concepts
|
||||||
|
|
||||||
|
- `Generation` defines pool epochs.
|
||||||
|
- `Trio-State` defines per-writer role inside/around those epochs.
|
||||||
|
- `Adaptive Floor` defines how much capacity should be maintained right now.
|
||||||
|
- `Refill` is the actuator that closes the gap between desired and current capacity.
|
||||||
|
- `Registry` keeps per-session routing correctness while all of the above changes over time.
|
||||||
|
|
||||||
|
## Architectural Approach
|
||||||
|
|
||||||
|
### Layered Design
|
||||||
|
The runtime is intentionally split into two planes:
|
||||||
|
- `Control Plane`: decides desired topology and policy (`floor`, `generation swap`, `refill`, `fallback`).
|
||||||
|
- `Data Plane`: executes packet/session transport (`reader`, `writer`, routing, acks, close propagation).
|
||||||
|
|
||||||
|
Architectural rule:
|
||||||
|
- Control Plane may change writer inventory and policy.
|
||||||
|
- Data Plane must remain stable and low-latency while those changes happen.
|
||||||
|
|
||||||
|
### Ownership Model
|
||||||
|
Ownership is centered around explicit state domains:
|
||||||
|
- `MePool` owns writer lifecycle and policy state.
|
||||||
|
- `Registry` owns per-connection routing bindings.
|
||||||
|
- `Writer task` owns outbound ME socket send progression.
|
||||||
|
- `Reader task` owns inbound ME socket parsing and event dispatch.
|
||||||
|
|
||||||
|
This prevents accidental cross-layer mutation and keeps invariants local.
|
||||||
|
|
||||||
|
### Control Plane Responsibilities
|
||||||
|
Control Plane is event-driven and policy-driven:
|
||||||
|
- Startup initialization and readiness gates.
|
||||||
|
- Runtime reinit (periodic or config-triggered).
|
||||||
|
- Coverage checks per DC/family/endpoint group.
|
||||||
|
- Floor enforcement (static/adaptive).
|
||||||
|
- Refill scheduling and retry orchestration.
|
||||||
|
- Generation transition (`warm -> active`, previous `active -> draining`).
|
||||||
|
|
||||||
|
Control Plane must prioritize determinism over short-term aggressiveness.
|
||||||
|
|
||||||
|
### Data Plane Responsibilities
|
||||||
|
Data Plane is throughput-first and allocation-sensitive:
|
||||||
|
- Session bind to writer.
|
||||||
|
- Per-frame parsing/validation and dispatch.
|
||||||
|
- Ack and close signal propagation.
|
||||||
|
- Route drop behavior under missing connection or closed channel.
|
||||||
|
- Minimal critical logging in hot path.
|
||||||
|
|
||||||
|
Data Plane should avoid waiting on operations that are not strictly required for frame correctness.
|
||||||
|
|
||||||
|
## Concurrency and Synchronization
|
||||||
|
|
||||||
|
### Concurrency Principles
|
||||||
|
- Per-writer isolation: each writer has independent send/read task loops.
|
||||||
|
- Per-connection isolation: client channel state is scoped by `conn_id`.
|
||||||
|
- Asynchronous recovery: refill/reconnect runs outside the packet hot path.
|
||||||
|
|
||||||
|
### Synchronization Strategy
|
||||||
|
- Shared maps use fine-grained, short-lived locking.
|
||||||
|
- Read-mostly paths avoid broad write-lock windows.
|
||||||
|
- Backpressure decisions are localized at route/channel boundary.
|
||||||
|
|
||||||
|
Design target:
|
||||||
|
- A slow consumer should degrade only itself (or its route), not global writer progress.
|
||||||
|
|
||||||
|
### Cancellation and Shutdown
|
||||||
|
Writer and reader loops are cancellation-aware:
|
||||||
|
- explicit cancel token / close command support;
|
||||||
|
- safe unbind and cleanup via registry;
|
||||||
|
- deterministic order: stop admission -> drain/close -> release resources.
|
||||||
|
|
||||||
|
## Consistency Model
|
||||||
|
|
||||||
|
### Session Consistency
|
||||||
|
For one `conn_id`:
|
||||||
|
- exactly one active route target at a time;
|
||||||
|
- close and unbind must be idempotent;
|
||||||
|
- writer loss must not leave dangling bindings.
|
||||||
|
|
||||||
|
### Generation Consistency
|
||||||
|
Generational consistency guarantees:
|
||||||
|
- New generation is not promoted before minimum coverage gate.
|
||||||
|
- Previous generation remains available in `draining` state during handover.
|
||||||
|
- Forced retirement is policy-bound (`drain ttl`, optional force-close), not immediate.
|
||||||
|
|
||||||
|
### Policy Consistency
|
||||||
|
Policy changes (`adaptive/static floor`, fallback mode, retries) should apply without violating established active-session routing invariants.
|
||||||
|
|
||||||
|
## Backpressure and Flow Control
|
||||||
|
|
||||||
|
### Route-Level Backpressure
|
||||||
|
Route channels are bounded by design.
|
||||||
|
When pressure increases:
|
||||||
|
- short burst absorption is allowed;
|
||||||
|
- prolonged congestion triggers controlled drop semantics;
|
||||||
|
- drop accounting is explicit via metrics/counters.
|
||||||
|
|
||||||
|
### Reader Non-Blocking Priority
|
||||||
|
Inbound ME reader path should never be serialized behind one congested client route.
|
||||||
|
Practical implication:
|
||||||
|
- prefer non-blocking route attempt in the parser loop;
|
||||||
|
- move heavy recovery to async side paths.
|
||||||
|
|
||||||
|
## Failure Domain Strategy
|
||||||
|
|
||||||
|
### Endpoint-Level Failure
|
||||||
|
Failure of one endpoint should trigger endpoint-scoped recovery first:
|
||||||
|
- same endpoint reconnect;
|
||||||
|
- endpoint replacement within same DC group if applicable.
|
||||||
|
|
||||||
|
### DC-Level Degradation
|
||||||
|
If a DC group cannot satisfy floor:
|
||||||
|
- keep service via remaining coverage if policy allows;
|
||||||
|
- continue asynchronous refill saturation in background.
|
||||||
|
|
||||||
|
### Whole-Pool Readiness Loss
|
||||||
|
If no sufficient ME coverage exists:
|
||||||
|
- admission gate can hold new accepts (conditional policy);
|
||||||
|
- existing sessions should continue when their path remains healthy.
|
||||||
|
|
||||||
|
## Performance Architecture Notes
|
||||||
|
|
||||||
|
### Hotpath Discipline
|
||||||
|
Allowed in hotpath:
|
||||||
|
- fixed-size parsing and cheap validation;
|
||||||
|
- bounded channel operations;
|
||||||
|
- precomputed or low-allocation access patterns.
|
||||||
|
|
||||||
|
Avoid in hotpath:
|
||||||
|
- repeated expensive decoding;
|
||||||
|
- broad locks with awaits inside critical sections;
|
||||||
|
- verbose high-frequency logging.
|
||||||
|
|
||||||
|
### Throughput Stability Over Peak Spikes
|
||||||
|
Architecture prefers stable throughput and predictable latency over short peak gains that increase churn or long-tail reconnect times.
|
||||||
|
|
||||||
|
## Evolution and Extension Rules
|
||||||
|
|
||||||
|
To evolve this model safely:
|
||||||
|
- Add new policy knobs in Control Plane first.
|
||||||
|
- Keep Data Plane contracts stable (`conn_id`, route semantics, close semantics).
|
||||||
|
- Validate generation and registry invariants before enabling by default.
|
||||||
|
- Introduce new retry/recovery strategies behind explicit config.
|
||||||
|
|
||||||
|
## Failure and Recovery Notes
|
||||||
|
|
||||||
|
- Single-endpoint DC failure is a normal degraded mode case; policy should prioritize fast reconnect and optional shadow/probing strategies.
|
||||||
|
- Idle close by peer should be treated as expected when upstream enforces idle timeout.
|
||||||
|
- Reconnect backoff must protect against synchronized churn while still allowing fast first retries.
|
||||||
|
- Fallback (`ME -> direct DC`) is a policy switch, not a transport bug by itself.
|
||||||
|
|
||||||
|
## Terminology Summary
|
||||||
|
- `Coverage`: enough live writers to satisfy per-DC acceptance policy.
|
||||||
|
- `Floor`: target minimum writer count policy.
|
||||||
|
- `Churn`: frequent writer reconnect/remove cycles.
|
||||||
|
- `Hotpath`: per-packet/per-connection data path where extra waits/allocations are expensive.
|
||||||
285
docs/model/MODEL.ru.md
Normal file
285
docs/model/MODEL.ru.md
Normal file
@@ -0,0 +1,285 @@
|
|||||||
|
# Runtime-модель Telemt
|
||||||
|
|
||||||
|
## Область описания
|
||||||
|
Документ фиксирует ключевые runtime-понятия пайплайна Middle-End (ME) и оркестрации вокруг него.
|
||||||
|
|
||||||
|
Фокус:
|
||||||
|
- `ME Pool / Reader / Writer / Refill / Registry`
|
||||||
|
- `Adaptive Floor`
|
||||||
|
- `Trio-State`
|
||||||
|
- `Generation Lifecycle`
|
||||||
|
|
||||||
|
## Базовые сущности
|
||||||
|
|
||||||
|
### ME Pool
|
||||||
|
`ME Pool` — центральный оркестратор всех Middle-End writer-ов.
|
||||||
|
|
||||||
|
Зона ответственности:
|
||||||
|
- хранит инвентарь writer-ов по DC/family/endpoint;
|
||||||
|
- управляет выбором writer-а и маршрутизацией;
|
||||||
|
- ведёт состояние поколений (`active`, `warm`, `draining` контекст);
|
||||||
|
- применяет runtime-политики (floor, refill, reconnect, reinit, fallback);
|
||||||
|
- отдаёт сигналы готовности для admission-логики (conditional accept/cast).
|
||||||
|
|
||||||
|
Что не делает:
|
||||||
|
- не декодирует клиентский протокол;
|
||||||
|
- не реализует бизнес-политику пользователя (квоты/лимиты).
|
||||||
|
|
||||||
|
### ME Writer
|
||||||
|
`ME Writer` — долгоживущий ME RPC-канал к конкретному endpoint (`ip:port`), у которого есть:
|
||||||
|
- канал команд на отправку;
|
||||||
|
- связанный reader loop для входящего потока;
|
||||||
|
- флаги состояния/деградации;
|
||||||
|
- метаданные contour/state и generation.
|
||||||
|
|
||||||
|
Writer — это фактический data-plane носитель клиентских сессий после бинда.
|
||||||
|
|
||||||
|
### ME Reader
|
||||||
|
`ME Reader` — входной parser/dispatcher одного writer-а:
|
||||||
|
- читает и расшифровывает ME RPC-фреймы;
|
||||||
|
- проверяет sequence/checksum;
|
||||||
|
- маршрутизирует payload в client-каналы через `Registry`;
|
||||||
|
- обрабатывает close/ack/data и обновляет телеметрию.
|
||||||
|
|
||||||
|
Инженерный принцип:
|
||||||
|
- Reader должен оставаться неблокирующим.
|
||||||
|
- Backpressure одной клиентской сессии не должен останавливать весь поток writer-а.
|
||||||
|
|
||||||
|
### Refill
|
||||||
|
`Refill` — механизм восстановления покрытия writer-ов при просадке:
|
||||||
|
- восстановление на том же endpoint в первую очередь;
|
||||||
|
- восстановление по DC до требуемого floor;
|
||||||
|
- опциональные outage/shadow-режимы для хрупких single-endpoint DC.
|
||||||
|
|
||||||
|
Refill работает асинхронно и не должен блокировать hotpath.
|
||||||
|
|
||||||
|
### Registry
|
||||||
|
`Registry` — маршрутизационный индекс между ME и клиентскими сессиями:
|
||||||
|
- `conn_id -> канал ответа клиенту`;
|
||||||
|
- map биндов `conn_id <-> writer_id`;
|
||||||
|
- снимки активности writer-ов и idle-трекинг.
|
||||||
|
|
||||||
|
Ключевые инварианты:
|
||||||
|
- один `conn_id` маршрутизируется максимум в один активный канал ответа;
|
||||||
|
- потеря writer-а приводит к безопасному unbind/cleanup и отправке close;
|
||||||
|
- именно `Registry` является источником истины по активным ME-биндам.
|
||||||
|
|
||||||
|
## Adaptive Floor
|
||||||
|
|
||||||
|
### Что это
|
||||||
|
`Adaptive Floor` — runtime-политика, которая динамически меняет целевое число writer-ов на DC в зависимости от активности, а не держит всегда фиксированный статический floor.
|
||||||
|
|
||||||
|
### Зачем
|
||||||
|
Цели:
|
||||||
|
- уменьшить churn на idle-трафике;
|
||||||
|
- сохранить достаточную прогретую ёмкость для быстрых всплесков;
|
||||||
|
- снизить лишние reconnect-штормы на нестабильных endpoint.
|
||||||
|
|
||||||
|
### Модель поведения
|
||||||
|
- при активности floor стремится к статическому требованию;
|
||||||
|
- при длительном idle floor может снижаться до безопасного минимума;
|
||||||
|
- grace/recovery окна не дают системе "флапать" слишком резко.
|
||||||
|
|
||||||
|
### Ограничения безопасности
|
||||||
|
- нельзя нарушать минимальный floor выживаемости DC-группы;
|
||||||
|
- refill обязан быстро нарастить покрытие по запросу;
|
||||||
|
- адаптация не должна принудительно ронять уже привязанные healthy-сессии.
|
||||||
|
|
||||||
|
## Trio-State
|
||||||
|
|
||||||
|
`Trio-State` — контурная роль writer-а:
|
||||||
|
- `Warm`
|
||||||
|
- `Active`
|
||||||
|
- `Draining`
|
||||||
|
|
||||||
|
### Семантика состояний
|
||||||
|
- `Warm`: writer подключён и валиден, но не основной для новых биндов.
|
||||||
|
- `Active`: приоритетный для новых биндов и обычного трафика.
|
||||||
|
- `Draining`: новые обычные бинды не назначаются; текущие сессии живут до правил graceful-вывода.
|
||||||
|
|
||||||
|
### Логика переходов
|
||||||
|
- `Warm -> Active`: когда достигнуты условия покрытия/готовности.
|
||||||
|
- `Active -> Draining`: при swap поколения, замене endpoint или контролируемом выводе.
|
||||||
|
- `Draining -> removed`: после drain TTL/force-close политики (или естественного опустошения).
|
||||||
|
|
||||||
|
Такое разделение снижает SPOF-риски и делает cutover предсказуемым.
|
||||||
|
|
||||||
|
## Generation Lifecycle
|
||||||
|
|
||||||
|
Generation изолирует эпохи пула при reinit/reconfiguration.
|
||||||
|
|
||||||
|
### Фазы жизненного цикла
|
||||||
|
1. `Bootstrap`: поднимается начальный набор writer-ов.
|
||||||
|
2. `Warmup`: создаётся и валидируется новое поколение.
|
||||||
|
3. `Activation`: новое поколение становится active после прохождения coverage-gate.
|
||||||
|
4. `Drain`: предыдущее поколение переводится в draining, текущим сессиям дают завершиться.
|
||||||
|
5. `Retire`: старое поколение удаляется по graceful-правилам.
|
||||||
|
|
||||||
|
### Операционные гарантии
|
||||||
|
- нельзя активировать поколение частично без минимального покрытия;
|
||||||
|
- healthy-клиенты не должны теряться только из-за появления нового поколения;
|
||||||
|
- draining-поколение служит буфером для in-flight трафика во время swap.
|
||||||
|
|
||||||
|
### Готовность и приём клиентов
|
||||||
|
Готовность пула не равна "все endpoint полностью насыщены".
|
||||||
|
Типичная стратегия:
|
||||||
|
- открыть admission при минимально достаточном alive-покрытии по DC;
|
||||||
|
- параллельно продолжать saturation для multi-endpoint DC.
|
||||||
|
|
||||||
|
Это уменьшает startup latency и сохраняет выход на полную ёмкость.
|
||||||
|
|
||||||
|
## Как понятия связаны между собой
|
||||||
|
|
||||||
|
- `Generation` задаёт эпохи пула.
|
||||||
|
- `Trio-State` задаёт роль каждого writer-а внутри/между эпохами.
|
||||||
|
- `Adaptive Floor` задаёт, сколько ёмкости нужно сейчас.
|
||||||
|
- `Refill` — исполнитель, который закрывает разницу между desired и current capacity.
|
||||||
|
- `Registry` гарантирует корректную маршрутизацию сессий, пока всё выше меняется.
|
||||||
|
|
||||||
|
## Архитектурный подход
|
||||||
|
|
||||||
|
### Слоистая модель
|
||||||
|
Runtime специально разделён на две плоскости:
|
||||||
|
- `Control Plane`: принимает решения о целевой топологии и политиках (`floor`, `generation swap`, `refill`, `fallback`).
|
||||||
|
- `Data Plane`: исполняет транспорт сессий и пакетов (`reader`, `writer`, маршрутизация, ack, close).
|
||||||
|
|
||||||
|
Ключевое правило:
|
||||||
|
- Control Plane может менять состав writer-ов и policy.
|
||||||
|
- Data Plane должен оставаться стабильным и низколатентным в момент этих изменений.
|
||||||
|
|
||||||
|
### Модель владения состоянием
|
||||||
|
Владение разделено по доменам:
|
||||||
|
- `MePool` владеет жизненным циклом writer-ов и policy-state.
|
||||||
|
- `Registry` владеет routing-биндами клиентских сессий.
|
||||||
|
- `Writer task` владеет исходящей прогрессией ME-сокета.
|
||||||
|
- `Reader task` владеет входящим парсингом и dispatch-событиями.
|
||||||
|
|
||||||
|
Это ограничивает побочные мутации и локализует инварианты.
|
||||||
|
|
||||||
|
### Обязанности Control Plane
|
||||||
|
Control Plane работает событийно и policy-ориентированно:
|
||||||
|
- стартовая инициализация и readiness-gate;
|
||||||
|
- runtime reinit (периодический и/или по изменению конфигурации);
|
||||||
|
- проверки покрытия по DC/family/endpoint group;
|
||||||
|
- применение floor-политики (static/adaptive);
|
||||||
|
- планирование refill и orchestration retry;
|
||||||
|
- переходы поколений (`warm -> active`, прежний `active -> draining`).
|
||||||
|
|
||||||
|
Для него важнее детерминизм, чем агрессивная краткосрочная реакция.
|
||||||
|
|
||||||
|
### Обязанности Data Plane
|
||||||
|
Data Plane ориентирован на пропускную способность и предсказуемую задержку:
|
||||||
|
- bind клиентской сессии к writer-у;
|
||||||
|
- per-frame parsing/validation/dispatch;
|
||||||
|
- распространение ack/close;
|
||||||
|
- корректная реакция на missing conn/closed channel;
|
||||||
|
- минимальный лог-шум в hotpath.
|
||||||
|
|
||||||
|
Data Plane не должен ждать операций, не критичных для корректности текущего фрейма.
|
||||||
|
|
||||||
|
## Конкурентность и синхронизация
|
||||||
|
|
||||||
|
### Принципы конкурентности
|
||||||
|
- Изоляция по writer-у: у каждого writer-а независимые send/read loop.
|
||||||
|
- Изоляция по сессии: состояние канала локально для `conn_id`.
|
||||||
|
- Асинхронное восстановление: refill/reconnect выполняются вне пакетного hotpath.
|
||||||
|
|
||||||
|
### Стратегия синхронизации
|
||||||
|
- Для shared map используются короткие и узкие lock-секции.
|
||||||
|
- Read-heavy пути избегают длительных write-lock окон.
|
||||||
|
- Решения по backpressure локализованы на границе route/channel.
|
||||||
|
|
||||||
|
Цель:
|
||||||
|
- медленный consumer должен деградировать локально, не останавливая глобальный прогресс writer-а.
|
||||||
|
|
||||||
|
### Cancellation и shutdown
|
||||||
|
Reader/Writer loop должны быть cancellation-aware:
|
||||||
|
- явные cancel token / close command;
|
||||||
|
- безопасный unbind/cleanup через registry;
|
||||||
|
- детерминированный порядок: stop admission -> drain/close -> release resources.
|
||||||
|
|
||||||
|
## Модель согласованности
|
||||||
|
|
||||||
|
### Согласованность сессии
|
||||||
|
Для одного `conn_id`:
|
||||||
|
- одновременно ровно один активный route-target;
|
||||||
|
- close/unbind операции идемпотентны;
|
||||||
|
- потеря writer-а не оставляет dangling-бинды.
|
||||||
|
|
||||||
|
### Согласованность поколения
|
||||||
|
Гарантии generation:
|
||||||
|
- новое поколение не активируется до прохождения минимального coverage-gate;
|
||||||
|
- предыдущее поколение остаётся в `draining` на время handover;
|
||||||
|
- принудительный вывод writer-ов ограничен policy (`drain ttl`, optional force-close), а не мгновенный.
|
||||||
|
|
||||||
|
### Согласованность политик
|
||||||
|
Изменение policy (`adaptive/static floor`, fallback mode, retries) не должно ломать инварианты маршрутизации уже активных сессий.
|
||||||
|
|
||||||
|
## Backpressure и управление потоком
|
||||||
|
|
||||||
|
### Route-level backpressure
|
||||||
|
Route-каналы намеренно bounded.
|
||||||
|
При росте нагрузки:
|
||||||
|
- кратковременный burst поглощается;
|
||||||
|
- длительная перегрузка переходит в контролируемую drop-семантику;
|
||||||
|
- все drop-сценарии должны быть прозрачно видны в метриках.
|
||||||
|
|
||||||
|
### Приоритет неблокирующего Reader
|
||||||
|
Входящий ME-reader path не должен сериализоваться из-за одной перегруженной клиентской сессии.
|
||||||
|
Практически это означает:
|
||||||
|
- использовать неблокирующую попытку route в parser loop;
|
||||||
|
- выносить тяжёлое восстановление в асинхронные side-path.
|
||||||
|
|
||||||
|
## Стратегия доменов отказа
|
||||||
|
|
||||||
|
### Отказ отдельного endpoint
|
||||||
|
Сначала применяется endpoint-local recovery:
|
||||||
|
- reconnect в тот же endpoint;
|
||||||
|
- затем замена endpoint внутри той же DC-группы (если доступно).
|
||||||
|
|
||||||
|
### Деградация уровня DC
|
||||||
|
Если DC-группа не набирает floor:
|
||||||
|
- сервис сохраняется на остаточном покрытии (если policy разрешает);
|
||||||
|
- saturation refill продолжается асинхронно в фоне.
|
||||||
|
|
||||||
|
### Потеря готовности всего пула
|
||||||
|
Если достаточного ME-покрытия нет:
|
||||||
|
- admission gate может временно закрыть приём новых подключений (conditional policy);
|
||||||
|
- уже активные сессии продолжают работать, пока их маршрут остаётся healthy.
|
||||||
|
|
||||||
|
## Архитектурные заметки по производительности
|
||||||
|
|
||||||
|
### Дисциплина hotpath
|
||||||
|
Допустимо в hotpath:
|
||||||
|
- фиксированный и дешёвый parsing/validation;
|
||||||
|
- bounded channel operations;
|
||||||
|
- precomputed/low-allocation доступ к данным.
|
||||||
|
|
||||||
|
Нежелательно в hotpath:
|
||||||
|
- повторные дорогие decode;
|
||||||
|
- широкие lock-секции с `await` внутри;
|
||||||
|
- высокочастотный подробный logging.
|
||||||
|
|
||||||
|
### Стабильность важнее пиков
|
||||||
|
Архитектура приоритетно выбирает стабильную пропускную способность и предсказуемую latency, а не краткосрочные пики ценой churn и long-tail reconnect.
|
||||||
|
|
||||||
|
## Правила эволюции модели
|
||||||
|
|
||||||
|
Чтобы расширять модель безопасно:
|
||||||
|
- новые policy knobs сначала внедрять в Control Plane;
|
||||||
|
- контракты Data Plane (`conn_id`, route/close семантика) держать стабильными;
|
||||||
|
- перед дефолтным включением проверять generation/registry инварианты;
|
||||||
|
- новые recovery/retry стратегии вводить через явный config-флаг.
|
||||||
|
|
||||||
|
## Нюансы отказов и восстановления
|
||||||
|
|
||||||
|
- падение single-endpoint DC — штатный деградированный сценарий; приоритет: быстрый reconnect и, при необходимости, shadow/probing;
|
||||||
|
- idle-close со стороны peer должен считаться нормальным событием при upstream idle-timeout;
|
||||||
|
- backoff reconnect-логики должен ограничивать синхронный churn, но сохранять быстрые первые попытки;
|
||||||
|
- fallback (`ME -> direct DC`) — это переключаемая policy-ветка, а не автоматический признак бага транспорта.
|
||||||
|
|
||||||
|
## Краткий словарь
|
||||||
|
- `Coverage`: достаточное число живых writer-ов для политики приёма по DC.
|
||||||
|
- `Floor`: целевая минимальная ёмкость writer-ов.
|
||||||
|
- `Churn`: частые циклы reconnect/remove writer-ов.
|
||||||
|
- `Hotpath`: пер-пакетный/пер-коннектный путь, где любые лишние ожидания и аллокации особенно дороги.
|
||||||
115
install.sh
Normal file
115
install.sh
Normal file
@@ -0,0 +1,115 @@
|
|||||||
|
#!/bin/sh
|
||||||
|
set -eu
|
||||||
|
|
||||||
|
REPO="${REPO:-telemt/telemt}"
|
||||||
|
BIN_NAME="${BIN_NAME:-telemt}"
|
||||||
|
VERSION="${1:-${VERSION:-latest}}"
|
||||||
|
INSTALL_DIR="${INSTALL_DIR:-/usr/local/bin}"
|
||||||
|
|
||||||
|
# Print an informational message to stdout.
say() {
    printf '%s\n' "$*"
}
|
||||||
|
|
||||||
|
# Print an error message to stderr and abort with a non-zero status.
die() {
    printf 'Error: %s\n' "$*" >&2
    exit 1
}
|
||||||
|
|
||||||
|
# Abort unless the given command is available on PATH.
need_cmd() {
    if ! command -v "$1" >/dev/null 2>&1; then
        die "required command not found: $1"
    fi
}
|
||||||
|
|
||||||
|
# Map `uname -s` onto the identifiers used by release artifacts.
# Unknown systems are passed through verbatim so the caller can report them.
detect_os() {
    os="$(uname -s)"
    case "$os" in
        Linux)   printf 'linux\n' ;;
        OpenBSD) printf 'openbsd\n' ;;
        *)       printf '%s\n' "$os" ;;
    esac
}
|
||||||
|
|
||||||
|
# Normalize `uname -m` to the architecture labels used in release filenames.
# Unknown architectures are fatal: no release artifact exists for them.
detect_arch() {
    arch="$(uname -m)"
    case "$arch" in
        x86_64|amd64)  printf 'x86_64\n' ;;
        aarch64|arm64) printf 'aarch64\n' ;;
        *) die "unsupported architecture: $arch" ;;
    esac
}
|
||||||
|
|
||||||
|
# Guess the C library flavour: musl if `ldd --version` mentions it, gnu otherwise.
# musl's ldd exits non-zero and prints to stderr, hence `2>&1 || true`.
detect_libc() {
    ldd_out="$(ldd --version 2>&1 || true)"
    case "$ldd_out" in
        *musl*) printf 'musl\n' ;;
        *)      printf 'gnu\n' ;;
    esac
}
|
||||||
|
|
||||||
|
# Stream a URL to stdout, preferring curl and falling back to wget.
fetch_to_stdout() {
    url="$1"
    if command -v curl >/dev/null 2>&1; then
        curl -fsSL "$url"
        return
    fi
    if command -v wget >/dev/null 2>&1; then
        wget -qO- "$url"
        return
    fi
    die "neither curl nor wget is installed"
}
|
||||||
|
|
||||||
|
# Copy the binary into INSTALL_DIR, escalating via sudo only when required.
install_binary() {
    src="$1"
    dst="$2"

    # Writable already, or creatable inside a writable parent: no escalation.
    if [ -w "$INSTALL_DIR" ] || { [ ! -e "$INSTALL_DIR" ] && [ -w "$(dirname "$INSTALL_DIR")" ]; }; then
        mkdir -p "$INSTALL_DIR"
        install -m 0755 "$src" "$dst"
        return
    fi

    if command -v sudo >/dev/null 2>&1; then
        sudo mkdir -p "$INSTALL_DIR"
        sudo install -m 0755 "$src" "$dst"
        return
    fi

    die "cannot write to $INSTALL_DIR and sudo is not available"
}
|
||||||
|
|
||||||
|
need_cmd uname
need_cmd tar
need_cmd mktemp
need_cmd grep
need_cmd install

ARCH="$(detect_arch)"
OS="$(detect_os)"

# Release archives exist only for Linux; everything else must build from source.
if [ "$OS" != "linux" ]; then
    case "$OS" in
        openbsd)
            die "install.sh installs only Linux release artifacts. On OpenBSD, build from source (see docs/OPENBSD.en.md)."
            ;;
        *)
            die "unsupported operating system for install.sh: $OS"
            ;;
    esac
fi

LIBC="$(detect_libc)"

case "$VERSION" in
    latest)
        URL="https://github.com/$REPO/releases/latest/download/${BIN_NAME}-${ARCH}-linux-${LIBC}.tar.gz"
        ;;
    *)
        URL="https://github.com/$REPO/releases/download/${VERSION}/${BIN_NAME}-${ARCH}-linux-${LIBC}.tar.gz"
        ;;
esac

# Use a private variable for the scratch directory: assigning to TMPDIR would
# clobber the standard (often exported) environment variable, redirecting every
# child process's temp files into a directory that is removed on exit.
WORKDIR="$(mktemp -d)"
trap 'rm -rf "$WORKDIR"' EXIT INT TERM

say "Installing $BIN_NAME ($VERSION) for $ARCH-linux-$LIBC..."
fetch_to_stdout "$URL" | tar -xzf - -C "$WORKDIR"

[ -f "$WORKDIR/$BIN_NAME" ] || die "archive did not contain $BIN_NAME"

install_binary "$WORKDIR/$BIN_NAME" "$INSTALL_DIR/$BIN_NAME"

say "Installed: $INSTALL_DIR/$BIN_NAME"
# Best-effort smoke check; never fail the install over it.
"$INSTALL_DIR/$BIN_NAME" --version 2>/dev/null || true
|
||||||
269
src/api/config_store.rs
Normal file
269
src/api/config_store.rs
Normal file
@@ -0,0 +1,269 @@
|
|||||||
|
use std::collections::BTreeMap;
|
||||||
|
use std::io::Write;
|
||||||
|
use std::path::{Path, PathBuf};
|
||||||
|
|
||||||
|
use chrono::{DateTime, Utc};
|
||||||
|
use hyper::header::IF_MATCH;
|
||||||
|
use serde::Serialize;
|
||||||
|
use sha2::{Digest, Sha256};
|
||||||
|
|
||||||
|
use crate::config::ProxyConfig;
|
||||||
|
|
||||||
|
use super::model::ApiFailure;
|
||||||
|
|
||||||
|
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
|
||||||
|
pub(super) enum AccessSection {
|
||||||
|
Users,
|
||||||
|
UserAdTags,
|
||||||
|
UserMaxTcpConns,
|
||||||
|
UserExpirations,
|
||||||
|
UserDataQuota,
|
||||||
|
UserMaxUniqueIps,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl AccessSection {
|
||||||
|
fn table_name(self) -> &'static str {
|
||||||
|
match self {
|
||||||
|
Self::Users => "access.users",
|
||||||
|
Self::UserAdTags => "access.user_ad_tags",
|
||||||
|
Self::UserMaxTcpConns => "access.user_max_tcp_conns",
|
||||||
|
Self::UserExpirations => "access.user_expirations",
|
||||||
|
Self::UserDataQuota => "access.user_data_quota",
|
||||||
|
Self::UserMaxUniqueIps => "access.user_max_unique_ips",
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
pub(super) fn parse_if_match(headers: &hyper::HeaderMap) -> Option<String> {
|
||||||
|
headers
|
||||||
|
.get(IF_MATCH)
|
||||||
|
.and_then(|value| value.to_str().ok())
|
||||||
|
.map(str::trim)
|
||||||
|
.filter(|value| !value.is_empty())
|
||||||
|
.map(|value| value.trim_matches('"').to_string())
|
||||||
|
}
|
||||||
|
|
||||||
|
pub(super) async fn ensure_expected_revision(
|
||||||
|
config_path: &Path,
|
||||||
|
expected_revision: Option<&str>,
|
||||||
|
) -> Result<(), ApiFailure> {
|
||||||
|
let Some(expected) = expected_revision else {
|
||||||
|
return Ok(());
|
||||||
|
};
|
||||||
|
let current = current_revision(config_path).await?;
|
||||||
|
if current != expected {
|
||||||
|
return Err(ApiFailure::new(
|
||||||
|
hyper::StatusCode::CONFLICT,
|
||||||
|
"revision_conflict",
|
||||||
|
"Config revision mismatch",
|
||||||
|
));
|
||||||
|
}
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
|
||||||
|
pub(super) async fn current_revision(config_path: &Path) -> Result<String, ApiFailure> {
|
||||||
|
let content = tokio::fs::read_to_string(config_path)
|
||||||
|
.await
|
||||||
|
.map_err(|e| ApiFailure::internal(format!("failed to read config: {}", e)))?;
|
||||||
|
Ok(compute_revision(&content))
|
||||||
|
}
|
||||||
|
|
||||||
|
pub(super) fn compute_revision(content: &str) -> String {
|
||||||
|
let mut hasher = Sha256::new();
|
||||||
|
hasher.update(content.as_bytes());
|
||||||
|
hex::encode(hasher.finalize())
|
||||||
|
}
|
||||||
|
|
||||||
|
pub(super) async fn load_config_from_disk(config_path: &Path) -> Result<ProxyConfig, ApiFailure> {
|
||||||
|
let config_path = config_path.to_path_buf();
|
||||||
|
tokio::task::spawn_blocking(move || ProxyConfig::load(config_path))
|
||||||
|
.await
|
||||||
|
.map_err(|e| ApiFailure::internal(format!("failed to join config loader: {}", e)))?
|
||||||
|
.map_err(|e| ApiFailure::internal(format!("failed to load config: {}", e)))
|
||||||
|
}
|
||||||
|
|
||||||
|
pub(super) async fn save_config_to_disk(
|
||||||
|
config_path: &Path,
|
||||||
|
cfg: &ProxyConfig,
|
||||||
|
) -> Result<String, ApiFailure> {
|
||||||
|
let serialized = toml::to_string_pretty(cfg)
|
||||||
|
.map_err(|e| ApiFailure::internal(format!("failed to serialize config: {}", e)))?;
|
||||||
|
write_atomic(config_path.to_path_buf(), serialized.clone()).await?;
|
||||||
|
Ok(compute_revision(&serialized))
|
||||||
|
}
|
||||||
|
|
||||||
|
pub(super) async fn save_access_sections_to_disk(
|
||||||
|
config_path: &Path,
|
||||||
|
cfg: &ProxyConfig,
|
||||||
|
sections: &[AccessSection],
|
||||||
|
) -> Result<String, ApiFailure> {
|
||||||
|
let mut content = tokio::fs::read_to_string(config_path)
|
||||||
|
.await
|
||||||
|
.map_err(|e| ApiFailure::internal(format!("failed to read config: {}", e)))?;
|
||||||
|
|
||||||
|
let mut applied = Vec::new();
|
||||||
|
for section in sections {
|
||||||
|
if applied.contains(section) {
|
||||||
|
continue;
|
||||||
|
}
|
||||||
|
let rendered = render_access_section(cfg, *section)?;
|
||||||
|
content = upsert_toml_table(&content, section.table_name(), &rendered);
|
||||||
|
applied.push(*section);
|
||||||
|
}
|
||||||
|
|
||||||
|
write_atomic(config_path.to_path_buf(), content.clone()).await?;
|
||||||
|
Ok(compute_revision(&content))
|
||||||
|
}
|
||||||
|
|
||||||
|
fn render_access_section(cfg: &ProxyConfig, section: AccessSection) -> Result<String, ApiFailure> {
|
||||||
|
let body = match section {
|
||||||
|
AccessSection::Users => {
|
||||||
|
let rows: BTreeMap<String, String> = cfg
|
||||||
|
.access
|
||||||
|
.users
|
||||||
|
.iter()
|
||||||
|
.map(|(key, value)| (key.clone(), value.clone()))
|
||||||
|
.collect();
|
||||||
|
serialize_table_body(&rows)?
|
||||||
|
}
|
||||||
|
AccessSection::UserAdTags => {
|
||||||
|
let rows: BTreeMap<String, String> = cfg
|
||||||
|
.access
|
||||||
|
.user_ad_tags
|
||||||
|
.iter()
|
||||||
|
.map(|(key, value)| (key.clone(), value.clone()))
|
||||||
|
.collect();
|
||||||
|
serialize_table_body(&rows)?
|
||||||
|
}
|
||||||
|
AccessSection::UserMaxTcpConns => {
|
||||||
|
let rows: BTreeMap<String, usize> = cfg
|
||||||
|
.access
|
||||||
|
.user_max_tcp_conns
|
||||||
|
.iter()
|
||||||
|
.map(|(key, value)| (key.clone(), *value))
|
||||||
|
.collect();
|
||||||
|
serialize_table_body(&rows)?
|
||||||
|
}
|
||||||
|
AccessSection::UserExpirations => {
|
||||||
|
let rows: BTreeMap<String, DateTime<Utc>> = cfg
|
||||||
|
.access
|
||||||
|
.user_expirations
|
||||||
|
.iter()
|
||||||
|
.map(|(key, value)| (key.clone(), *value))
|
||||||
|
.collect();
|
||||||
|
serialize_table_body(&rows)?
|
||||||
|
}
|
||||||
|
AccessSection::UserDataQuota => {
|
||||||
|
let rows: BTreeMap<String, u64> = cfg
|
||||||
|
.access
|
||||||
|
.user_data_quota
|
||||||
|
.iter()
|
||||||
|
.map(|(key, value)| (key.clone(), *value))
|
||||||
|
.collect();
|
||||||
|
serialize_table_body(&rows)?
|
||||||
|
}
|
||||||
|
AccessSection::UserMaxUniqueIps => {
|
||||||
|
let rows: BTreeMap<String, usize> = cfg
|
||||||
|
.access
|
||||||
|
.user_max_unique_ips
|
||||||
|
.iter()
|
||||||
|
.map(|(key, value)| (key.clone(), *value))
|
||||||
|
.collect();
|
||||||
|
serialize_table_body(&rows)?
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
let mut out = format!("[{}]\n", section.table_name());
|
||||||
|
if !body.is_empty() {
|
||||||
|
out.push_str(&body);
|
||||||
|
}
|
||||||
|
if !out.ends_with('\n') {
|
||||||
|
out.push('\n');
|
||||||
|
}
|
||||||
|
Ok(out)
|
||||||
|
}
|
||||||
|
|
||||||
|
fn serialize_table_body<T: Serialize>(value: &T) -> Result<String, ApiFailure> {
|
||||||
|
toml::to_string(value)
|
||||||
|
.map_err(|e| ApiFailure::internal(format!("failed to serialize access section: {}", e)))
|
||||||
|
}
|
||||||
|
|
||||||
|
fn upsert_toml_table(source: &str, table_name: &str, replacement: &str) -> String {
|
||||||
|
if let Some((start, end)) = find_toml_table_bounds(source, table_name) {
|
||||||
|
let mut out = String::with_capacity(source.len() + replacement.len());
|
||||||
|
out.push_str(&source[..start]);
|
||||||
|
out.push_str(replacement);
|
||||||
|
out.push_str(&source[end..]);
|
||||||
|
return out;
|
||||||
|
}
|
||||||
|
|
||||||
|
let mut out = source.to_string();
|
||||||
|
if !out.is_empty() && !out.ends_with('\n') {
|
||||||
|
out.push('\n');
|
||||||
|
}
|
||||||
|
if !out.is_empty() {
|
||||||
|
out.push('\n');
|
||||||
|
}
|
||||||
|
out.push_str(replacement);
|
||||||
|
out
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Byte range `[start, end)` of the `[table_name]` table in `source`:
/// from the header line up to (but excluding) the next table header, or to
/// the end of the document when the table is the last one. `None` when the
/// table is absent.
fn find_toml_table_bounds(source: &str, table_name: &str) -> Option<(usize, usize)> {
    let header = format!("[{}]", table_name);
    let mut cursor = 0usize;
    let mut table_start: Option<usize> = None;

    for raw_line in source.split_inclusive('\n') {
        let stripped = raw_line.trim();
        match table_start {
            // Any later header terminates the table we already found.
            Some(begin) if stripped.starts_with('[') => return Some((begin, cursor)),
            None if stripped == header => table_start = Some(cursor),
            _ => {}
        }
        cursor = cursor.saturating_add(raw_line.len());
    }

    // Table found but never terminated: it runs to the end of the document.
    table_start.map(|begin| (begin, source.len()))
}
|
||||||
|
|
||||||
|
async fn write_atomic(path: PathBuf, contents: String) -> Result<(), ApiFailure> {
|
||||||
|
tokio::task::spawn_blocking(move || write_atomic_sync(&path, &contents))
|
||||||
|
.await
|
||||||
|
.map_err(|e| ApiFailure::internal(format!("failed to join writer: {}", e)))?
|
||||||
|
.map_err(|e| ApiFailure::internal(format!("failed to write config: {}", e)))
|
||||||
|
}
|
||||||
|
|
||||||
|
fn write_atomic_sync(path: &Path, contents: &str) -> std::io::Result<()> {
|
||||||
|
let parent = path.parent().unwrap_or_else(|| Path::new("."));
|
||||||
|
std::fs::create_dir_all(parent)?;
|
||||||
|
|
||||||
|
let tmp_name = format!(
|
||||||
|
".{}.tmp-{}",
|
||||||
|
path.file_name()
|
||||||
|
.and_then(|s| s.to_str())
|
||||||
|
.unwrap_or("config.toml"),
|
||||||
|
rand::random::<u64>()
|
||||||
|
);
|
||||||
|
let tmp_path = parent.join(tmp_name);
|
||||||
|
|
||||||
|
let write_result = (|| {
|
||||||
|
let mut file = std::fs::OpenOptions::new()
|
||||||
|
.create_new(true)
|
||||||
|
.write(true)
|
||||||
|
.open(&tmp_path)?;
|
||||||
|
file.write_all(contents.as_bytes())?;
|
||||||
|
file.sync_all()?;
|
||||||
|
std::fs::rename(&tmp_path, path)?;
|
||||||
|
if let Ok(dir) = std::fs::File::open(parent) {
|
||||||
|
let _ = dir.sync_all();
|
||||||
|
}
|
||||||
|
Ok(())
|
||||||
|
})();
|
||||||
|
|
||||||
|
if write_result.is_err() {
|
||||||
|
let _ = std::fs::remove_file(&tmp_path);
|
||||||
|
}
|
||||||
|
write_result
|
||||||
|
}
|
||||||
90
src/api/events.rs
Normal file
90
src/api/events.rs
Normal file
@@ -0,0 +1,90 @@
|
|||||||
|
use std::collections::VecDeque;
|
||||||
|
use std::sync::Mutex;
|
||||||
|
use std::time::{SystemTime, UNIX_EPOCH};
|
||||||
|
|
||||||
|
use serde::Serialize;
|
||||||
|
|
||||||
|
#[derive(Clone, Serialize)]
|
||||||
|
pub(super) struct ApiEventRecord {
|
||||||
|
pub(super) seq: u64,
|
||||||
|
pub(super) ts_epoch_secs: u64,
|
||||||
|
pub(super) event_type: String,
|
||||||
|
pub(super) context: String,
|
||||||
|
}
|
||||||
|
|
||||||
|
#[derive(Clone, Serialize)]
|
||||||
|
pub(super) struct ApiEventSnapshot {
|
||||||
|
pub(super) capacity: usize,
|
||||||
|
pub(super) dropped_total: u64,
|
||||||
|
pub(super) events: Vec<ApiEventRecord>,
|
||||||
|
}
|
||||||
|
|
||||||
|
struct ApiEventsInner {
|
||||||
|
capacity: usize,
|
||||||
|
dropped_total: u64,
|
||||||
|
next_seq: u64,
|
||||||
|
events: VecDeque<ApiEventRecord>,
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Bounded ring-buffer for control-plane API/runtime events.
|
||||||
|
pub(crate) struct ApiEventStore {
|
||||||
|
inner: Mutex<ApiEventsInner>,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl ApiEventStore {
|
||||||
|
pub(super) fn new(capacity: usize) -> Self {
|
||||||
|
let bounded = capacity.max(16);
|
||||||
|
Self {
|
||||||
|
inner: Mutex::new(ApiEventsInner {
|
||||||
|
capacity: bounded,
|
||||||
|
dropped_total: 0,
|
||||||
|
next_seq: 1,
|
||||||
|
events: VecDeque::with_capacity(bounded),
|
||||||
|
}),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
pub(super) fn record(&self, event_type: &str, context: impl Into<String>) {
|
||||||
|
let now_epoch_secs = SystemTime::now()
|
||||||
|
.duration_since(UNIX_EPOCH)
|
||||||
|
.unwrap_or_default()
|
||||||
|
.as_secs();
|
||||||
|
let mut context = context.into();
|
||||||
|
if context.len() > 256 {
|
||||||
|
context.truncate(256);
|
||||||
|
}
|
||||||
|
|
||||||
|
let mut guard = self.inner.lock().expect("api event store mutex poisoned");
|
||||||
|
if guard.events.len() == guard.capacity {
|
||||||
|
guard.events.pop_front();
|
||||||
|
guard.dropped_total = guard.dropped_total.saturating_add(1);
|
||||||
|
}
|
||||||
|
let seq = guard.next_seq;
|
||||||
|
guard.next_seq = guard.next_seq.saturating_add(1);
|
||||||
|
guard.events.push_back(ApiEventRecord {
|
||||||
|
seq,
|
||||||
|
ts_epoch_secs: now_epoch_secs,
|
||||||
|
event_type: event_type.to_string(),
|
||||||
|
context,
|
||||||
|
});
|
||||||
|
}
|
||||||
|
|
||||||
|
pub(super) fn snapshot(&self, limit: usize) -> ApiEventSnapshot {
|
||||||
|
let guard = self.inner.lock().expect("api event store mutex poisoned");
|
||||||
|
let bounded_limit = limit.clamp(1, guard.capacity.max(1));
|
||||||
|
let mut items: Vec<ApiEventRecord> = guard
|
||||||
|
.events
|
||||||
|
.iter()
|
||||||
|
.rev()
|
||||||
|
.take(bounded_limit)
|
||||||
|
.cloned()
|
||||||
|
.collect();
|
||||||
|
items.reverse();
|
||||||
|
|
||||||
|
ApiEventSnapshot {
|
||||||
|
capacity: guard.capacity,
|
||||||
|
dropped_total: guard.dropped_total,
|
||||||
|
events: items,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
91
src/api/http_utils.rs
Normal file
91
src/api/http_utils.rs
Normal file
@@ -0,0 +1,91 @@
|
|||||||
|
use http_body_util::{BodyExt, Full};
|
||||||
|
use hyper::StatusCode;
|
||||||
|
use hyper::body::{Bytes, Incoming};
|
||||||
|
use serde::Serialize;
|
||||||
|
use serde::de::DeserializeOwned;
|
||||||
|
|
||||||
|
use super::model::{ApiFailure, ErrorBody, ErrorResponse, SuccessResponse};
|
||||||
|
|
||||||
|
pub(super) fn success_response<T: Serialize>(
|
||||||
|
status: StatusCode,
|
||||||
|
data: T,
|
||||||
|
revision: String,
|
||||||
|
) -> hyper::Response<Full<Bytes>> {
|
||||||
|
let payload = SuccessResponse {
|
||||||
|
ok: true,
|
||||||
|
data,
|
||||||
|
revision,
|
||||||
|
};
|
||||||
|
let body = serde_json::to_vec(&payload).unwrap_or_else(|_| b"{\"ok\":false}".to_vec());
|
||||||
|
hyper::Response::builder()
|
||||||
|
.status(status)
|
||||||
|
.header("content-type", "application/json; charset=utf-8")
|
||||||
|
.body(Full::new(Bytes::from(body)))
|
||||||
|
.unwrap()
|
||||||
|
}
|
||||||
|
|
||||||
|
pub(super) fn error_response(
|
||||||
|
request_id: u64,
|
||||||
|
failure: ApiFailure,
|
||||||
|
) -> hyper::Response<Full<Bytes>> {
|
||||||
|
let payload = ErrorResponse {
|
||||||
|
ok: false,
|
||||||
|
error: ErrorBody {
|
||||||
|
code: failure.code,
|
||||||
|
message: failure.message,
|
||||||
|
},
|
||||||
|
request_id,
|
||||||
|
};
|
||||||
|
let body = serde_json::to_vec(&payload).unwrap_or_else(|_| {
|
||||||
|
format!(
|
||||||
|
"{{\"ok\":false,\"error\":{{\"code\":\"internal_error\",\"message\":\"serialization failed\"}},\"request_id\":{}}}",
|
||||||
|
request_id
|
||||||
|
)
|
||||||
|
.into_bytes()
|
||||||
|
});
|
||||||
|
hyper::Response::builder()
|
||||||
|
.status(failure.status)
|
||||||
|
.header("content-type", "application/json; charset=utf-8")
|
||||||
|
.body(Full::new(Bytes::from(body)))
|
||||||
|
.unwrap()
|
||||||
|
}
|
||||||
|
|
||||||
|
pub(super) async fn read_json<T: DeserializeOwned>(
|
||||||
|
body: Incoming,
|
||||||
|
limit: usize,
|
||||||
|
) -> Result<T, ApiFailure> {
|
||||||
|
let bytes = read_body_with_limit(body, limit).await?;
|
||||||
|
serde_json::from_slice(&bytes).map_err(|_| ApiFailure::bad_request("Invalid JSON body"))
|
||||||
|
}
|
||||||
|
|
||||||
|
pub(super) async fn read_optional_json<T: DeserializeOwned>(
|
||||||
|
body: Incoming,
|
||||||
|
limit: usize,
|
||||||
|
) -> Result<Option<T>, ApiFailure> {
|
||||||
|
let bytes = read_body_with_limit(body, limit).await?;
|
||||||
|
if bytes.is_empty() {
|
||||||
|
return Ok(None);
|
||||||
|
}
|
||||||
|
serde_json::from_slice(&bytes)
|
||||||
|
.map(Some)
|
||||||
|
.map_err(|_| ApiFailure::bad_request("Invalid JSON body"))
|
||||||
|
}
|
||||||
|
|
||||||
|
async fn read_body_with_limit(body: Incoming, limit: usize) -> Result<Vec<u8>, ApiFailure> {
|
||||||
|
let mut collected = Vec::new();
|
||||||
|
let mut body = body;
|
||||||
|
while let Some(frame_result) = body.frame().await {
|
||||||
|
let frame = frame_result.map_err(|_| ApiFailure::bad_request("Invalid request body"))?;
|
||||||
|
if let Some(chunk) = frame.data_ref() {
|
||||||
|
if collected.len().saturating_add(chunk.len()) > limit {
|
||||||
|
return Err(ApiFailure::new(
|
||||||
|
StatusCode::PAYLOAD_TOO_LARGE,
|
||||||
|
"payload_too_large",
|
||||||
|
format!("Body exceeds {} bytes", limit),
|
||||||
|
));
|
||||||
|
}
|
||||||
|
collected.extend_from_slice(chunk);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
Ok(collected)
|
||||||
|
}
|
||||||
554
src/api/mod.rs
Normal file
554
src/api/mod.rs
Normal file
@@ -0,0 +1,554 @@
|
|||||||
|
use std::convert::Infallible;
|
||||||
|
use std::net::{IpAddr, SocketAddr};
|
||||||
|
use std::path::PathBuf;
|
||||||
|
use std::sync::Arc;
|
||||||
|
use std::sync::atomic::{AtomicBool, AtomicU64, Ordering};
|
||||||
|
|
||||||
|
use http_body_util::Full;
|
||||||
|
use hyper::body::{Bytes, Incoming};
|
||||||
|
use hyper::header::AUTHORIZATION;
|
||||||
|
use hyper::server::conn::http1;
|
||||||
|
use hyper::service::service_fn;
|
||||||
|
use hyper::{Method, Request, Response, StatusCode};
|
||||||
|
use tokio::net::TcpListener;
|
||||||
|
use tokio::sync::{Mutex, RwLock, watch};
|
||||||
|
use tracing::{debug, info, warn};
|
||||||
|
|
||||||
|
use crate::config::ProxyConfig;
|
||||||
|
use crate::ip_tracker::UserIpTracker;
|
||||||
|
use crate::proxy::route_mode::RouteRuntimeController;
|
||||||
|
use crate::startup::StartupTracker;
|
||||||
|
use crate::stats::Stats;
|
||||||
|
use crate::transport::middle_proxy::MePool;
|
||||||
|
use crate::transport::UpstreamManager;
|
||||||
|
|
||||||
|
mod config_store;
|
||||||
|
mod events;
|
||||||
|
mod http_utils;
|
||||||
|
mod model;
|
||||||
|
mod runtime_edge;
|
||||||
|
mod runtime_init;
|
||||||
|
mod runtime_min;
|
||||||
|
mod runtime_selftest;
|
||||||
|
mod runtime_stats;
|
||||||
|
mod runtime_watch;
|
||||||
|
mod runtime_zero;
|
||||||
|
mod users;
|
||||||
|
|
||||||
|
use config_store::{current_revision, parse_if_match};
|
||||||
|
use http_utils::{error_response, read_json, read_optional_json, success_response};
|
||||||
|
use events::ApiEventStore;
|
||||||
|
use model::{
|
||||||
|
ApiFailure, CreateUserRequest, HealthData, PatchUserRequest, RotateSecretRequest, SummaryData,
|
||||||
|
};
|
||||||
|
use runtime_edge::{
|
||||||
|
EdgeConnectionsCacheEntry, build_runtime_connections_summary_data,
|
||||||
|
build_runtime_events_recent_data,
|
||||||
|
};
|
||||||
|
use runtime_init::build_runtime_initialization_data;
|
||||||
|
use runtime_min::{
|
||||||
|
build_runtime_me_pool_state_data, build_runtime_me_quality_data, build_runtime_nat_stun_data,
|
||||||
|
build_runtime_upstream_quality_data, build_security_whitelist_data,
|
||||||
|
};
|
||||||
|
use runtime_selftest::build_runtime_me_selftest_data;
|
||||||
|
use runtime_stats::{
|
||||||
|
MinimalCacheEntry, build_dcs_data, build_me_writers_data, build_minimal_all_data,
|
||||||
|
build_upstreams_data, build_zero_all_data,
|
||||||
|
};
|
||||||
|
use runtime_zero::{
|
||||||
|
build_limits_effective_data, build_runtime_gates_data, build_security_posture_data,
|
||||||
|
build_system_info_data,
|
||||||
|
};
|
||||||
|
use runtime_watch::spawn_runtime_watchers;
|
||||||
|
use users::{create_user, delete_user, patch_user, rotate_secret, users_from_config};
|
||||||
|
|
||||||
|
/// Process-level runtime counters exposed through the API; the atomic fields
/// are updated by the watcher tasks spawned in `serve`.
pub(super) struct ApiRuntimeState {
    // Unix timestamp captured once at process start (supplied by the caller of `serve`).
    pub(super) process_started_at_epoch_secs: u64,
    // Number of config reloads observed; starts at 0.
    pub(super) config_reload_count: AtomicU64,
    // Unix timestamp of the most recent config reload; initialized to 0 at startup.
    pub(super) last_config_reload_epoch_secs: AtomicU64,
    // Mirrors the admission watch channel; seeded from its current value in `serve`.
    pub(super) admission_open: AtomicBool,
}
|
||||||
|
|
||||||
|
/// Bundle of handles shared by every API request handler.
/// Cheap to clone: every field is an `Arc`, a watch receiver, or a path.
#[derive(Clone)]
pub(super) struct ApiShared {
    pub(super) stats: Arc<Stats>,
    pub(super) ip_tracker: Arc<UserIpTracker>,
    // `None` until a middle-proxy pool exists — TODO confirm against pool init code.
    pub(super) me_pool: Arc<RwLock<Option<Arc<MePool>>>>,
    pub(super) upstream_manager: Arc<UpstreamManager>,
    // On-disk config location; used by `current_revision` and the mutation endpoints.
    pub(super) config_path: PathBuf,
    // Externally detected (IPv4, IPv6) link addresses (see `detected_link_ips`).
    pub(super) detected_ips_rx: watch::Receiver<(Option<IpAddr>, Option<IpAddr>)>,
    // NOTE(review): presumably serializes config-mutating endpoints; verify in users.rs.
    pub(super) mutation_lock: Arc<Mutex<()>>,
    pub(super) minimal_cache: Arc<Mutex<Option<MinimalCacheEntry>>>,
    pub(super) runtime_edge_connections_cache: Arc<Mutex<Option<EdgeConnectionsCacheEntry>>>,
    pub(super) runtime_edge_recompute_lock: Arc<Mutex<()>>,
    // Bounded ring buffer of recent API/runtime events (drops oldest at capacity).
    pub(super) runtime_events: Arc<ApiEventStore>,
    // Source of per-request ids; starts at 1 (see `serve`).
    pub(super) request_id: Arc<AtomicU64>,
    pub(super) runtime_state: Arc<ApiRuntimeState>,
    pub(super) startup_tracker: Arc<StartupTracker>,
    pub(super) route_runtime: Arc<RouteRuntimeController>,
}
|
||||||
|
|
||||||
|
impl ApiShared {
|
||||||
|
fn next_request_id(&self) -> u64 {
|
||||||
|
self.request_id.fetch_add(1, Ordering::Relaxed)
|
||||||
|
}
|
||||||
|
|
||||||
|
fn detected_link_ips(&self) -> (Option<IpAddr>, Option<IpAddr>) {
|
||||||
|
*self.detected_ips_rx.borrow()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
pub async fn serve(
|
||||||
|
listen: SocketAddr,
|
||||||
|
stats: Arc<Stats>,
|
||||||
|
ip_tracker: Arc<UserIpTracker>,
|
||||||
|
me_pool: Arc<RwLock<Option<Arc<MePool>>>>,
|
||||||
|
route_runtime: Arc<RouteRuntimeController>,
|
||||||
|
upstream_manager: Arc<UpstreamManager>,
|
||||||
|
config_rx: watch::Receiver<Arc<ProxyConfig>>,
|
||||||
|
admission_rx: watch::Receiver<bool>,
|
||||||
|
config_path: PathBuf,
|
||||||
|
detected_ips_rx: watch::Receiver<(Option<IpAddr>, Option<IpAddr>)>,
|
||||||
|
process_started_at_epoch_secs: u64,
|
||||||
|
startup_tracker: Arc<StartupTracker>,
|
||||||
|
) {
|
||||||
|
let listener = match TcpListener::bind(listen).await {
|
||||||
|
Ok(listener) => listener,
|
||||||
|
Err(error) => {
|
||||||
|
warn!(
|
||||||
|
error = %error,
|
||||||
|
listen = %listen,
|
||||||
|
"Failed to bind API listener"
|
||||||
|
);
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
info!("API endpoint: http://{}/v1/*", listen);
|
||||||
|
|
||||||
|
let runtime_state = Arc::new(ApiRuntimeState {
|
||||||
|
process_started_at_epoch_secs,
|
||||||
|
config_reload_count: AtomicU64::new(0),
|
||||||
|
last_config_reload_epoch_secs: AtomicU64::new(0),
|
||||||
|
admission_open: AtomicBool::new(*admission_rx.borrow()),
|
||||||
|
});
|
||||||
|
|
||||||
|
let shared = Arc::new(ApiShared {
|
||||||
|
stats,
|
||||||
|
ip_tracker,
|
||||||
|
me_pool,
|
||||||
|
upstream_manager,
|
||||||
|
config_path,
|
||||||
|
detected_ips_rx,
|
||||||
|
mutation_lock: Arc::new(Mutex::new(())),
|
||||||
|
minimal_cache: Arc::new(Mutex::new(None)),
|
||||||
|
runtime_edge_connections_cache: Arc::new(Mutex::new(None)),
|
||||||
|
runtime_edge_recompute_lock: Arc::new(Mutex::new(())),
|
||||||
|
runtime_events: Arc::new(ApiEventStore::new(
|
||||||
|
config_rx.borrow().server.api.runtime_edge_events_capacity,
|
||||||
|
)),
|
||||||
|
request_id: Arc::new(AtomicU64::new(1)),
|
||||||
|
runtime_state: runtime_state.clone(),
|
||||||
|
startup_tracker,
|
||||||
|
route_runtime,
|
||||||
|
});
|
||||||
|
|
||||||
|
spawn_runtime_watchers(
|
||||||
|
config_rx.clone(),
|
||||||
|
admission_rx.clone(),
|
||||||
|
runtime_state.clone(),
|
||||||
|
shared.runtime_events.clone(),
|
||||||
|
);
|
||||||
|
|
||||||
|
loop {
|
||||||
|
let (stream, peer) = match listener.accept().await {
|
||||||
|
Ok(v) => v,
|
||||||
|
Err(error) => {
|
||||||
|
warn!(error = %error, "API accept error");
|
||||||
|
continue;
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
let shared_conn = shared.clone();
|
||||||
|
let config_rx_conn = config_rx.clone();
|
||||||
|
tokio::spawn(async move {
|
||||||
|
let svc = service_fn(move |req: Request<Incoming>| {
|
||||||
|
let shared_req = shared_conn.clone();
|
||||||
|
let config_rx_req = config_rx_conn.clone();
|
||||||
|
async move { handle(req, peer, shared_req, config_rx_req).await }
|
||||||
|
});
|
||||||
|
if let Err(error) = http1::Builder::new()
|
||||||
|
.serve_connection(hyper_util::rt::TokioIo::new(stream), svc)
|
||||||
|
.await
|
||||||
|
{
|
||||||
|
debug!(error = %error, "API connection error");
|
||||||
|
}
|
||||||
|
});
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
async fn handle(
|
||||||
|
req: Request<Incoming>,
|
||||||
|
peer: SocketAddr,
|
||||||
|
shared: Arc<ApiShared>,
|
||||||
|
config_rx: watch::Receiver<Arc<ProxyConfig>>,
|
||||||
|
) -> Result<Response<Full<Bytes>>, Infallible> {
|
||||||
|
let request_id = shared.next_request_id();
|
||||||
|
let cfg = config_rx.borrow().clone();
|
||||||
|
let api_cfg = &cfg.server.api;
|
||||||
|
|
||||||
|
if !api_cfg.enabled {
|
||||||
|
return Ok(error_response(
|
||||||
|
request_id,
|
||||||
|
ApiFailure::new(
|
||||||
|
StatusCode::SERVICE_UNAVAILABLE,
|
||||||
|
"api_disabled",
|
||||||
|
"API is disabled",
|
||||||
|
),
|
||||||
|
));
|
||||||
|
}
|
||||||
|
|
||||||
|
if !api_cfg.whitelist.is_empty()
|
||||||
|
&& !api_cfg
|
||||||
|
.whitelist
|
||||||
|
.iter()
|
||||||
|
.any(|net| net.contains(peer.ip()))
|
||||||
|
{
|
||||||
|
return Ok(error_response(
|
||||||
|
request_id,
|
||||||
|
ApiFailure::new(StatusCode::FORBIDDEN, "forbidden", "Source IP is not allowed"),
|
||||||
|
));
|
||||||
|
}
|
||||||
|
|
||||||
|
if !api_cfg.auth_header.is_empty() {
|
||||||
|
let auth_ok = req
|
||||||
|
.headers()
|
||||||
|
.get(AUTHORIZATION)
|
||||||
|
.and_then(|v| v.to_str().ok())
|
||||||
|
.map(|v| v == api_cfg.auth_header)
|
||||||
|
.unwrap_or(false);
|
||||||
|
if !auth_ok {
|
||||||
|
return Ok(error_response(
|
||||||
|
request_id,
|
||||||
|
ApiFailure::new(
|
||||||
|
StatusCode::UNAUTHORIZED,
|
||||||
|
"unauthorized",
|
||||||
|
"Missing or invalid Authorization header",
|
||||||
|
),
|
||||||
|
));
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
let method = req.method().clone();
|
||||||
|
let path = req.uri().path().to_string();
|
||||||
|
let query = req.uri().query().map(str::to_string);
|
||||||
|
let body_limit = api_cfg.request_body_limit_bytes;
|
||||||
|
|
||||||
|
let result: Result<Response<Full<Bytes>>, ApiFailure> = async {
|
||||||
|
match (method.as_str(), path.as_str()) {
|
||||||
|
("GET", "/v1/health") => {
|
||||||
|
let revision = current_revision(&shared.config_path).await?;
|
||||||
|
let data = HealthData {
|
||||||
|
status: "ok",
|
||||||
|
read_only: api_cfg.read_only,
|
||||||
|
};
|
||||||
|
Ok(success_response(StatusCode::OK, data, revision))
|
||||||
|
}
|
||||||
|
("GET", "/v1/system/info") => {
|
||||||
|
let revision = current_revision(&shared.config_path).await?;
|
||||||
|
let data = build_system_info_data(shared.as_ref(), cfg.as_ref(), &revision);
|
||||||
|
Ok(success_response(StatusCode::OK, data, revision))
|
||||||
|
}
|
||||||
|
("GET", "/v1/runtime/gates") => {
|
||||||
|
let revision = current_revision(&shared.config_path).await?;
|
||||||
|
let data = build_runtime_gates_data(shared.as_ref(), cfg.as_ref()).await;
|
||||||
|
Ok(success_response(StatusCode::OK, data, revision))
|
||||||
|
}
|
||||||
|
("GET", "/v1/runtime/initialization") => {
|
||||||
|
let revision = current_revision(&shared.config_path).await?;
|
||||||
|
let data = build_runtime_initialization_data(shared.as_ref()).await;
|
||||||
|
Ok(success_response(StatusCode::OK, data, revision))
|
||||||
|
}
|
||||||
|
("GET", "/v1/limits/effective") => {
|
||||||
|
let revision = current_revision(&shared.config_path).await?;
|
||||||
|
let data = build_limits_effective_data(cfg.as_ref());
|
||||||
|
Ok(success_response(StatusCode::OK, data, revision))
|
||||||
|
}
|
||||||
|
("GET", "/v1/security/posture") => {
|
||||||
|
let revision = current_revision(&shared.config_path).await?;
|
||||||
|
let data = build_security_posture_data(cfg.as_ref());
|
||||||
|
Ok(success_response(StatusCode::OK, data, revision))
|
||||||
|
}
|
||||||
|
("GET", "/v1/security/whitelist") => {
|
||||||
|
let revision = current_revision(&shared.config_path).await?;
|
||||||
|
let data = build_security_whitelist_data(cfg.as_ref());
|
||||||
|
Ok(success_response(StatusCode::OK, data, revision))
|
||||||
|
}
|
||||||
|
("GET", "/v1/stats/summary") => {
|
||||||
|
let revision = current_revision(&shared.config_path).await?;
|
||||||
|
let data = SummaryData {
|
||||||
|
uptime_seconds: shared.stats.uptime_secs(),
|
||||||
|
connections_total: shared.stats.get_connects_all(),
|
||||||
|
connections_bad_total: shared.stats.get_connects_bad(),
|
||||||
|
handshake_timeouts_total: shared.stats.get_handshake_timeouts(),
|
||||||
|
configured_users: cfg.access.users.len(),
|
||||||
|
};
|
||||||
|
Ok(success_response(StatusCode::OK, data, revision))
|
||||||
|
}
|
||||||
|
("GET", "/v1/stats/zero/all") => {
|
||||||
|
let revision = current_revision(&shared.config_path).await?;
|
||||||
|
let data = build_zero_all_data(&shared.stats, cfg.access.users.len());
|
||||||
|
Ok(success_response(StatusCode::OK, data, revision))
|
||||||
|
}
|
||||||
|
("GET", "/v1/stats/upstreams") => {
|
||||||
|
let revision = current_revision(&shared.config_path).await?;
|
||||||
|
let data = build_upstreams_data(shared.as_ref(), api_cfg);
|
||||||
|
Ok(success_response(StatusCode::OK, data, revision))
|
||||||
|
}
|
||||||
|
("GET", "/v1/stats/minimal/all") => {
|
||||||
|
let revision = current_revision(&shared.config_path).await?;
|
||||||
|
let data = build_minimal_all_data(shared.as_ref(), api_cfg).await;
|
||||||
|
Ok(success_response(StatusCode::OK, data, revision))
|
||||||
|
}
|
||||||
|
("GET", "/v1/stats/me-writers") => {
|
||||||
|
let revision = current_revision(&shared.config_path).await?;
|
||||||
|
let data = build_me_writers_data(shared.as_ref(), api_cfg).await;
|
||||||
|
Ok(success_response(StatusCode::OK, data, revision))
|
||||||
|
}
|
||||||
|
("GET", "/v1/stats/dcs") => {
|
||||||
|
let revision = current_revision(&shared.config_path).await?;
|
||||||
|
let data = build_dcs_data(shared.as_ref(), api_cfg).await;
|
||||||
|
Ok(success_response(StatusCode::OK, data, revision))
|
||||||
|
}
|
||||||
|
("GET", "/v1/runtime/me_pool_state") => {
|
||||||
|
let revision = current_revision(&shared.config_path).await?;
|
||||||
|
let data = build_runtime_me_pool_state_data(shared.as_ref()).await;
|
||||||
|
Ok(success_response(StatusCode::OK, data, revision))
|
||||||
|
}
|
||||||
|
("GET", "/v1/runtime/me_quality") => {
|
||||||
|
let revision = current_revision(&shared.config_path).await?;
|
||||||
|
let data = build_runtime_me_quality_data(shared.as_ref()).await;
|
||||||
|
Ok(success_response(StatusCode::OK, data, revision))
|
||||||
|
}
|
||||||
|
("GET", "/v1/runtime/upstream_quality") => {
|
||||||
|
let revision = current_revision(&shared.config_path).await?;
|
||||||
|
let data = build_runtime_upstream_quality_data(shared.as_ref()).await;
|
||||||
|
Ok(success_response(StatusCode::OK, data, revision))
|
||||||
|
}
|
||||||
|
("GET", "/v1/runtime/nat_stun") => {
|
||||||
|
let revision = current_revision(&shared.config_path).await?;
|
||||||
|
let data = build_runtime_nat_stun_data(shared.as_ref()).await;
|
||||||
|
Ok(success_response(StatusCode::OK, data, revision))
|
||||||
|
}
|
||||||
|
("GET", "/v1/runtime/me-selftest") => {
|
||||||
|
let revision = current_revision(&shared.config_path).await?;
|
||||||
|
let data = build_runtime_me_selftest_data(shared.as_ref(), cfg.as_ref()).await;
|
||||||
|
Ok(success_response(StatusCode::OK, data, revision))
|
||||||
|
}
|
||||||
|
("GET", "/v1/runtime/connections/summary") => {
|
||||||
|
let revision = current_revision(&shared.config_path).await?;
|
||||||
|
let data = build_runtime_connections_summary_data(shared.as_ref(), cfg.as_ref()).await;
|
||||||
|
Ok(success_response(StatusCode::OK, data, revision))
|
||||||
|
}
|
||||||
|
("GET", "/v1/runtime/events/recent") => {
|
||||||
|
let revision = current_revision(&shared.config_path).await?;
|
||||||
|
let data = build_runtime_events_recent_data(
|
||||||
|
shared.as_ref(),
|
||||||
|
cfg.as_ref(),
|
||||||
|
query.as_deref(),
|
||||||
|
);
|
||||||
|
Ok(success_response(StatusCode::OK, data, revision))
|
||||||
|
}
|
||||||
|
("GET", "/v1/stats/users") | ("GET", "/v1/users") => {
|
||||||
|
let revision = current_revision(&shared.config_path).await?;
|
||||||
|
let (detected_ip_v4, detected_ip_v6) = shared.detected_link_ips();
|
||||||
|
let users = users_from_config(
|
||||||
|
&cfg,
|
||||||
|
&shared.stats,
|
||||||
|
&shared.ip_tracker,
|
||||||
|
detected_ip_v4,
|
||||||
|
detected_ip_v6,
|
||||||
|
)
|
||||||
|
.await;
|
||||||
|
Ok(success_response(StatusCode::OK, users, revision))
|
||||||
|
}
|
||||||
|
("POST", "/v1/users") => {
|
||||||
|
if api_cfg.read_only {
|
||||||
|
return Ok(error_response(
|
||||||
|
request_id,
|
||||||
|
ApiFailure::new(
|
||||||
|
StatusCode::FORBIDDEN,
|
||||||
|
"read_only",
|
||||||
|
"API runs in read-only mode",
|
||||||
|
),
|
||||||
|
));
|
||||||
|
}
|
||||||
|
let expected_revision = parse_if_match(req.headers());
|
||||||
|
let body = read_json::<CreateUserRequest>(req.into_body(), body_limit).await?;
|
||||||
|
let result = create_user(body, expected_revision, &shared).await;
|
||||||
|
let (data, revision) = match result {
|
||||||
|
Ok(ok) => ok,
|
||||||
|
Err(error) => {
|
||||||
|
shared.runtime_events.record("api.user.create.failed", error.code);
|
||||||
|
return Err(error);
|
||||||
|
}
|
||||||
|
};
|
||||||
|
shared
|
||||||
|
.runtime_events
|
||||||
|
.record("api.user.create.ok", format!("username={}", data.user.username));
|
||||||
|
Ok(success_response(StatusCode::CREATED, data, revision))
|
||||||
|
}
|
||||||
|
_ => {
|
||||||
|
if let Some(user) = path.strip_prefix("/v1/users/")
|
||||||
|
&& !user.is_empty()
|
||||||
|
&& !user.contains('/')
|
||||||
|
{
|
||||||
|
if method == Method::GET {
|
||||||
|
let revision = current_revision(&shared.config_path).await?;
|
||||||
|
let (detected_ip_v4, detected_ip_v6) = shared.detected_link_ips();
|
||||||
|
let users = users_from_config(
|
||||||
|
&cfg,
|
||||||
|
&shared.stats,
|
||||||
|
&shared.ip_tracker,
|
||||||
|
detected_ip_v4,
|
||||||
|
detected_ip_v6,
|
||||||
|
)
|
||||||
|
.await;
|
||||||
|
if let Some(user_info) = users.into_iter().find(|entry| entry.username == user)
|
||||||
|
{
|
||||||
|
return Ok(success_response(StatusCode::OK, user_info, revision));
|
||||||
|
}
|
||||||
|
return Ok(error_response(
|
||||||
|
request_id,
|
||||||
|
ApiFailure::new(StatusCode::NOT_FOUND, "not_found", "User not found"),
|
||||||
|
));
|
||||||
|
}
|
||||||
|
if method == Method::PATCH {
|
||||||
|
if api_cfg.read_only {
|
||||||
|
return Ok(error_response(
|
||||||
|
request_id,
|
||||||
|
ApiFailure::new(
|
||||||
|
StatusCode::FORBIDDEN,
|
||||||
|
"read_only",
|
||||||
|
"API runs in read-only mode",
|
||||||
|
),
|
||||||
|
));
|
||||||
|
}
|
||||||
|
let expected_revision = parse_if_match(req.headers());
|
||||||
|
let body = read_json::<PatchUserRequest>(req.into_body(), body_limit).await?;
|
||||||
|
let result = patch_user(user, body, expected_revision, &shared).await;
|
||||||
|
let (data, revision) = match result {
|
||||||
|
Ok(ok) => ok,
|
||||||
|
Err(error) => {
|
||||||
|
shared.runtime_events.record(
|
||||||
|
"api.user.patch.failed",
|
||||||
|
format!("username={} code={}", user, error.code),
|
||||||
|
);
|
||||||
|
return Err(error);
|
||||||
|
}
|
||||||
|
};
|
||||||
|
shared
|
||||||
|
.runtime_events
|
||||||
|
.record("api.user.patch.ok", format!("username={}", data.username));
|
||||||
|
return Ok(success_response(StatusCode::OK, data, revision));
|
||||||
|
}
|
||||||
|
if method == Method::DELETE {
|
||||||
|
if api_cfg.read_only {
|
||||||
|
return Ok(error_response(
|
||||||
|
request_id,
|
||||||
|
ApiFailure::new(
|
||||||
|
StatusCode::FORBIDDEN,
|
||||||
|
"read_only",
|
||||||
|
"API runs in read-only mode",
|
||||||
|
),
|
||||||
|
));
|
||||||
|
}
|
||||||
|
let expected_revision = parse_if_match(req.headers());
|
||||||
|
let result = delete_user(user, expected_revision, &shared).await;
|
||||||
|
let (deleted_user, revision) = match result {
|
||||||
|
Ok(ok) => ok,
|
||||||
|
Err(error) => {
|
||||||
|
shared.runtime_events.record(
|
||||||
|
"api.user.delete.failed",
|
||||||
|
format!("username={} code={}", user, error.code),
|
||||||
|
);
|
||||||
|
return Err(error);
|
||||||
|
}
|
||||||
|
};
|
||||||
|
shared.runtime_events.record(
|
||||||
|
"api.user.delete.ok",
|
||||||
|
format!("username={}", deleted_user),
|
||||||
|
);
|
||||||
|
return Ok(success_response(StatusCode::OK, deleted_user, revision));
|
||||||
|
}
|
||||||
|
if method == Method::POST
|
||||||
|
&& let Some(base_user) = user.strip_suffix("/rotate-secret")
|
||||||
|
&& !base_user.is_empty()
|
||||||
|
&& !base_user.contains('/')
|
||||||
|
{
|
||||||
|
if api_cfg.read_only {
|
||||||
|
return Ok(error_response(
|
||||||
|
request_id,
|
||||||
|
ApiFailure::new(
|
||||||
|
StatusCode::FORBIDDEN,
|
||||||
|
"read_only",
|
||||||
|
"API runs in read-only mode",
|
||||||
|
),
|
||||||
|
));
|
||||||
|
}
|
||||||
|
let expected_revision = parse_if_match(req.headers());
|
||||||
|
let body =
|
||||||
|
read_optional_json::<RotateSecretRequest>(req.into_body(), body_limit)
|
||||||
|
.await?;
|
||||||
|
let result = rotate_secret(
|
||||||
|
base_user,
|
||||||
|
body.unwrap_or_default(),
|
||||||
|
expected_revision,
|
||||||
|
&shared,
|
||||||
|
)
|
||||||
|
.await;
|
||||||
|
let (data, revision) = match result {
|
||||||
|
Ok(ok) => ok,
|
||||||
|
Err(error) => {
|
||||||
|
shared.runtime_events.record(
|
||||||
|
"api.user.rotate_secret.failed",
|
||||||
|
format!("username={} code={}", base_user, error.code),
|
||||||
|
);
|
||||||
|
return Err(error);
|
||||||
|
}
|
||||||
|
};
|
||||||
|
shared.runtime_events.record(
|
||||||
|
"api.user.rotate_secret.ok",
|
||||||
|
format!("username={}", base_user),
|
||||||
|
);
|
||||||
|
return Ok(success_response(StatusCode::OK, data, revision));
|
||||||
|
}
|
||||||
|
if method == Method::POST {
|
||||||
|
return Ok(error_response(
|
||||||
|
request_id,
|
||||||
|
ApiFailure::new(StatusCode::NOT_FOUND, "not_found", "Route not found"),
|
||||||
|
));
|
||||||
|
}
|
||||||
|
return Ok(error_response(
|
||||||
|
request_id,
|
||||||
|
ApiFailure::new(
|
||||||
|
StatusCode::METHOD_NOT_ALLOWED,
|
||||||
|
"method_not_allowed",
|
||||||
|
"Unsupported HTTP method for this route",
|
||||||
|
),
|
||||||
|
));
|
||||||
|
}
|
||||||
|
Ok(error_response(
|
||||||
|
request_id,
|
||||||
|
ApiFailure::new(StatusCode::NOT_FOUND, "not_found", "Route not found"),
|
||||||
|
))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
.await;
|
||||||
|
|
||||||
|
match result {
|
||||||
|
Ok(resp) => Ok(resp),
|
||||||
|
Err(error) => Ok(error_response(request_id, error)),
|
||||||
|
}
|
||||||
|
}
|
||||||
477
src/api/model.rs
Normal file
477
src/api/model.rs
Normal file
@@ -0,0 +1,477 @@
|
|||||||
|
use std::net::IpAddr;
|
||||||
|
|
||||||
|
use chrono::{DateTime, Utc};
|
||||||
|
use hyper::StatusCode;
|
||||||
|
use rand::Rng;
|
||||||
|
use serde::{Deserialize, Serialize};
|
||||||
|
|
||||||
|
/// Maximum accepted username length — presumably enforced by the user
/// create/patch validators; TODO confirm at the use sites.
const MAX_USERNAME_LEN: usize = 64;
|
||||||
|
|
||||||
|
/// An API-level failure: the HTTP status to respond with, a stable
/// machine-readable code, and a human-readable message.
#[derive(Debug)]
pub(super) struct ApiFailure {
    pub(super) status: StatusCode,
    // Stable code string (e.g. "bad_request", "internal_error").
    pub(super) code: &'static str,
    pub(super) message: String,
}
|
||||||
|
|
||||||
|
impl ApiFailure {
|
||||||
|
pub(super) fn new(status: StatusCode, code: &'static str, message: impl Into<String>) -> Self {
|
||||||
|
Self {
|
||||||
|
status,
|
||||||
|
code,
|
||||||
|
message: message.into(),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
pub(super) fn internal(message: impl Into<String>) -> Self {
|
||||||
|
Self::new(StatusCode::INTERNAL_SERVER_ERROR, "internal_error", message)
|
||||||
|
}
|
||||||
|
|
||||||
|
pub(super) fn bad_request(message: impl Into<String>) -> Self {
|
||||||
|
Self::new(StatusCode::BAD_REQUEST, "bad_request", message)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Error payload nested inside [`ErrorResponse`].
#[derive(Serialize)]
pub(super) struct ErrorBody {
    pub(super) code: &'static str,
    pub(super) message: String,
}

/// Top-level envelope for failed requests; `ok` is always serialized false.
#[derive(Serialize)]
pub(super) struct ErrorResponse {
    pub(super) ok: bool,
    pub(super) error: ErrorBody,
    // Correlates the response with server-side logs and events.
    pub(super) request_id: u64,
}

/// Top-level envelope for successful requests; `ok` is always serialized true.
#[derive(Serialize)]
pub(super) struct SuccessResponse<T> {
    pub(super) ok: bool,
    pub(super) data: T,
    // Config revision the response was computed against.
    pub(super) revision: String,
}

/// Payload of GET /v1/health.
#[derive(Serialize)]
pub(super) struct HealthData {
    pub(super) status: &'static str,
    pub(super) read_only: bool,
}

/// Payload of GET /v1/stats/summary.
#[derive(Serialize)]
pub(super) struct SummaryData {
    pub(super) uptime_seconds: f64,
    pub(super) connections_total: u64,
    pub(super) connections_bad_total: u64,
    pub(super) handshake_timeouts_total: u64,
    pub(super) configured_users: usize,
}
|
||||||
|
|
||||||
|
/// A (code, count) pair used for aggregated error-code counters.
#[derive(Serialize, Clone)]
pub(super) struct ZeroCodeCount {
    pub(super) code: i32,
    pub(super) total: u64,
}

/// Core process counters plus telemetry gate flags.
#[derive(Serialize, Clone)]
pub(super) struct ZeroCoreData {
    pub(super) uptime_seconds: f64,
    pub(super) connections_total: u64,
    pub(super) connections_bad_total: u64,
    pub(super) handshake_timeouts_total: u64,
    pub(super) configured_users: usize,
    pub(super) telemetry_core_enabled: bool,
    pub(super) telemetry_user_enabled: bool,
    pub(super) telemetry_me_level: String,
}

/// Upstream connect counters, including attempt-count and duration
/// histogram buckets for both successful and failed connects.
#[derive(Serialize, Clone)]
pub(super) struct ZeroUpstreamData {
    pub(super) connect_attempt_total: u64,
    pub(super) connect_success_total: u64,
    pub(super) connect_fail_total: u64,
    pub(super) connect_failfast_hard_error_total: u64,
    // Attempts-per-connect distribution.
    pub(super) connect_attempts_bucket_1: u64,
    pub(super) connect_attempts_bucket_2: u64,
    pub(super) connect_attempts_bucket_3_4: u64,
    pub(super) connect_attempts_bucket_gt_4: u64,
    // Connect-duration distribution, successful connects.
    pub(super) connect_duration_success_bucket_le_100ms: u64,
    pub(super) connect_duration_success_bucket_101_500ms: u64,
    pub(super) connect_duration_success_bucket_501_1000ms: u64,
    pub(super) connect_duration_success_bucket_gt_1000ms: u64,
    // Connect-duration distribution, failed connects.
    pub(super) connect_duration_fail_bucket_le_100ms: u64,
    pub(super) connect_duration_fail_bucket_101_500ms: u64,
    pub(super) connect_duration_fail_bucket_501_1000ms: u64,
    pub(super) connect_duration_fail_bucket_gt_1000ms: u64,
}
|
||||||
|
|
||||||
|
/// Per-DC view of one upstream.
#[derive(Serialize, Clone)]
pub(super) struct UpstreamDcStatus {
    pub(super) dc: i16,
    pub(super) latency_ema_ms: Option<f64>,
    pub(super) ip_preference: &'static str,
}

/// Health/latency status of a single configured upstream.
#[derive(Serialize, Clone)]
pub(super) struct UpstreamStatus {
    pub(super) upstream_id: usize,
    pub(super) route_kind: &'static str,
    pub(super) address: String,
    pub(super) weight: u16,
    pub(super) scopes: String,
    pub(super) healthy: bool,
    pub(super) fails: u32,
    pub(super) last_check_age_secs: u64,
    pub(super) effective_latency_ms: Option<f64>,
    pub(super) dc: Vec<UpstreamDcStatus>,
}

/// Counts of upstreams by health and route kind.
#[derive(Serialize, Clone)]
pub(super) struct UpstreamSummaryData {
    pub(super) configured_total: usize,
    pub(super) healthy_total: usize,
    pub(super) unhealthy_total: usize,
    pub(super) direct_total: usize,
    pub(super) socks4_total: usize,
    pub(super) socks5_total: usize,
}

/// Payload of GET /v1/stats/upstreams. Optional sections are omitted from
/// the JSON entirely when absent (serde skip attributes).
#[derive(Serialize, Clone)]
pub(super) struct UpstreamsData {
    pub(super) enabled: bool,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub(super) reason: Option<&'static str>,
    pub(super) generated_at_epoch_secs: u64,
    pub(super) zero: ZeroUpstreamData,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub(super) summary: Option<UpstreamSummaryData>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub(super) upstreams: Option<Vec<UpstreamStatus>>,
}
|
||||||
|
|
||||||
|
/// Serialized counter snapshot for the middle-proxy (ME) link.
///
/// All fields are monotonically named `*_total` counters grouped by concern:
/// keepalive traffic, RPC proxy-request signaling, reconnect outcomes,
/// handshake failures, route drops, SOCKS KDF policy decisions, endpoint
/// quarantine/drift, hard-swap bookkeeping, single-endpoint outage handling,
/// and floor-mode switches.
#[derive(Serialize, Clone)]
pub(super) struct ZeroMiddleProxyData {
    // --- keepalive ---
    pub(super) keepalive_sent_total: u64,
    pub(super) keepalive_failed_total: u64,
    pub(super) keepalive_pong_total: u64,
    pub(super) keepalive_timeout_total: u64,
    // --- RPC proxy-request signaling ---
    pub(super) rpc_proxy_req_signal_sent_total: u64,
    pub(super) rpc_proxy_req_signal_failed_total: u64,
    pub(super) rpc_proxy_req_signal_skipped_no_meta_total: u64,
    pub(super) rpc_proxy_req_signal_response_total: u64,
    pub(super) rpc_proxy_req_signal_close_sent_total: u64,
    // --- reconnects / handshake ---
    pub(super) reconnect_attempt_total: u64,
    pub(super) reconnect_success_total: u64,
    pub(super) handshake_reject_total: u64,
    /// Per-error-code breakdown of handshake failures.
    pub(super) handshake_error_codes: Vec<ZeroCodeCount>,
    pub(super) reader_eof_total: u64,
    pub(super) idle_close_by_peer_total: u64,
    // --- route drops, split by cause and by queue priority ---
    pub(super) route_drop_no_conn_total: u64,
    pub(super) route_drop_channel_closed_total: u64,
    pub(super) route_drop_queue_full_total: u64,
    pub(super) route_drop_queue_full_base_total: u64,
    pub(super) route_drop_queue_full_high_total: u64,
    // --- SOCKS KDF policy ---
    pub(super) socks_kdf_strict_reject_total: u64,
    pub(super) socks_kdf_compat_fallback_total: u64,
    // --- endpoint health / drift ---
    pub(super) endpoint_quarantine_total: u64,
    pub(super) kdf_drift_total: u64,
    pub(super) kdf_port_only_drift_total: u64,
    // --- hard swap ---
    pub(super) hardswap_pending_reuse_total: u64,
    pub(super) hardswap_pending_ttl_expired_total: u64,
    // --- single-endpoint outage mode ---
    pub(super) single_endpoint_outage_enter_total: u64,
    pub(super) single_endpoint_outage_exit_total: u64,
    pub(super) single_endpoint_outage_reconnect_attempt_total: u64,
    pub(super) single_endpoint_outage_reconnect_success_total: u64,
    pub(super) single_endpoint_quarantine_bypass_total: u64,
    pub(super) single_endpoint_shadow_rotate_total: u64,
    pub(super) single_endpoint_shadow_rotate_skipped_quarantine_total: u64,
    // --- floor mode transitions ---
    pub(super) floor_mode_switch_total: u64,
    pub(super) floor_mode_switch_static_to_adaptive_total: u64,
    pub(super) floor_mode_switch_adaptive_to_static_total: u64,
}
|
||||||
|
|
||||||
|
/// Serialized counters for the writer-pool lifecycle: swaps, draining,
/// forced closes, stale picks, writer removal/restoration, and refills.
#[derive(Serialize, Clone)]
pub(super) struct ZeroPoolData {
    pub(super) pool_swap_total: u64,
    /// Current gauge (not a `_total` counter) of pools being drained.
    pub(super) pool_drain_active: u64,
    pub(super) pool_force_close_total: u64,
    pub(super) pool_stale_pick_total: u64,
    pub(super) writer_removed_total: u64,
    pub(super) writer_removed_unexpected_total: u64,
    pub(super) refill_triggered_total: u64,
    pub(super) refill_skipped_inflight_total: u64,
    pub(super) refill_failed_total: u64,
    pub(super) writer_restored_same_endpoint_total: u64,
    pub(super) writer_restored_fallback_total: u64,
}
|
||||||
|
|
||||||
|
/// Serialized desync diagnostics: invalid secure padding, desync event
/// counts (with log suppression split), and a small histogram of frame
/// counts per desync event (buckets 0, 1-2, 3-10, >10).
#[derive(Serialize, Clone)]
pub(super) struct ZeroDesyncData {
    pub(super) secure_padding_invalid_total: u64,
    pub(super) desync_total: u64,
    /// Desync events that were fully logged (vs suppressed below).
    pub(super) desync_full_logged_total: u64,
    pub(super) desync_suppressed_total: u64,
    // Histogram buckets by frames-per-event.
    pub(super) desync_frames_bucket_0: u64,
    pub(super) desync_frames_bucket_1_2: u64,
    pub(super) desync_frames_bucket_3_10: u64,
    pub(super) desync_frames_bucket_gt_10: u64,
}
|
||||||
|
|
||||||
|
/// Combined "zero" metrics response: timestamp plus every counter section
/// (core, upstream, middle proxy, pool, desync) in one payload.
#[derive(Serialize, Clone)]
pub(super) struct ZeroAllData {
    pub(super) generated_at_epoch_secs: u64,
    pub(super) core: ZeroCoreData,
    pub(super) upstream: ZeroUpstreamData,
    pub(super) middle_proxy: ZeroMiddleProxyData,
    pub(super) pool: ZeroPoolData,
    pub(super) desync: ZeroDesyncData,
}
|
||||||
|
|
||||||
|
/// Aggregate middle-proxy writer coverage: configured vs available
/// endpoints and required vs alive writers, with derived percentages.
#[derive(Serialize, Clone)]
pub(super) struct MeWritersSummary {
    pub(super) configured_dc_groups: usize,
    pub(super) configured_endpoints: usize,
    pub(super) available_endpoints: usize,
    /// available_endpoints relative to configured_endpoints, as a percent.
    pub(super) available_pct: f64,
    pub(super) required_writers: usize,
    pub(super) alive_writers: usize,
    /// alive_writers relative to required_writers, as a percent.
    pub(super) coverage_pct: f64,
}
|
||||||
|
|
||||||
|
/// Per-writer status row for the ME writers endpoint.
#[derive(Serialize, Clone)]
pub(super) struct MeWriterStatus {
    pub(super) writer_id: u64,
    /// Datacenter id; `None` when the writer is not bound to a DC.
    pub(super) dc: Option<i16>,
    pub(super) endpoint: String,
    /// Pool generation this writer belongs to.
    pub(super) generation: u64,
    pub(super) state: &'static str,
    pub(super) draining: bool,
    pub(super) degraded: bool,
    pub(super) bound_clients: usize,
    pub(super) idle_for_secs: Option<u64>,
    /// Smoothed round-trip time (EMA) in milliseconds, if measured.
    pub(super) rtt_ema_ms: Option<f64>,
}
|
||||||
|
|
||||||
|
/// Response envelope for the ME writers endpoint: feature flag, optional
/// disable reason (omitted from JSON when `None`), timestamp, summary, and
/// the per-writer rows.
#[derive(Serialize, Clone)]
pub(super) struct MeWritersData {
    pub(super) middle_proxy_enabled: bool,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub(super) reason: Option<&'static str>,
    pub(super) generated_at_epoch_secs: u64,
    pub(super) summary: MeWritersSummary,
    pub(super) writers: Vec<MeWriterStatus>,
}
|
||||||
|
|
||||||
|
/// Per-datacenter status row: endpoints, per-endpoint writer counts,
/// availability/coverage percentages, floor configuration, and load.
#[derive(Serialize, Clone)]
pub(super) struct DcStatus {
    pub(super) dc: i16,
    pub(super) endpoints: Vec<String>,
    pub(super) endpoint_writers: Vec<DcEndpointWriters>,
    pub(super) available_endpoints: usize,
    pub(super) available_pct: f64,
    pub(super) required_writers: usize,
    // Writer-floor bounds currently in effect for this DC.
    pub(super) floor_min: usize,
    pub(super) floor_target: usize,
    pub(super) floor_max: usize,
    /// True when the floor target was capped (e.g. by a global limit).
    pub(super) floor_capped: bool,
    pub(super) alive_writers: usize,
    pub(super) coverage_pct: f64,
    pub(super) rtt_ms: Option<f64>,
    pub(super) load: usize,
}
|
||||||
|
|
||||||
|
/// Active-writer count for a single endpoint within a DC.
#[derive(Serialize, Clone)]
pub(super) struct DcEndpointWriters {
    pub(super) endpoint: String,
    pub(super) active_writers: usize,
}
|
||||||
|
|
||||||
|
/// Response envelope for the per-DC status endpoint; `reason` is omitted
/// from JSON when `None`.
#[derive(Serialize, Clone)]
pub(super) struct DcStatusData {
    pub(super) middle_proxy_enabled: bool,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub(super) reason: Option<&'static str>,
    pub(super) generated_at_epoch_secs: u64,
    pub(super) dcs: Vec<DcStatus>,
}
|
||||||
|
|
||||||
|
/// A quarantined endpoint and the time left (ms) until it may be retried.
#[derive(Serialize, Clone)]
pub(super) struct MinimalQuarantineData {
    pub(super) endpoint: String,
    pub(super) remaining_ms: u64,
}
|
||||||
|
|
||||||
|
/// Network-path info for one DC: configured IP-version preference and the
/// currently selected v4/v6 addresses (when any).
#[derive(Serialize, Clone)]
pub(super) struct MinimalDcPathData {
    pub(super) dc: i16,
    pub(super) ip_preference: Option<&'static str>,
    pub(super) selected_addr_v4: Option<String>,
    pub(super) selected_addr_v6: Option<String>,
}
|
||||||
|
|
||||||
|
/// Flat snapshot of the ME runtime configuration and live state for the
/// minimal status endpoint.
///
/// Field groups, in order: pool generations and pending hard swap, floor
/// mode plus adaptive-floor tuning (configured vs detected/effective
/// values and current counts), keepalive and RPC signaling cadence,
/// reconnect backoff, pool drain/bind-stale policy, single-endpoint outage
/// handling, writer pick/sort policy, SOCKS KDF policy, and the current
/// quarantine list.
#[derive(Serialize, Clone)]
pub(super) struct MinimalMeRuntimeData {
    // --- generations / hard swap ---
    pub(super) active_generation: u64,
    pub(super) warm_generation: u64,
    pub(super) pending_hardswap_generation: u64,
    pub(super) pending_hardswap_age_secs: Option<u64>,
    pub(super) hardswap_enabled: bool,
    // --- floor mode & adaptive-floor tuning ---
    pub(super) floor_mode: &'static str,
    pub(super) adaptive_floor_idle_secs: u64,
    pub(super) adaptive_floor_min_writers_single_endpoint: u8,
    pub(super) adaptive_floor_min_writers_multi_endpoint: u8,
    pub(super) adaptive_floor_recover_grace_secs: u64,
    pub(super) adaptive_floor_writers_per_core_total: u16,
    pub(super) adaptive_floor_cpu_cores_override: u16,
    pub(super) adaptive_floor_max_extra_writers_single_per_core: u16,
    pub(super) adaptive_floor_max_extra_writers_multi_per_core: u16,
    pub(super) adaptive_floor_max_active_writers_per_core: u16,
    pub(super) adaptive_floor_max_warm_writers_per_core: u16,
    pub(super) adaptive_floor_max_active_writers_global: u32,
    pub(super) adaptive_floor_max_warm_writers_global: u32,
    // Detected vs effective core counts and derived caps.
    pub(super) adaptive_floor_cpu_cores_detected: u32,
    pub(super) adaptive_floor_cpu_cores_effective: u32,
    pub(super) adaptive_floor_global_cap_raw: u64,
    pub(super) adaptive_floor_global_cap_effective: u64,
    pub(super) adaptive_floor_target_writers_total: u64,
    pub(super) adaptive_floor_active_cap_configured: u64,
    pub(super) adaptive_floor_active_cap_effective: u64,
    pub(super) adaptive_floor_warm_cap_configured: u64,
    pub(super) adaptive_floor_warm_cap_effective: u64,
    // Live writer counts.
    pub(super) adaptive_floor_active_writers_current: u64,
    pub(super) adaptive_floor_warm_writers_current: u64,
    // --- keepalive / RPC signaling cadence ---
    pub(super) me_keepalive_enabled: bool,
    pub(super) me_keepalive_interval_secs: u64,
    pub(super) me_keepalive_jitter_secs: u64,
    pub(super) me_keepalive_payload_random: bool,
    pub(super) rpc_proxy_req_every_secs: u64,
    // --- reconnect backoff ---
    pub(super) me_reconnect_max_concurrent_per_dc: u32,
    pub(super) me_reconnect_backoff_base_ms: u64,
    pub(super) me_reconnect_backoff_cap_ms: u64,
    pub(super) me_reconnect_fast_retry_count: u32,
    // --- pool drain / bind-stale policy ---
    pub(super) me_pool_drain_ttl_secs: u64,
    pub(super) me_pool_force_close_secs: u64,
    pub(super) me_pool_min_fresh_ratio: f32,
    pub(super) me_bind_stale_mode: &'static str,
    pub(super) me_bind_stale_ttl_secs: u64,
    // --- single-endpoint outage handling ---
    pub(super) me_single_endpoint_shadow_writers: u8,
    pub(super) me_single_endpoint_outage_mode_enabled: bool,
    pub(super) me_single_endpoint_outage_disable_quarantine: bool,
    pub(super) me_single_endpoint_outage_backoff_min_ms: u64,
    pub(super) me_single_endpoint_outage_backoff_max_ms: u64,
    pub(super) me_single_endpoint_shadow_rotate_every_secs: u64,
    // --- writer selection policy ---
    pub(super) me_deterministic_writer_sort: bool,
    pub(super) me_writer_pick_mode: &'static str,
    pub(super) me_writer_pick_sample_size: u8,
    pub(super) me_socks_kdf_policy: &'static str,
    // --- quarantine ---
    pub(super) quarantined_endpoints_total: usize,
    pub(super) quarantined_endpoints: Vec<MinimalQuarantineData>,
}
|
||||||
|
|
||||||
|
/// Data section of the minimal "all" endpoint: writer status, DC status,
/// optional ME runtime snapshot (omitted from JSON when `None`), and the
/// per-DC network path.
#[derive(Serialize, Clone)]
pub(super) struct MinimalAllPayload {
    pub(super) me_writers: MeWritersData,
    pub(super) dcs: DcStatusData,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub(super) me_runtime: Option<MinimalMeRuntimeData>,
    pub(super) network_path: Vec<MinimalDcPathData>,
}
|
||||||
|
|
||||||
|
/// Envelope for the minimal "all" endpoint; when disabled or unavailable,
/// `reason` is set and `data` is omitted from the JSON output.
#[derive(Serialize, Clone)]
pub(super) struct MinimalAllData {
    pub(super) enabled: bool,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub(super) reason: Option<&'static str>,
    pub(super) generated_at_epoch_secs: u64,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub(super) data: Option<MinimalAllPayload>,
}
|
||||||
|
|
||||||
|
/// Connection links for a user, grouped by proxy secret flavor
/// (classic / secure / TLS).
#[derive(Serialize)]
pub(super) struct UserLinks {
    pub(super) classic: Vec<String>,
    pub(super) secure: Vec<String>,
    pub(super) tls: Vec<String>,
}
|
||||||
|
|
||||||
|
/// Full user record returned by the user-management API: configured limits
/// (`None` = unlimited/unset), live usage counters, and connection links.
#[derive(Serialize)]
pub(super) struct UserInfo {
    pub(super) username: String,
    pub(super) user_ad_tag: Option<String>,
    pub(super) max_tcp_conns: Option<usize>,
    pub(super) expiration_rfc3339: Option<String>,
    pub(super) data_quota_bytes: Option<u64>,
    pub(super) max_unique_ips: Option<usize>,
    // Live usage.
    pub(super) current_connections: u64,
    pub(super) active_unique_ips: usize,
    pub(super) active_unique_ips_list: Vec<IpAddr>,
    pub(super) recent_unique_ips: usize,
    pub(super) recent_unique_ips_list: Vec<IpAddr>,
    pub(super) total_octets: u64,
    pub(super) links: UserLinks,
}
|
||||||
|
|
||||||
|
/// Response to user creation: the created record plus its secret (the only
/// place the plain secret is returned).
#[derive(Serialize)]
pub(super) struct CreateUserResponse {
    pub(super) user: UserInfo,
    pub(super) secret: String,
}
|
||||||
|
|
||||||
|
/// Request body for creating a user. Only `username` is required; a
/// missing `secret` means one is generated server-side, and the remaining
/// `None` fields leave the corresponding limit unset.
#[derive(Deserialize)]
pub(super) struct CreateUserRequest {
    pub(super) username: String,
    pub(super) secret: Option<String>,
    pub(super) user_ad_tag: Option<String>,
    pub(super) max_tcp_conns: Option<usize>,
    pub(super) expiration_rfc3339: Option<String>,
    pub(super) data_quota_bytes: Option<u64>,
    pub(super) max_unique_ips: Option<usize>,
}
|
||||||
|
|
||||||
|
/// Request body for partially updating a user; every field is optional.
/// Mirrors `CreateUserRequest` minus the username (taken from the path).
#[derive(Deserialize)]
pub(super) struct PatchUserRequest {
    pub(super) secret: Option<String>,
    pub(super) user_ad_tag: Option<String>,
    pub(super) max_tcp_conns: Option<usize>,
    pub(super) expiration_rfc3339: Option<String>,
    pub(super) data_quota_bytes: Option<u64>,
    pub(super) max_unique_ips: Option<usize>,
}
|
||||||
|
|
||||||
|
/// Request body for rotating a user's secret. `Default` allows an empty /
/// absent body; a `None` secret means the server picks a new one.
#[derive(Default, Deserialize)]
pub(super) struct RotateSecretRequest {
    pub(super) secret: Option<String>,
}
|
||||||
|
|
||||||
|
pub(super) fn parse_optional_expiration(
|
||||||
|
value: Option<&str>,
|
||||||
|
) -> Result<Option<DateTime<Utc>>, ApiFailure> {
|
||||||
|
let Some(raw) = value else {
|
||||||
|
return Ok(None);
|
||||||
|
};
|
||||||
|
let parsed = DateTime::parse_from_rfc3339(raw)
|
||||||
|
.map_err(|_| ApiFailure::bad_request("expiration_rfc3339 must be valid RFC3339"))?;
|
||||||
|
Ok(Some(parsed.with_timezone(&Utc)))
|
||||||
|
}
|
||||||
|
|
||||||
|
pub(super) fn is_valid_user_secret(secret: &str) -> bool {
|
||||||
|
secret.len() == 32 && secret.chars().all(|c| c.is_ascii_hexdigit())
|
||||||
|
}
|
||||||
|
|
||||||
|
pub(super) fn is_valid_ad_tag(tag: &str) -> bool {
|
||||||
|
tag.len() == 32 && tag.chars().all(|c| c.is_ascii_hexdigit())
|
||||||
|
}
|
||||||
|
|
||||||
|
pub(super) fn is_valid_username(user: &str) -> bool {
|
||||||
|
!user.is_empty()
|
||||||
|
&& user.len() <= MAX_USERNAME_LEN
|
||||||
|
&& user
|
||||||
|
.chars()
|
||||||
|
.all(|ch| ch.is_ascii_alphanumeric() || matches!(ch, '_' | '-' | '.'))
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Generates a fresh user secret: 16 random bytes, hex-encoded into the
/// 32-character form accepted by `is_valid_user_secret`.
pub(super) fn random_user_secret() -> String {
    let mut bytes = [0u8; 16];
    // Thread-local RNG from the `rand` crate fills the buffer in place.
    rand::rng().fill(&mut bytes);
    hex::encode(bytes)
}
|
||||||
294
src/api/runtime_edge.rs
Normal file
294
src/api/runtime_edge.rs
Normal file
@@ -0,0 +1,294 @@
|
|||||||
|
use std::cmp::Reverse;
|
||||||
|
use std::time::{Duration, Instant, SystemTime, UNIX_EPOCH};
|
||||||
|
|
||||||
|
use serde::Serialize;
|
||||||
|
|
||||||
|
use crate::config::ProxyConfig;
|
||||||
|
|
||||||
|
use super::ApiShared;
|
||||||
|
use super::events::ApiEventRecord;
|
||||||
|
|
||||||
|
const FEATURE_DISABLED_REASON: &str = "feature_disabled";
|
||||||
|
const SOURCE_UNAVAILABLE_REASON: &str = "source_unavailable";
|
||||||
|
const EVENTS_DEFAULT_LIMIT: usize = 50;
|
||||||
|
const EVENTS_MAX_LIMIT: usize = 1000;
|
||||||
|
|
||||||
|
/// Per-user row in the connections summary: live connection count and
/// cumulative octets (both directions combined).
#[derive(Clone, Serialize)]
pub(super) struct RuntimeEdgeConnectionUserData {
    pub(super) username: String,
    pub(super) current_connections: u64,
    pub(super) total_octets: u64,
}
|
||||||
|
|
||||||
|
/// Global connection totals, split by path (middle-proxy vs direct), plus
/// the count of users with at least one live connection.
#[derive(Clone, Serialize)]
pub(super) struct RuntimeEdgeConnectionTotalsData {
    pub(super) current_connections: u64,
    pub(super) current_connections_me: u64,
    pub(super) current_connections_direct: u64,
    pub(super) active_users: usize,
}
|
||||||
|
|
||||||
|
/// Top-N user lists: one ranked by live connections, one by cumulative
/// throughput; `limit` is the effective N applied to both.
#[derive(Clone, Serialize)]
pub(super) struct RuntimeEdgeConnectionTopData {
    pub(super) limit: usize,
    pub(super) by_connections: Vec<RuntimeEdgeConnectionUserData>,
    pub(super) by_throughput: Vec<RuntimeEdgeConnectionUserData>,
}
|
||||||
|
|
||||||
|
/// Cache provenance for a connections-summary response: configured TTL and
/// whether this payload came from the cache (and, if so, whether the entry
/// had already expired when served).
#[derive(Clone, Serialize)]
pub(super) struct RuntimeEdgeConnectionCacheData {
    pub(super) ttl_ms: u64,
    pub(super) served_from_cache: bool,
    pub(super) stale_cache_used: bool,
}
|
||||||
|
|
||||||
|
/// Telemetry policy flags exposed with the summary: whether per-user
/// telemetry is enabled, and that throughput numbers are cumulative
/// counters (not rates).
#[derive(Clone, Serialize)]
pub(super) struct RuntimeEdgeConnectionTelemetryData {
    pub(super) user_enabled: bool,
    pub(super) throughput_is_cumulative: bool,
}
|
||||||
|
|
||||||
|
/// Full connections-summary payload: cache provenance, global totals,
/// top-N lists, and telemetry flags. Cloneable so it can be cached.
#[derive(Clone, Serialize)]
pub(super) struct RuntimeEdgeConnectionsSummaryPayload {
    pub(super) cache: RuntimeEdgeConnectionCacheData,
    pub(super) totals: RuntimeEdgeConnectionTotalsData,
    pub(super) top: RuntimeEdgeConnectionTopData,
    pub(super) telemetry: RuntimeEdgeConnectionTelemetryData,
}
|
||||||
|
|
||||||
|
/// Envelope for the connections-summary endpoint; `reason` / `data` are
/// omitted from the serialized JSON when `None`.
#[derive(Serialize)]
pub(super) struct RuntimeEdgeConnectionsSummaryData {
    pub(super) enabled: bool,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub(super) reason: Option<&'static str>,
    pub(super) generated_at_epoch_secs: u64,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub(super) data: Option<RuntimeEdgeConnectionsSummaryPayload>,
}
|
||||||
|
|
||||||
|
/// One cached connections payload with its monotonic expiry instant and
/// the wall-clock second it was generated (for the response envelope).
#[derive(Clone)]
pub(crate) struct EdgeConnectionsCacheEntry {
    /// Monotonic deadline; the entry is fresh while `Instant::now()` is
    /// earlier than this.
    pub(super) expires_at: Instant,
    pub(super) payload: RuntimeEdgeConnectionsSummaryPayload,
    pub(super) generated_at_epoch_secs: u64,
}
|
||||||
|
|
||||||
|
/// Data section of the recent-events endpoint: ring-buffer capacity, how
/// many events were dropped overall, and the returned events.
#[derive(Serialize)]
pub(super) struct RuntimeEdgeEventsPayload {
    pub(super) capacity: usize,
    pub(super) dropped_total: u64,
    pub(super) events: Vec<ApiEventRecord>,
}
|
||||||
|
|
||||||
|
/// Envelope for the recent-events endpoint; `reason` / `data` are omitted
/// from the serialized JSON when `None`.
#[derive(Serialize)]
pub(super) struct RuntimeEdgeEventsData {
    pub(super) enabled: bool,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub(super) reason: Option<&'static str>,
    pub(super) generated_at_epoch_secs: u64,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub(super) data: Option<RuntimeEdgeEventsPayload>,
}
|
||||||
|
|
||||||
|
/// Builds the connections-summary response for the runtime-edge API.
///
/// Three outcomes:
/// * feature off -> `enabled: false` with `feature_disabled`, no data;
/// * payload unavailable (no fresh result and no cached fallback) ->
///   `enabled: true` with `source_unavailable`, no data;
/// * otherwise the cached-or-recomputed payload, timestamped with the
///   moment that payload was generated (not the request time).
pub(super) async fn build_runtime_connections_summary_data(
    shared: &ApiShared,
    cfg: &ProxyConfig,
) -> RuntimeEdgeConnectionsSummaryData {
    let now_epoch_secs = now_epoch_secs();
    let api_cfg = &cfg.server.api;
    // Feature gate: answer without touching the stats source at all.
    if !api_cfg.runtime_edge_enabled {
        return RuntimeEdgeConnectionsSummaryData {
            enabled: false,
            reason: Some(FEATURE_DISABLED_REASON),
            generated_at_epoch_secs: now_epoch_secs,
            data: None,
        };
    }

    // Cache-aware fetch; `None` means another recompute was in flight and
    // there was no cached entry to fall back on.
    let (generated_at_epoch_secs, payload) = match get_connections_payload_cached(
        shared,
        api_cfg.runtime_edge_cache_ttl_ms,
        api_cfg.runtime_edge_top_n,
    )
    .await
    {
        Some(v) => v,
        None => {
            return RuntimeEdgeConnectionsSummaryData {
                enabled: true,
                reason: Some(SOURCE_UNAVAILABLE_REASON),
                generated_at_epoch_secs: now_epoch_secs,
                data: None,
            };
        }
    };

    RuntimeEdgeConnectionsSummaryData {
        enabled: true,
        reason: None,
        generated_at_epoch_secs,
        data: Some(payload),
    }
}
|
||||||
|
|
||||||
|
/// Builds the recent-events response.
///
/// Honors the runtime-edge feature gate, parses an optional `limit=` query
/// parameter (default `EVENTS_DEFAULT_LIMIT`, capped at
/// `EVENTS_MAX_LIMIT`), and snapshots that many events from the shared
/// in-memory event ring.
pub(super) fn build_runtime_events_recent_data(
    shared: &ApiShared,
    cfg: &ProxyConfig,
    query: Option<&str>,
) -> RuntimeEdgeEventsData {
    let now_epoch_secs = now_epoch_secs();
    let api_cfg = &cfg.server.api;
    if !api_cfg.runtime_edge_enabled {
        return RuntimeEdgeEventsData {
            enabled: false,
            reason: Some(FEATURE_DISABLED_REASON),
            generated_at_epoch_secs: now_epoch_secs,
            data: None,
        };
    }

    let limit = parse_recent_events_limit(query, EVENTS_DEFAULT_LIMIT, EVENTS_MAX_LIMIT);
    let snapshot = shared.runtime_events.snapshot(limit);

    RuntimeEdgeEventsData {
        enabled: true,
        reason: None,
        generated_at_epoch_secs: now_epoch_secs,
        data: Some(RuntimeEdgeEventsPayload {
            capacity: snapshot.capacity,
            dropped_total: snapshot.dropped_total,
            events: snapshot.events,
        }),
    }
}
|
||||||
|
|
||||||
|
/// Returns `(generated_at_epoch_secs, payload)` for the connections
/// summary, serving from cache when possible.
///
/// Protocol:
/// * With a positive TTL and an unexpired cache entry, return a copy
///   flagged `served_from_cache = true`.
/// * Otherwise attempt `runtime_edge_recompute_lock`; if another task
///   holds it, serve whatever cached entry exists — even an expired one —
///   flagged `stale_cache_used = true`, or `None` when the cache is empty.
/// * As the lock holder, recompute the payload and (when TTL > 0) refresh
///   the cache with a fresh expiry.
///
/// A TTL of 0 disables both the cache read and the cache write, so every
/// lock-winning request recomputes.
async fn get_connections_payload_cached(
    shared: &ApiShared,
    cache_ttl_ms: u64,
    top_n: usize,
) -> Option<(u64, RuntimeEdgeConnectionsSummaryPayload)> {
    if cache_ttl_ms > 0 {
        let now = Instant::now();
        // Clone out of the mutex so the lock is not held across the return.
        let cached = shared.runtime_edge_connections_cache.lock().await.clone();
        if let Some(entry) = cached
            && now < entry.expires_at
        {
            let mut payload = entry.payload;
            payload.cache.served_from_cache = true;
            payload.cache.stale_cache_used = false;
            return Some((entry.generated_at_epoch_secs, payload));
        }
    }

    // Only one task recomputes at a time; losers fall back to stale cache.
    let Ok(_guard) = shared.runtime_edge_recompute_lock.try_lock() else {
        let cached = shared.runtime_edge_connections_cache.lock().await.clone();
        if let Some(entry) = cached {
            let mut payload = entry.payload;
            payload.cache.served_from_cache = true;
            payload.cache.stale_cache_used = true;
            return Some((entry.generated_at_epoch_secs, payload));
        }
        return None;
    };

    let generated_at_epoch_secs = now_epoch_secs();
    let payload = recompute_connections_payload(shared, cache_ttl_ms, top_n).await;

    if cache_ttl_ms > 0 {
        let entry = EdgeConnectionsCacheEntry {
            expires_at: Instant::now() + Duration::from_millis(cache_ttl_ms),
            payload: payload.clone(),
            generated_at_epoch_secs,
        };
        *shared.runtime_edge_connections_cache.lock().await = Some(entry);
    }

    Some((generated_at_epoch_secs, payload))
}
|
||||||
|
|
||||||
|
async fn recompute_connections_payload(
|
||||||
|
shared: &ApiShared,
|
||||||
|
cache_ttl_ms: u64,
|
||||||
|
top_n: usize,
|
||||||
|
) -> RuntimeEdgeConnectionsSummaryPayload {
|
||||||
|
let mut rows = Vec::<RuntimeEdgeConnectionUserData>::new();
|
||||||
|
let mut active_users = 0usize;
|
||||||
|
for entry in shared.stats.iter_user_stats() {
|
||||||
|
let user_stats = entry.value();
|
||||||
|
let current_connections = user_stats
|
||||||
|
.curr_connects
|
||||||
|
.load(std::sync::atomic::Ordering::Relaxed);
|
||||||
|
let total_octets = user_stats
|
||||||
|
.octets_from_client
|
||||||
|
.load(std::sync::atomic::Ordering::Relaxed)
|
||||||
|
.saturating_add(
|
||||||
|
user_stats
|
||||||
|
.octets_to_client
|
||||||
|
.load(std::sync::atomic::Ordering::Relaxed),
|
||||||
|
);
|
||||||
|
if current_connections > 0 {
|
||||||
|
active_users = active_users.saturating_add(1);
|
||||||
|
}
|
||||||
|
rows.push(RuntimeEdgeConnectionUserData {
|
||||||
|
username: entry.key().clone(),
|
||||||
|
current_connections,
|
||||||
|
total_octets,
|
||||||
|
});
|
||||||
|
}
|
||||||
|
|
||||||
|
let limit = top_n.max(1);
|
||||||
|
let mut by_connections = rows.clone();
|
||||||
|
by_connections.sort_by_key(|row| (Reverse(row.current_connections), row.username.clone()));
|
||||||
|
by_connections.truncate(limit);
|
||||||
|
|
||||||
|
let mut by_throughput = rows;
|
||||||
|
by_throughput.sort_by_key(|row| (Reverse(row.total_octets), row.username.clone()));
|
||||||
|
by_throughput.truncate(limit);
|
||||||
|
|
||||||
|
let telemetry = shared.stats.telemetry_policy();
|
||||||
|
RuntimeEdgeConnectionsSummaryPayload {
|
||||||
|
cache: RuntimeEdgeConnectionCacheData {
|
||||||
|
ttl_ms: cache_ttl_ms,
|
||||||
|
served_from_cache: false,
|
||||||
|
stale_cache_used: false,
|
||||||
|
},
|
||||||
|
totals: RuntimeEdgeConnectionTotalsData {
|
||||||
|
current_connections: shared.stats.get_current_connections_total(),
|
||||||
|
current_connections_me: shared.stats.get_current_connections_me(),
|
||||||
|
current_connections_direct: shared.stats.get_current_connections_direct(),
|
||||||
|
active_users,
|
||||||
|
},
|
||||||
|
top: RuntimeEdgeConnectionTopData {
|
||||||
|
limit,
|
||||||
|
by_connections,
|
||||||
|
by_throughput,
|
||||||
|
},
|
||||||
|
telemetry: RuntimeEdgeConnectionTelemetryData {
|
||||||
|
user_enabled: telemetry.user_enabled,
|
||||||
|
throughput_is_cumulative: true,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Extracts a `limit=<n>` value from a raw query string.
///
/// Returns `default_limit` when no query is present or no pair parses;
/// otherwise the first parseable `limit` value, clamped into
/// `1..=max_limit`.
fn parse_recent_events_limit(query: Option<&str>, default_limit: usize, max_limit: usize) -> usize {
    let Some(query) = query else {
        return default_limit;
    };
    query
        .split('&')
        .filter_map(|pair| pair.split_once('='))
        .filter(|(key, _)| *key == "limit")
        .filter_map(|(_, raw)| raw.parse::<usize>().ok())
        .map(|parsed| parsed.clamp(1, max_limit))
        .next()
        .unwrap_or(default_limit)
}
|
||||||
|
|
||||||
|
/// Current wall-clock time as whole seconds since the Unix epoch.
/// Yields 0 if the system clock reads earlier than the epoch.
fn now_epoch_secs() -> u64 {
    match SystemTime::now().duration_since(UNIX_EPOCH) {
        Ok(elapsed) => elapsed.as_secs(),
        Err(_) => 0,
    }
}
|
||||||
186
src/api/runtime_init.rs
Normal file
186
src/api/runtime_init.rs
Normal file
@@ -0,0 +1,186 @@
|
|||||||
|
use serde::Serialize;
|
||||||
|
|
||||||
|
use crate::startup::{
|
||||||
|
COMPONENT_ME_CONNECTIVITY_PING, COMPONENT_ME_POOL_CONSTRUCT, COMPONENT_ME_POOL_INIT_STAGE1,
|
||||||
|
COMPONENT_ME_PROXY_CONFIG_V4, COMPONENT_ME_PROXY_CONFIG_V6, COMPONENT_ME_SECRET_FETCH,
|
||||||
|
StartupComponentStatus, StartupMeStatus, compute_progress_pct,
|
||||||
|
};
|
||||||
|
|
||||||
|
use super::ApiShared;
|
||||||
|
|
||||||
|
/// One startup component's progress row: identity, lifecycle status,
/// timing (epoch-ms timestamps and derived duration), attempt count, and
/// optional free-form details (omitted from JSON when `None`).
#[derive(Serialize)]
pub(super) struct RuntimeInitializationComponentData {
    pub(super) id: &'static str,
    pub(super) title: &'static str,
    pub(super) status: &'static str,
    pub(super) started_at_epoch_ms: Option<u64>,
    pub(super) finished_at_epoch_ms: Option<u64>,
    pub(super) duration_ms: Option<u64>,
    pub(super) attempts: u32,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub(super) details: Option<String>,
}
|
||||||
|
|
||||||
|
/// Middle-proxy initialization status: lifecycle state, current stage,
/// computed progress percentage, attempt counter, retry-limit description,
/// and the last error (omitted from JSON when `None`).
#[derive(Serialize)]
pub(super) struct RuntimeInitializationMeData {
    pub(super) status: &'static str,
    pub(super) current_stage: String,
    pub(super) progress_pct: f64,
    pub(super) init_attempt: u32,
    pub(super) retry_limit: String,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub(super) last_error: Option<String>,
}
|
||||||
|
|
||||||
|
/// Full startup/initialization report: overall status and progress,
/// timing, transport mode, the ME-specific sub-report, and per-component
/// rows.
#[derive(Serialize)]
pub(super) struct RuntimeInitializationData {
    pub(super) status: &'static str,
    pub(super) degraded: bool,
    pub(super) current_stage: String,
    pub(super) progress_pct: f64,
    pub(super) started_at_epoch_secs: u64,
    /// Set once startup reached ready; omitted from JSON when `None`.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub(super) ready_at_epoch_secs: Option<u64>,
    pub(super) total_elapsed_ms: u64,
    pub(super) transport_mode: String,
    pub(super) me: RuntimeInitializationMeData,
    pub(super) components: Vec<RuntimeInitializationComponentData>,
}
|
||||||
|
|
||||||
|
/// Compact startup summary (status, stage, progress) for callers that do
/// not need the full per-component report. Not serialized directly.
#[derive(Clone)]
pub(super) struct RuntimeStartupSummaryData {
    pub(super) status: &'static str,
    pub(super) stage: String,
    pub(super) progress_pct: f64,
}
|
||||||
|
|
||||||
|
/// Builds the compact startup summary from the startup tracker.
///
/// NOTE(review): `current_me_pool_stage_progress` takes its own tracker
/// snapshot internally, so the progress weighting may be based on a
/// slightly later snapshot than `snapshot` here.
pub(super) async fn build_runtime_startup_summary(shared: &ApiShared) -> RuntimeStartupSummaryData {
    let snapshot = shared.startup_tracker.snapshot().await;
    // Optional fine-grained progress for the ME pool init stage.
    let me_pool_progress = current_me_pool_stage_progress(shared).await;
    let progress_pct = compute_progress_pct(&snapshot, me_pool_progress);
    RuntimeStartupSummaryData {
        status: snapshot.status.as_str(),
        stage: snapshot.current_stage,
        progress_pct,
    }
}
|
||||||
|
|
||||||
|
/// Builds the full initialization report from the startup tracker.
///
/// Combines the overall progress (`compute_progress_pct`) with an
/// ME-specific progress figure (`compute_me_progress_pct`), both of which
/// can incorporate live ME pool stage-1 progress when that stage is
/// running.
pub(super) async fn build_runtime_initialization_data(
    shared: &ApiShared,
) -> RuntimeInitializationData {
    let snapshot = shared.startup_tracker.snapshot().await;
    let me_pool_progress = current_me_pool_stage_progress(shared).await;
    let progress_pct = compute_progress_pct(&snapshot, me_pool_progress);
    let me_progress_pct = compute_me_progress_pct(&snapshot, me_pool_progress);

    RuntimeInitializationData {
        status: snapshot.status.as_str(),
        degraded: snapshot.degraded,
        current_stage: snapshot.current_stage,
        progress_pct,
        started_at_epoch_secs: snapshot.started_at_epoch_secs,
        ready_at_epoch_secs: snapshot.ready_at_epoch_secs,
        total_elapsed_ms: snapshot.total_elapsed_ms,
        transport_mode: snapshot.transport_mode,
        me: RuntimeInitializationMeData {
            status: snapshot.me.status.as_str(),
            current_stage: snapshot.me.current_stage,
            progress_pct: me_progress_pct,
            init_attempt: snapshot.me.init_attempt,
            retry_limit: snapshot.me.retry_limit,
            last_error: snapshot.me.last_error,
        },
        // Map tracker component records 1:1 into the serialized rows.
        components: snapshot
            .components
            .into_iter()
            .map(|component| RuntimeInitializationComponentData {
                id: component.id,
                title: component.title,
                status: component.status.as_str(),
                started_at_epoch_ms: component.started_at_epoch_ms,
                finished_at_epoch_ms: component.finished_at_epoch_ms,
                duration_ms: component.duration_ms,
                attempts: component.attempts,
                details: component.details,
            })
            .collect(),
    }
}
|
||||||
|
|
||||||
|
/// Computes the ME initialization progress percentage (0-100).
///
/// Terminal ME states (ready / failed / skipped) report 100, pending
/// reports 0. While initializing, progress is a weighted average over the
/// ME-related components only: finished components count fully, pending
/// ones count zero, and a running pool-init stage-1 contributes its live
/// `me_pool_progress` fraction (clamped to [0, 1]).
fn compute_me_progress_pct(
    snapshot: &crate::startup::StartupSnapshot,
    me_pool_progress: Option<f64>,
) -> f64 {
    match snapshot.me.status {
        StartupMeStatus::Pending => 0.0,
        StartupMeStatus::Ready | StartupMeStatus::Failed | StartupMeStatus::Skipped => 100.0,
        StartupMeStatus::Initializing => {
            let mut total_weight = 0.0f64;
            let mut completed_weight = 0.0f64;
            for component in &snapshot.components {
                // Only ME components participate in this figure.
                if !is_me_component(component.id) {
                    continue;
                }
                total_weight += component.weight;
                let unit_progress = match component.status {
                    StartupComponentStatus::Pending => 0.0,
                    StartupComponentStatus::Running => {
                        // Only pool-init stage 1 exposes mid-stage progress.
                        if component.id == COMPONENT_ME_POOL_INIT_STAGE1 {
                            me_pool_progress.unwrap_or(0.0).clamp(0.0, 1.0)
                        } else {
                            0.0
                        }
                    }
                    StartupComponentStatus::Ready
                    | StartupComponentStatus::Failed
                    | StartupComponentStatus::Skipped => 1.0,
                };
                completed_weight += component.weight * unit_progress;
            }
            // Guard against a zero (or effectively zero) weight sum.
            if total_weight <= f64::EPSILON {
                0.0
            } else {
                ((completed_weight / total_weight) * 100.0).clamp(0.0, 100.0)
            }
        }
    }
}
|
||||||
|
|
||||||
|
fn is_me_component(component_id: &str) -> bool {
|
||||||
|
matches!(
|
||||||
|
component_id,
|
||||||
|
COMPONENT_ME_SECRET_FETCH
|
||||||
|
| COMPONENT_ME_PROXY_CONFIG_V4
|
||||||
|
| COMPONENT_ME_PROXY_CONFIG_V6
|
||||||
|
| COMPONENT_ME_POOL_CONSTRUCT
|
||||||
|
| COMPONENT_ME_POOL_INIT_STAGE1
|
||||||
|
| COMPONENT_ME_CONNECTIVITY_PING
|
||||||
|
)
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Live progress fraction (0..=1) for ME pool init stage 1, or `None` when
/// the ME subsystem is not currently initializing or no pool exists yet.
///
/// The figure blends two coverage ratios from the pool status snapshot:
/// 70% weight on DC-group coverage (groups with at least one alive writer)
/// and 30% on writer coverage (alive vs required writers).
async fn current_me_pool_stage_progress(shared: &ApiShared) -> Option<f64> {
    let snapshot = shared.startup_tracker.snapshot().await;
    if snapshot.me.status != StartupMeStatus::Initializing {
        return None;
    }

    // `?` bails out when no pool has been constructed yet.
    let pool = shared.me_pool.read().await.clone()?;
    let status = pool.api_status_snapshot().await;
    let configured_dc_groups = status.configured_dc_groups;
    let covered_dc_groups = status
        .dcs
        .iter()
        .filter(|dc| dc.alive_writers > 0)
        .count();

    let dc_coverage = ratio_01(covered_dc_groups, configured_dc_groups);
    let writer_coverage = ratio_01(status.alive_writers, status.required_writers);
    Some((0.7 * dc_coverage + 0.3 * writer_coverage).clamp(0.0, 1.0))
}
|
||||||
|
|
||||||
|
/// Fraction `part / total` clamped to `[0, 1]`.
///
/// Defined as `0.0` when `total` is zero (avoids division by zero).
fn ratio_01(part: usize, total: usize) -> f64 {
    match total {
        0 => 0.0,
        _ => {
            let raw = (part as f64) / (total as f64);
            raw.clamp(0.0, 1.0)
        }
    }
}
|
||||||
534
src/api/runtime_min.rs
Normal file
534
src/api/runtime_min.rs
Normal file
@@ -0,0 +1,534 @@
|
|||||||
|
use std::collections::BTreeSet;
|
||||||
|
use std::time::{SystemTime, UNIX_EPOCH};
|
||||||
|
|
||||||
|
use serde::Serialize;
|
||||||
|
|
||||||
|
use crate::config::ProxyConfig;
|
||||||
|
|
||||||
|
use super::ApiShared;
|
||||||
|
|
||||||
|
const SOURCE_UNAVAILABLE_REASON: &str = "source_unavailable";
|
||||||
|
|
||||||
|
/// API snapshot of the configured API whitelist.
#[derive(Serialize)]
pub(super) struct SecurityWhitelistData {
    // Unix time (seconds) when this snapshot was produced.
    pub(super) generated_at_epoch_secs: u64,
    // True when at least one whitelist entry is configured.
    pub(super) enabled: bool,
    // Number of entries; equals `entries.len()`.
    pub(super) entries_total: usize,
    // Whitelist entries rendered as strings.
    pub(super) entries: Vec<String>,
}

/// Generation counters of the ME pool (active / warm / pending hardswap / draining).
#[derive(Serialize)]
pub(super) struct RuntimeMePoolStateGenerationData {
    pub(super) active_generation: u64,
    pub(super) warm_generation: u64,
    pub(super) pending_hardswap_generation: u64,
    // Age of the pending hardswap in seconds, if one is pending.
    pub(super) pending_hardswap_age_secs: Option<u64>,
    // Sorted, de-duplicated generations that currently have draining writers
    // (collected via a BTreeSet in the builder).
    pub(super) draining_generations: Vec<u64>,
}

/// Hardswap feature state: whether it is enabled and whether one is pending.
#[derive(Serialize)]
pub(super) struct RuntimeMePoolStateHardswapData {
    pub(super) enabled: bool,
    pub(super) pending: bool,
}

/// Writer counts grouped by contour state ("warm" / "active" / everything else).
#[derive(Serialize)]
pub(super) struct RuntimeMePoolStateWriterContourData {
    pub(super) warm: usize,
    pub(super) active: usize,
    pub(super) draining: usize,
}

/// Writer counts grouped by health (mutually exclusive buckets in the builder).
#[derive(Serialize)]
pub(super) struct RuntimeMePoolStateWriterHealthData {
    pub(super) healthy: usize,
    pub(super) degraded: usize,
    pub(super) draining: usize,
}

/// Aggregate writer statistics plus the contour/health breakdowns.
#[derive(Serialize)]
pub(super) struct RuntimeMePoolStateWriterData {
    pub(super) total: usize,
    // total minus draining (saturating in the builder).
    pub(super) alive_non_draining: usize,
    pub(super) draining: usize,
    pub(super) degraded: usize,
    pub(super) contour: RuntimeMePoolStateWriterContourData,
    pub(super) health: RuntimeMePoolStateWriterHealthData,
}

/// In-flight refill connections for a single DC / address-family pair.
#[derive(Serialize)]
pub(super) struct RuntimeMePoolStateRefillDcData {
    pub(super) dc: i16,
    pub(super) family: &'static str,
    pub(super) inflight: usize,
}

/// Refill (connection replenishment) totals and the per-DC breakdown.
#[derive(Serialize)]
pub(super) struct RuntimeMePoolStateRefillData {
    pub(super) inflight_endpoints_total: usize,
    pub(super) inflight_dc_total: usize,
    pub(super) by_dc: Vec<RuntimeMePoolStateRefillDcData>,
}

/// Full ME pool state payload; present only when the pool is available.
#[derive(Serialize)]
pub(super) struct RuntimeMePoolStatePayload {
    pub(super) generations: RuntimeMePoolStateGenerationData,
    pub(super) hardswap: RuntimeMePoolStateHardswapData,
    pub(super) writers: RuntimeMePoolStateWriterData,
    pub(super) refill: RuntimeMePoolStateRefillData,
}

/// Top-level ME pool state response envelope.
///
/// When the pool is unavailable, `enabled` is false, `reason` carries
/// `SOURCE_UNAVAILABLE_REASON`, and `data` is omitted from the JSON.
#[derive(Serialize)]
pub(super) struct RuntimeMePoolStateData {
    pub(super) enabled: bool,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub(super) reason: Option<&'static str>,
    pub(super) generated_at_epoch_secs: u64,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub(super) data: Option<RuntimeMePoolStatePayload>,
}
|
||||||
|
|
||||||
|
/// Cumulative ME connection-quality counters (sourced from `shared.stats`).
#[derive(Serialize)]
pub(super) struct RuntimeMeQualityCountersData {
    pub(super) idle_close_by_peer_total: u64,
    pub(super) reader_eof_total: u64,
    pub(super) kdf_drift_total: u64,
    pub(super) kdf_port_only_drift_total: u64,
    pub(super) reconnect_attempt_total: u64,
    pub(super) reconnect_success_total: u64,
}

/// Cumulative routing drop counters, broken down by drop cause.
#[derive(Serialize)]
pub(super) struct RuntimeMeQualityRouteDropData {
    pub(super) no_conn_total: u64,
    pub(super) channel_closed_total: u64,
    pub(super) queue_full_total: u64,
    pub(super) queue_full_base_total: u64,
    pub(super) queue_full_high_total: u64,
}

/// Per-DC round-trip-time and writer-coverage row.
#[derive(Serialize)]
pub(super) struct RuntimeMeQualityDcRttData {
    pub(super) dc: i16,
    // Smoothed RTT in milliseconds, if any sample exists.
    pub(super) rtt_ema_ms: Option<f64>,
    pub(super) alive_writers: usize,
    pub(super) required_writers: usize,
    pub(super) coverage_pct: f64,
}

/// Full ME quality payload; present only when the pool is available.
#[derive(Serialize)]
pub(super) struct RuntimeMeQualityPayload {
    pub(super) counters: RuntimeMeQualityCountersData,
    pub(super) route_drops: RuntimeMeQualityRouteDropData,
    pub(super) dc_rtt: Vec<RuntimeMeQualityDcRttData>,
}

/// Top-level ME quality response envelope (same enabled/reason/data shape
/// as the other runtime endpoints in this module).
#[derive(Serialize)]
pub(super) struct RuntimeMeQualityData {
    pub(super) enabled: bool,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub(super) reason: Option<&'static str>,
    pub(super) generated_at_epoch_secs: u64,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub(super) data: Option<RuntimeMeQualityPayload>,
}
|
||||||
|
|
||||||
|
/// Upstream connect policy as reported by the upstream manager.
#[derive(Serialize)]
pub(super) struct RuntimeUpstreamQualityPolicyData {
    pub(super) connect_retry_attempts: u32,
    pub(super) connect_retry_backoff_ms: u64,
    pub(super) connect_budget_ms: u64,
    pub(super) unhealthy_fail_threshold: u32,
    pub(super) connect_failfast_hard_errors: bool,
}

/// Cumulative upstream connect counters (sourced from `shared.stats`).
#[derive(Serialize)]
pub(super) struct RuntimeUpstreamQualityCountersData {
    pub(super) connect_attempt_total: u64,
    pub(super) connect_success_total: u64,
    pub(super) connect_fail_total: u64,
    pub(super) connect_failfast_hard_error_total: u64,
}

/// Aggregate upstream counts by health and by route kind.
#[derive(Serialize)]
pub(super) struct RuntimeUpstreamQualitySummaryData {
    pub(super) configured_total: usize,
    pub(super) healthy_total: usize,
    pub(super) unhealthy_total: usize,
    pub(super) direct_total: usize,
    pub(super) socks4_total: usize,
    pub(super) socks5_total: usize,
}

/// Per-DC latency / IP-family preference row for one upstream.
#[derive(Serialize)]
pub(super) struct RuntimeUpstreamQualityDcData {
    pub(super) dc: i16,
    pub(super) latency_ema_ms: Option<f64>,
    // Rendered from `IpPreference` ("unknown" / "prefer_v4" / "prefer_v6" /
    // "both_work" / "unavailable") in the builder.
    pub(super) ip_preference: &'static str,
}

/// Detail row for a single configured upstream.
#[derive(Serialize)]
pub(super) struct RuntimeUpstreamQualityUpstreamData {
    pub(super) upstream_id: usize,
    // "direct" / "socks4" / "socks5".
    pub(super) route_kind: &'static str,
    pub(super) address: String,
    pub(super) weight: u16,
    pub(super) scopes: String,
    pub(super) healthy: bool,
    pub(super) fails: u32,
    pub(super) last_check_age_secs: u64,
    pub(super) effective_latency_ms: Option<f64>,
    pub(super) dc: Vec<RuntimeUpstreamQualityDcData>,
}

/// Top-level upstream quality response.
///
/// Unlike the other envelopes here, `policy` and `counters` are always
/// reported even when the manager snapshot is unavailable; only `summary`
/// and `upstreams` are omitted in that case.
#[derive(Serialize)]
pub(super) struct RuntimeUpstreamQualityData {
    pub(super) enabled: bool,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub(super) reason: Option<&'static str>,
    pub(super) generated_at_epoch_secs: u64,
    pub(super) policy: RuntimeUpstreamQualityPolicyData,
    pub(super) counters: RuntimeUpstreamQualityCountersData,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub(super) summary: Option<RuntimeUpstreamQualitySummaryData>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub(super) upstreams: Option<Vec<RuntimeUpstreamQualityUpstreamData>>,
}
|
||||||
|
|
||||||
|
/// A STUN-learned reflected address plus its age.
#[derive(Serialize)]
pub(super) struct RuntimeNatStunReflectionData {
    pub(super) addr: String,
    pub(super) age_secs: u64,
}

/// NAT probe feature flags and attempt counter.
#[derive(Serialize)]
pub(super) struct RuntimeNatStunFlagsData {
    pub(super) nat_probe_enabled: bool,
    // True when the probe was disabled at runtime (as opposed to by config).
    pub(super) nat_probe_disabled_runtime: bool,
    pub(super) nat_probe_attempts: u8,
}

/// Configured vs. currently-live STUN servers.
#[derive(Serialize)]
pub(super) struct RuntimeNatStunServersData {
    pub(super) configured: Vec<String>,
    pub(super) live: Vec<String>,
    // Equals `live.len()`.
    pub(super) live_total: usize,
}

/// Reflected addresses per IP family; a family is omitted when unknown.
#[derive(Serialize)]
pub(super) struct RuntimeNatStunReflectionBlockData {
    #[serde(skip_serializing_if = "Option::is_none")]
    pub(super) v4: Option<RuntimeNatStunReflectionData>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub(super) v6: Option<RuntimeNatStunReflectionData>,
}

/// Full NAT/STUN payload; present only when the ME pool is available.
#[derive(Serialize)]
pub(super) struct RuntimeNatStunPayload {
    pub(super) flags: RuntimeNatStunFlagsData,
    pub(super) servers: RuntimeNatStunServersData,
    pub(super) reflection: RuntimeNatStunReflectionBlockData,
    // Remaining STUN retry backoff, if a backoff is in effect.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub(super) stun_backoff_remaining_ms: Option<u64>,
}

/// Top-level NAT/STUN response envelope (enabled/reason/data shape shared
/// with the other runtime endpoints in this module).
#[derive(Serialize)]
pub(super) struct RuntimeNatStunData {
    pub(super) enabled: bool,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub(super) reason: Option<&'static str>,
    pub(super) generated_at_epoch_secs: u64,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub(super) data: Option<RuntimeNatStunPayload>,
}
|
||||||
|
|
||||||
|
pub(super) fn build_security_whitelist_data(cfg: &ProxyConfig) -> SecurityWhitelistData {
|
||||||
|
let entries = cfg
|
||||||
|
.server
|
||||||
|
.api
|
||||||
|
.whitelist
|
||||||
|
.iter()
|
||||||
|
.map(ToString::to_string)
|
||||||
|
.collect::<Vec<_>>();
|
||||||
|
SecurityWhitelistData {
|
||||||
|
generated_at_epoch_secs: now_epoch_secs(),
|
||||||
|
enabled: !entries.is_empty(),
|
||||||
|
entries_total: entries.len(),
|
||||||
|
entries,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Builds the ME pool state response.
///
/// Returns a disabled envelope (`reason = SOURCE_UNAVAILABLE_REASON`) when no
/// pool instance exists; otherwise aggregates writer / generation / refill
/// statistics from the pool's API snapshots.
pub(super) async fn build_runtime_me_pool_state_data(shared: &ApiShared) -> RuntimeMePoolStateData {
    let now_epoch_secs = now_epoch_secs();
    let Some(pool) = shared.me_pool.read().await.clone() else {
        return RuntimeMePoolStateData {
            enabled: false,
            reason: Some(SOURCE_UNAVAILABLE_REASON),
            generated_at_epoch_secs: now_epoch_secs,
            data: None,
        };
    };

    let status = pool.api_status_snapshot().await;
    let runtime = pool.api_runtime_snapshot().await;
    let refill = pool.api_refill_snapshot().await;

    // BTreeSet yields the draining generations sorted and de-duplicated.
    let mut draining_generations = BTreeSet::<u64>::new();
    let mut contour_warm = 0usize;
    let mut contour_active = 0usize;
    let mut contour_draining = 0usize;
    let mut draining = 0usize;
    let mut degraded = 0usize;
    let mut healthy = 0usize;

    // One pass over all writers; health buckets (healthy/degraded/draining)
    // are mutually exclusive, with draining taking precedence.
    for writer in &status.writers {
        if writer.draining {
            draining_generations.insert(writer.generation);
            draining += 1;
        }
        if writer.degraded && !writer.draining {
            degraded += 1;
        }
        if !writer.degraded && !writer.draining {
            healthy += 1;
        }
        // Contour is keyed off the writer's state string; anything that is
        // neither "warm" nor "active" counts as draining.
        match writer.state {
            "warm" => contour_warm += 1,
            "active" => contour_active += 1,
            _ => contour_draining += 1,
        }
    }

    RuntimeMePoolStateData {
        enabled: true,
        reason: None,
        // Timestamp comes from the status snapshot, not the local clock read
        // above (which is only used for the unavailable branch).
        generated_at_epoch_secs: status.generated_at_epoch_secs,
        data: Some(RuntimeMePoolStatePayload {
            generations: RuntimeMePoolStateGenerationData {
                active_generation: runtime.active_generation,
                warm_generation: runtime.warm_generation,
                pending_hardswap_generation: runtime.pending_hardswap_generation,
                pending_hardswap_age_secs: runtime.pending_hardswap_age_secs,
                draining_generations: draining_generations.into_iter().collect(),
            },
            hardswap: RuntimeMePoolStateHardswapData {
                enabled: runtime.hardswap_enabled,
                // Generation 0 means "no hardswap pending".
                pending: runtime.pending_hardswap_generation != 0,
            },
            writers: RuntimeMePoolStateWriterData {
                total: status.writers.len(),
                alive_non_draining: status.writers.len().saturating_sub(draining),
                draining,
                degraded,
                contour: RuntimeMePoolStateWriterContourData {
                    warm: contour_warm,
                    active: contour_active,
                    draining: contour_draining,
                },
                health: RuntimeMePoolStateWriterHealthData {
                    healthy,
                    degraded,
                    draining,
                },
            },
            refill: RuntimeMePoolStateRefillData {
                inflight_endpoints_total: refill.inflight_endpoints_total,
                inflight_dc_total: refill.inflight_dc_total,
                by_dc: refill
                    .by_dc
                    .into_iter()
                    .map(|entry| RuntimeMePoolStateRefillDcData {
                        dc: entry.dc,
                        family: entry.family,
                        inflight: entry.inflight,
                    })
                    .collect(),
            },
        }),
    }
}
|
||||||
|
|
||||||
|
/// Builds the ME quality response (counters, route drops, per-DC RTT).
///
/// Returns a disabled envelope when no pool instance exists. Counters are
/// read from the shared stats registry; per-DC rows come from the pool's
/// status snapshot.
pub(super) async fn build_runtime_me_quality_data(shared: &ApiShared) -> RuntimeMeQualityData {
    let now_epoch_secs = now_epoch_secs();
    let Some(pool) = shared.me_pool.read().await.clone() else {
        return RuntimeMeQualityData {
            enabled: false,
            reason: Some(SOURCE_UNAVAILABLE_REASON),
            generated_at_epoch_secs: now_epoch_secs,
            data: None,
        };
    };

    let status = pool.api_status_snapshot().await;
    RuntimeMeQualityData {
        enabled: true,
        reason: None,
        // Use the snapshot's own timestamp rather than the local clock read.
        generated_at_epoch_secs: status.generated_at_epoch_secs,
        data: Some(RuntimeMeQualityPayload {
            counters: RuntimeMeQualityCountersData {
                idle_close_by_peer_total: shared.stats.get_me_idle_close_by_peer_total(),
                reader_eof_total: shared.stats.get_me_reader_eof_total(),
                kdf_drift_total: shared.stats.get_me_kdf_drift_total(),
                kdf_port_only_drift_total: shared.stats.get_me_kdf_port_only_drift_total(),
                reconnect_attempt_total: shared.stats.get_me_reconnect_attempts(),
                reconnect_success_total: shared.stats.get_me_reconnect_success(),
            },
            route_drops: RuntimeMeQualityRouteDropData {
                no_conn_total: shared.stats.get_me_route_drop_no_conn(),
                channel_closed_total: shared.stats.get_me_route_drop_channel_closed(),
                queue_full_total: shared.stats.get_me_route_drop_queue_full(),
                queue_full_base_total: shared.stats.get_me_route_drop_queue_full_base(),
                queue_full_high_total: shared.stats.get_me_route_drop_queue_full_high(),
            },
            dc_rtt: status
                .dcs
                .into_iter()
                .map(|dc| RuntimeMeQualityDcRttData {
                    dc: dc.dc,
                    rtt_ema_ms: dc.rtt_ms,
                    alive_writers: dc.alive_writers,
                    required_writers: dc.required_writers,
                    coverage_pct: dc.coverage_pct,
                })
                .collect(),
        }),
    }
}
|
||||||
|
|
||||||
|
pub(super) async fn build_runtime_upstream_quality_data(
|
||||||
|
shared: &ApiShared,
|
||||||
|
) -> RuntimeUpstreamQualityData {
|
||||||
|
let generated_at_epoch_secs = now_epoch_secs();
|
||||||
|
let policy = shared.upstream_manager.api_policy_snapshot();
|
||||||
|
let counters = RuntimeUpstreamQualityCountersData {
|
||||||
|
connect_attempt_total: shared.stats.get_upstream_connect_attempt_total(),
|
||||||
|
connect_success_total: shared.stats.get_upstream_connect_success_total(),
|
||||||
|
connect_fail_total: shared.stats.get_upstream_connect_fail_total(),
|
||||||
|
connect_failfast_hard_error_total: shared.stats.get_upstream_connect_failfast_hard_error_total(),
|
||||||
|
};
|
||||||
|
|
||||||
|
let Some(snapshot) = shared.upstream_manager.try_api_snapshot() else {
|
||||||
|
return RuntimeUpstreamQualityData {
|
||||||
|
enabled: false,
|
||||||
|
reason: Some(SOURCE_UNAVAILABLE_REASON),
|
||||||
|
generated_at_epoch_secs,
|
||||||
|
policy: RuntimeUpstreamQualityPolicyData {
|
||||||
|
connect_retry_attempts: policy.connect_retry_attempts,
|
||||||
|
connect_retry_backoff_ms: policy.connect_retry_backoff_ms,
|
||||||
|
connect_budget_ms: policy.connect_budget_ms,
|
||||||
|
unhealthy_fail_threshold: policy.unhealthy_fail_threshold,
|
||||||
|
connect_failfast_hard_errors: policy.connect_failfast_hard_errors,
|
||||||
|
},
|
||||||
|
counters,
|
||||||
|
summary: None,
|
||||||
|
upstreams: None,
|
||||||
|
};
|
||||||
|
};
|
||||||
|
|
||||||
|
RuntimeUpstreamQualityData {
|
||||||
|
enabled: true,
|
||||||
|
reason: None,
|
||||||
|
generated_at_epoch_secs,
|
||||||
|
policy: RuntimeUpstreamQualityPolicyData {
|
||||||
|
connect_retry_attempts: policy.connect_retry_attempts,
|
||||||
|
connect_retry_backoff_ms: policy.connect_retry_backoff_ms,
|
||||||
|
connect_budget_ms: policy.connect_budget_ms,
|
||||||
|
unhealthy_fail_threshold: policy.unhealthy_fail_threshold,
|
||||||
|
connect_failfast_hard_errors: policy.connect_failfast_hard_errors,
|
||||||
|
},
|
||||||
|
counters,
|
||||||
|
summary: Some(RuntimeUpstreamQualitySummaryData {
|
||||||
|
configured_total: snapshot.summary.configured_total,
|
||||||
|
healthy_total: snapshot.summary.healthy_total,
|
||||||
|
unhealthy_total: snapshot.summary.unhealthy_total,
|
||||||
|
direct_total: snapshot.summary.direct_total,
|
||||||
|
socks4_total: snapshot.summary.socks4_total,
|
||||||
|
socks5_total: snapshot.summary.socks5_total,
|
||||||
|
}),
|
||||||
|
upstreams: Some(
|
||||||
|
snapshot
|
||||||
|
.upstreams
|
||||||
|
.into_iter()
|
||||||
|
.map(|upstream| RuntimeUpstreamQualityUpstreamData {
|
||||||
|
upstream_id: upstream.upstream_id,
|
||||||
|
route_kind: match upstream.route_kind {
|
||||||
|
crate::transport::UpstreamRouteKind::Direct => "direct",
|
||||||
|
crate::transport::UpstreamRouteKind::Socks4 => "socks4",
|
||||||
|
crate::transport::UpstreamRouteKind::Socks5 => "socks5",
|
||||||
|
},
|
||||||
|
address: upstream.address,
|
||||||
|
weight: upstream.weight,
|
||||||
|
scopes: upstream.scopes,
|
||||||
|
healthy: upstream.healthy,
|
||||||
|
fails: upstream.fails,
|
||||||
|
last_check_age_secs: upstream.last_check_age_secs,
|
||||||
|
effective_latency_ms: upstream.effective_latency_ms,
|
||||||
|
dc: upstream
|
||||||
|
.dc
|
||||||
|
.into_iter()
|
||||||
|
.map(|dc| RuntimeUpstreamQualityDcData {
|
||||||
|
dc: dc.dc,
|
||||||
|
latency_ema_ms: dc.latency_ema_ms,
|
||||||
|
ip_preference: match dc.ip_preference {
|
||||||
|
crate::transport::upstream::IpPreference::Unknown => "unknown",
|
||||||
|
crate::transport::upstream::IpPreference::PreferV6 => "prefer_v6",
|
||||||
|
crate::transport::upstream::IpPreference::PreferV4 => "prefer_v4",
|
||||||
|
crate::transport::upstream::IpPreference::BothWork => "both_work",
|
||||||
|
crate::transport::upstream::IpPreference::Unavailable => "unavailable",
|
||||||
|
},
|
||||||
|
})
|
||||||
|
.collect(),
|
||||||
|
})
|
||||||
|
.collect(),
|
||||||
|
),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
pub(super) async fn build_runtime_nat_stun_data(shared: &ApiShared) -> RuntimeNatStunData {
|
||||||
|
let now_epoch_secs = now_epoch_secs();
|
||||||
|
let Some(pool) = shared.me_pool.read().await.clone() else {
|
||||||
|
return RuntimeNatStunData {
|
||||||
|
enabled: false,
|
||||||
|
reason: Some(SOURCE_UNAVAILABLE_REASON),
|
||||||
|
generated_at_epoch_secs: now_epoch_secs,
|
||||||
|
data: None,
|
||||||
|
};
|
||||||
|
};
|
||||||
|
|
||||||
|
let snapshot = pool.api_nat_stun_snapshot().await;
|
||||||
|
RuntimeNatStunData {
|
||||||
|
enabled: true,
|
||||||
|
reason: None,
|
||||||
|
generated_at_epoch_secs: now_epoch_secs,
|
||||||
|
data: Some(RuntimeNatStunPayload {
|
||||||
|
flags: RuntimeNatStunFlagsData {
|
||||||
|
nat_probe_enabled: snapshot.nat_probe_enabled,
|
||||||
|
nat_probe_disabled_runtime: snapshot.nat_probe_disabled_runtime,
|
||||||
|
nat_probe_attempts: snapshot.nat_probe_attempts,
|
||||||
|
},
|
||||||
|
servers: RuntimeNatStunServersData {
|
||||||
|
configured: snapshot.configured_servers,
|
||||||
|
live: snapshot.live_servers.clone(),
|
||||||
|
live_total: snapshot.live_servers.len(),
|
||||||
|
},
|
||||||
|
reflection: RuntimeNatStunReflectionBlockData {
|
||||||
|
v4: snapshot.reflection_v4.map(|entry| RuntimeNatStunReflectionData {
|
||||||
|
addr: entry.addr.to_string(),
|
||||||
|
age_secs: entry.age_secs,
|
||||||
|
}),
|
||||||
|
v6: snapshot.reflection_v6.map(|entry| RuntimeNatStunReflectionData {
|
||||||
|
addr: entry.addr.to_string(),
|
||||||
|
age_secs: entry.age_secs,
|
||||||
|
}),
|
||||||
|
},
|
||||||
|
stun_backoff_remaining_ms: snapshot.stun_backoff_remaining_ms,
|
||||||
|
}),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Current Unix time in whole seconds.
///
/// Yields 0 if the system clock reads earlier than the epoch (matching the
/// original `unwrap_or_default()` behavior).
fn now_epoch_secs() -> u64 {
    match SystemTime::now().duration_since(UNIX_EPOCH) {
        Ok(elapsed) => elapsed.as_secs(),
        Err(_) => 0,
    }
}
|
||||||
299
src/api/runtime_selftest.rs
Normal file
299
src/api/runtime_selftest.rs
Normal file
@@ -0,0 +1,299 @@
|
|||||||
|
use std::net::IpAddr;
|
||||||
|
use std::collections::HashMap;
|
||||||
|
use std::sync::{Mutex, OnceLock};
|
||||||
|
use std::time::{SystemTime, UNIX_EPOCH};
|
||||||
|
|
||||||
|
use serde::Serialize;
|
||||||
|
|
||||||
|
use crate::config::{ProxyConfig, UpstreamType};
|
||||||
|
use crate::network::probe::{detect_interface_ipv4, detect_interface_ipv6, is_bogon};
|
||||||
|
use crate::transport::middle_proxy::{bnd_snapshot, timeskew_snapshot, upstream_bnd_snapshots};
|
||||||
|
use crate::transport::UpstreamRouteKind;
|
||||||
|
|
||||||
|
use super::ApiShared;
|
||||||
|
|
||||||
|
// Reason string reported when the ME pool is not available.
const SOURCE_UNAVAILABLE_REASON: &str = "source_unavailable";
// Time constant (seconds) of the KDF-error EWMA used in `update_kdf_ewma`.
const KDF_EWMA_TAU_SECS: f64 = 600.0;
// EWMA rate at or above which the KDF selftest state becomes "error".
const KDF_EWMA_THRESHOLD_ERRORS_PER_MIN: f64 = 0.30;
// 15-minute max clock skew (seconds) above which the timeskew state is "error".
const TIMESKEW_THRESHOLD_SECS: u64 = 60;

/// KDF drift selftest result: smoothed error rate vs. threshold.
#[derive(Serialize)]
pub(super) struct RuntimeMeSelftestKdfData {
    // "ok" or "error".
    pub(super) state: &'static str,
    pub(super) ewma_errors_per_min: f64,
    pub(super) threshold_errors_per_min: f64,
    pub(super) errors_total: u64,
}

/// Clock-skew selftest result over the last 15 minutes.
#[derive(Serialize)]
pub(super) struct RuntimeMeSelftestTimeskewData {
    // "ok" or "error".
    pub(super) state: &'static str,
    pub(super) max_skew_secs_15m: Option<u64>,
    pub(super) samples_15m: usize,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub(super) last_skew_secs: Option<u64>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub(super) last_source: Option<&'static str>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub(super) last_seen_age_secs: Option<u64>,
}

/// Detected interface address for one IP family plus its classification.
#[derive(Serialize)]
pub(super) struct RuntimeMeSelftestIpFamilyData {
    pub(super) addr: String,
    // "loopback" / "bogon" / "good" (see `classify_ip`).
    pub(super) state: &'static str,
}

/// Detected interface addresses; a family is omitted when not detected.
#[derive(Serialize)]
pub(super) struct RuntimeMeSelftestIpData {
    #[serde(skip_serializing_if = "Option::is_none")]
    pub(super) v4: Option<RuntimeMeSelftestIpFamilyData>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub(super) v6: Option<RuntimeMeSelftestIpFamilyData>,
}

/// Process-id check ("one" when PID 1, i.e. likely a container init).
#[derive(Serialize)]
pub(super) struct RuntimeMeSelftestPidData {
    pub(super) pid: u32,
    pub(super) state: &'static str,
}

/// SOCKS BND (bound address) observation for the proxy or one upstream.
#[derive(Serialize)]
pub(super) struct RuntimeMeSelftestBndData {
    pub(super) addr_state: &'static str,
    pub(super) port_state: &'static str,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub(super) last_addr: Option<String>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub(super) last_seen_age_secs: Option<u64>,
}

/// Per-upstream selftest row (present only with multiple upstreams).
#[derive(Serialize)]
pub(super) struct RuntimeMeSelftestUpstreamData {
    pub(super) upstream_id: usize,
    pub(super) route_kind: &'static str,
    pub(super) address: String,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub(super) bnd: Option<RuntimeMeSelftestBndData>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub(super) ip: Option<String>,
}

/// Full selftest payload; present only when the ME pool is available.
#[derive(Serialize)]
pub(super) struct RuntimeMeSelftestPayload {
    pub(super) kdf: RuntimeMeSelftestKdfData,
    pub(super) timeskew: RuntimeMeSelftestTimeskewData,
    pub(super) ip: RuntimeMeSelftestIpData,
    pub(super) pid: RuntimeMeSelftestPidData,
    // None when no SOCKS upstreams are configured (BND is SOCKS-specific).
    pub(super) bnd: Option<RuntimeMeSelftestBndData>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub(super) upstreams: Option<Vec<RuntimeMeSelftestUpstreamData>>,
}

/// Top-level selftest response envelope (enabled/reason/data shape shared
/// with the other runtime endpoints).
#[derive(Serialize)]
pub(super) struct RuntimeMeSelftestData {
    pub(super) enabled: bool,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub(super) reason: Option<&'static str>,
    pub(super) generated_at_epoch_secs: u64,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub(super) data: Option<RuntimeMeSelftestPayload>,
}
|
||||||
|
|
||||||
|
/// Process-wide state for the irregular-interval KDF-error EWMA.
#[derive(Default)]
struct KdfEwmaState {
    // False until the first sample seeds the baseline (rate starts at 0).
    initialized: bool,
    // Epoch seconds of the last sample.
    last_epoch_secs: u64,
    // Cumulative error counter value at the last sample.
    last_total_errors: u64,
    // Smoothed error rate, in errors per minute.
    ewma_errors_per_min: f64,
}

// Lazily-initialized global EWMA state, shared across API requests.
static KDF_EWMA_STATE: OnceLock<Mutex<KdfEwmaState>> = OnceLock::new();

/// Accessor for the global EWMA state, initializing it on first use.
fn kdf_ewma_state() -> &'static Mutex<KdfEwmaState> {
    KDF_EWMA_STATE.get_or_init(|| Mutex::new(KdfEwmaState::default()))
}
|
||||||
|
|
||||||
|
/// Builds the ME selftest response: KDF error rate, clock skew, interface
/// IPs, PID check, SOCKS BND observations, and per-upstream rows.
///
/// Returns a disabled envelope when no ME pool instance exists.
pub(super) async fn build_runtime_me_selftest_data(
    shared: &ApiShared,
    cfg: &ProxyConfig,
) -> RuntimeMeSelftestData {
    let now_epoch_secs = now_epoch_secs();
    if shared.me_pool.read().await.is_none() {
        return RuntimeMeSelftestData {
            enabled: false,
            reason: Some(SOURCE_UNAVAILABLE_REASON),
            generated_at_epoch_secs: now_epoch_secs,
            data: None,
        };
    }

    // KDF: combined drift + strict-reject counter, smoothed into an EWMA
    // rate; "error" once the rate crosses the threshold.
    let kdf_errors_total = shared
        .stats
        .get_me_kdf_drift_total()
        .saturating_add(shared.stats.get_me_socks_kdf_strict_reject());
    let kdf_ewma = update_kdf_ewma(now_epoch_secs, kdf_errors_total);
    let kdf_state = if kdf_ewma >= KDF_EWMA_THRESHOLD_ERRORS_PER_MIN {
        "error"
    } else {
        "ok"
    };

    // Timeskew: "error" when the 15-minute max skew exceeds the threshold;
    // no samples (None) counts as ok.
    let skew = timeskew_snapshot();
    let timeskew_state = if skew.max_skew_secs_15m.unwrap_or(0) > TIMESKEW_THRESHOLD_SECS {
        "error"
    } else {
        "ok"
    };

    // Interface addresses per family, classified (loopback/bogon/good).
    let ip_v4 = detect_interface_ipv4().map(|ip| RuntimeMeSelftestIpFamilyData {
        addr: ip.to_string(),
        state: classify_ip(IpAddr::V4(ip)),
    });
    let ip_v6 = detect_interface_ipv6().map(|ip| RuntimeMeSelftestIpFamilyData {
        addr: ip.to_string(),
        state: classify_ip(IpAddr::V6(ip)),
    });

    let pid = std::process::id();
    let pid_state = if pid == 1 { "one" } else { "non-one" };

    // BND data is only meaningful when at least one enabled SOCKS upstream
    // is configured.
    let has_socks_upstreams = cfg.upstreams.iter().any(|upstream| {
        upstream.enabled
            && matches!(
                upstream.upstream_type,
                UpstreamType::Socks4 { .. } | UpstreamType::Socks5 { .. }
            )
    });

    let bnd = if has_socks_upstreams {
        let snapshot = bnd_snapshot();
        Some(RuntimeMeSelftestBndData {
            addr_state: snapshot.addr_status,
            port_state: snapshot.port_status,
            last_addr: snapshot.last_addr.map(|value| value.to_string()),
            last_seen_age_secs: snapshot.last_seen_age_secs,
        })
    } else {
        None
    };
    let upstreams = build_upstream_selftest_data(shared);

    RuntimeMeSelftestData {
        enabled: true,
        reason: None,
        generated_at_epoch_secs: now_epoch_secs,
        data: Some(RuntimeMeSelftestPayload {
            kdf: RuntimeMeSelftestKdfData {
                state: kdf_state,
                ewma_errors_per_min: round3(kdf_ewma),
                threshold_errors_per_min: KDF_EWMA_THRESHOLD_ERRORS_PER_MIN,
                errors_total: kdf_errors_total,
            },
            timeskew: RuntimeMeSelftestTimeskewData {
                state: timeskew_state,
                max_skew_secs_15m: skew.max_skew_secs_15m,
                samples_15m: skew.samples_15m,
                last_skew_secs: skew.last_skew_secs,
                last_source: skew.last_source,
                last_seen_age_secs: skew.last_seen_age_secs,
            },
            ip: RuntimeMeSelftestIpData {
                v4: ip_v4,
                v6: ip_v6,
            },
            pid: RuntimeMeSelftestPidData {
                pid,
                state: pid_state,
            },
            bnd,
            upstreams,
        }),
    }
}
|
||||||
|
|
||||||
|
/// Builds per-upstream selftest rows, joining the manager's upstream
/// snapshot with per-upstream BND observations by upstream id.
///
/// Returns `None` when the manager snapshot is unavailable or when at most
/// one upstream is configured (single-upstream setups are covered by the
/// top-level `bnd` field instead).
fn build_upstream_selftest_data(shared: &ApiShared) -> Option<Vec<RuntimeMeSelftestUpstreamData>> {
    let snapshot = shared.upstream_manager.try_api_snapshot()?;
    if snapshot.summary.configured_total <= 1 {
        return None;
    }

    // Index BND snapshots by upstream id for O(1) lookup; `remove` below
    // also transfers ownership of each entry to its row.
    let mut upstream_bnd_by_id: HashMap<usize, _> = upstream_bnd_snapshots()
        .into_iter()
        .map(|entry| (entry.upstream_id, entry))
        .collect();
    let mut rows = Vec::with_capacity(snapshot.upstreams.len());
    for upstream in snapshot.upstreams {
        let upstream_bnd = upstream_bnd_by_id.remove(&upstream.upstream_id);
        rows.push(RuntimeMeSelftestUpstreamData {
            upstream_id: upstream.upstream_id,
            route_kind: map_route_kind(upstream.route_kind),
            address: upstream.address,
            // Borrow first for the BND block, then consume the entry for
            // the observed IP.
            bnd: upstream_bnd.as_ref().map(|entry| RuntimeMeSelftestBndData {
                addr_state: entry.addr_status,
                port_state: entry.port_status,
                last_addr: entry.last_addr.map(|value| value.to_string()),
                last_seen_age_secs: entry.last_seen_age_secs,
            }),
            ip: upstream_bnd.and_then(|entry| entry.last_ip.map(|value| value.to_string())),
        });
    }
    Some(rows)
}
|
||||||
|
|
||||||
|
fn update_kdf_ewma(now_epoch_secs: u64, total_errors: u64) -> f64 {
|
||||||
|
let Ok(mut guard) = kdf_ewma_state().lock() else {
|
||||||
|
return 0.0;
|
||||||
|
};
|
||||||
|
|
||||||
|
if !guard.initialized {
|
||||||
|
guard.initialized = true;
|
||||||
|
guard.last_epoch_secs = now_epoch_secs;
|
||||||
|
guard.last_total_errors = total_errors;
|
||||||
|
guard.ewma_errors_per_min = 0.0;
|
||||||
|
return guard.ewma_errors_per_min;
|
||||||
|
}
|
||||||
|
|
||||||
|
let dt_secs = now_epoch_secs.saturating_sub(guard.last_epoch_secs);
|
||||||
|
if dt_secs == 0 {
|
||||||
|
return guard.ewma_errors_per_min;
|
||||||
|
}
|
||||||
|
|
||||||
|
let delta_errors = total_errors.saturating_sub(guard.last_total_errors);
|
||||||
|
let instant_rate_per_min = (delta_errors as f64) * 60.0 / (dt_secs as f64);
|
||||||
|
let alpha = 1.0 - f64::exp(-(dt_secs as f64) / KDF_EWMA_TAU_SECS);
|
||||||
|
guard.ewma_errors_per_min = guard.ewma_errors_per_min
|
||||||
|
+ alpha * (instant_rate_per_min - guard.ewma_errors_per_min);
|
||||||
|
guard.last_epoch_secs = now_epoch_secs;
|
||||||
|
guard.last_total_errors = total_errors;
|
||||||
|
guard.ewma_errors_per_min
|
||||||
|
}
|
||||||
|
|
||||||
|
fn classify_ip(ip: IpAddr) -> &'static str {
|
||||||
|
if ip.is_loopback() {
|
||||||
|
return "loopback";
|
||||||
|
}
|
||||||
|
if is_bogon(ip) {
|
||||||
|
return "bogon";
|
||||||
|
}
|
||||||
|
"good"
|
||||||
|
}
|
||||||
|
|
||||||
|
fn map_route_kind(value: UpstreamRouteKind) -> &'static str {
|
||||||
|
match value {
|
||||||
|
UpstreamRouteKind::Direct => "direct",
|
||||||
|
UpstreamRouteKind::Socks4 => "socks4",
|
||||||
|
UpstreamRouteKind::Socks5 => "socks5",
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Rounds a float to three decimal places (half-away-from-zero, per
/// `f64::round`).
fn round3(value: f64) -> f64 {
    let scaled = value * 1000.0;
    scaled.round() / 1000.0
}
|
||||||
|
|
||||||
|
/// Current wall-clock time as whole seconds since the Unix epoch.
/// Returns 0 if the system clock is set before the epoch.
fn now_epoch_secs() -> u64 {
    match SystemTime::now().duration_since(UNIX_EPOCH) {
        Ok(elapsed) => elapsed.as_secs(),
        Err(_) => 0,
    }
}
|
||||||
526
src/api/runtime_stats.rs
Normal file
526
src/api/runtime_stats.rs
Normal file
@@ -0,0 +1,526 @@
|
|||||||
|
use std::time::{Duration, Instant, SystemTime, UNIX_EPOCH};
|
||||||
|
|
||||||
|
use crate::config::ApiConfig;
|
||||||
|
use crate::stats::Stats;
|
||||||
|
use crate::transport::upstream::IpPreference;
|
||||||
|
use crate::transport::UpstreamRouteKind;
|
||||||
|
|
||||||
|
use super::ApiShared;
|
||||||
|
use super::model::{
|
||||||
|
DcEndpointWriters, DcStatus, DcStatusData, MeWriterStatus, MeWritersData, MeWritersSummary,
|
||||||
|
MinimalAllData, MinimalAllPayload, MinimalDcPathData, MinimalMeRuntimeData,
|
||||||
|
MinimalQuarantineData, UpstreamDcStatus, UpstreamStatus, UpstreamSummaryData, UpstreamsData,
|
||||||
|
ZeroAllData, ZeroCodeCount, ZeroCoreData, ZeroDesyncData, ZeroMiddleProxyData, ZeroPoolData,
|
||||||
|
ZeroUpstreamData,
|
||||||
|
};
|
||||||
|
|
||||||
|
// Machine-readable `reason` values returned in API payloads when data is absent.
// The minimal-runtime feature is switched off in the config.
const FEATURE_DISABLED_REASON: &str = "feature_disabled";
// The backing snapshot/pool could not be read at this moment.
const SOURCE_UNAVAILABLE_REASON: &str = "source_unavailable";
|
||||||
|
|
||||||
|
/// One cached copy of the minimal-runtime payload, stored behind
/// `ApiShared::minimal_cache` so repeated API calls within the TTL do not
/// rebuild the snapshot.
#[derive(Clone)]
pub(crate) struct MinimalCacheEntry {
    // Monotonic deadline; the entry is ignored once `Instant::now()` passes it.
    pub(super) expires_at: Instant,
    // The payload served to callers while the entry is fresh.
    pub(super) payload: MinimalAllPayload,
    // Epoch timestamp the payload was generated at (reported to clients).
    pub(super) generated_at_epoch_secs: u64,
}
|
||||||
|
|
||||||
|
/// Builds the "zero" (always-available, counter-only) API payload from the
/// global `Stats` registry.
///
/// This is a pure read of atomic counters plus the telemetry policy; it does
/// not touch the pool or upstream manager and therefore cannot block.
pub(super) fn build_zero_all_data(stats: &Stats, configured_users: usize) -> ZeroAllData {
    let telemetry = stats.telemetry_policy();
    // Per-error-code histogram of middle-proxy handshake rejections.
    let handshake_error_codes = stats
        .get_me_handshake_error_code_counts()
        .into_iter()
        .map(|(code, total)| ZeroCodeCount { code, total })
        .collect();

    ZeroAllData {
        generated_at_epoch_secs: now_epoch_secs(),
        // Process-wide connection and telemetry-policy basics.
        core: ZeroCoreData {
            uptime_seconds: stats.uptime_secs(),
            connections_total: stats.get_connects_all(),
            connections_bad_total: stats.get_connects_bad(),
            handshake_timeouts_total: stats.get_handshake_timeouts(),
            configured_users,
            telemetry_core_enabled: telemetry.core_enabled,
            telemetry_user_enabled: telemetry.user_enabled,
            telemetry_me_level: telemetry.me_level.to_string(),
        },
        upstream: build_zero_upstream_data(stats),
        // Middle-proxy (ME) counters: keepalive, RPC signals, reconnects,
        // routing drops, KDF handling, outage/shadow-rotation, floor mode.
        middle_proxy: ZeroMiddleProxyData {
            keepalive_sent_total: stats.get_me_keepalive_sent(),
            keepalive_failed_total: stats.get_me_keepalive_failed(),
            keepalive_pong_total: stats.get_me_keepalive_pong(),
            keepalive_timeout_total: stats.get_me_keepalive_timeout(),
            rpc_proxy_req_signal_sent_total: stats.get_me_rpc_proxy_req_signal_sent_total(),
            rpc_proxy_req_signal_failed_total: stats.get_me_rpc_proxy_req_signal_failed_total(),
            rpc_proxy_req_signal_skipped_no_meta_total: stats
                .get_me_rpc_proxy_req_signal_skipped_no_meta_total(),
            rpc_proxy_req_signal_response_total: stats.get_me_rpc_proxy_req_signal_response_total(),
            rpc_proxy_req_signal_close_sent_total: stats
                .get_me_rpc_proxy_req_signal_close_sent_total(),
            reconnect_attempt_total: stats.get_me_reconnect_attempts(),
            reconnect_success_total: stats.get_me_reconnect_success(),
            handshake_reject_total: stats.get_me_handshake_reject_total(),
            handshake_error_codes,
            reader_eof_total: stats.get_me_reader_eof_total(),
            idle_close_by_peer_total: stats.get_me_idle_close_by_peer_total(),
            route_drop_no_conn_total: stats.get_me_route_drop_no_conn(),
            route_drop_channel_closed_total: stats.get_me_route_drop_channel_closed(),
            route_drop_queue_full_total: stats.get_me_route_drop_queue_full(),
            route_drop_queue_full_base_total: stats.get_me_route_drop_queue_full_base(),
            route_drop_queue_full_high_total: stats.get_me_route_drop_queue_full_high(),
            socks_kdf_strict_reject_total: stats.get_me_socks_kdf_strict_reject(),
            socks_kdf_compat_fallback_total: stats.get_me_socks_kdf_compat_fallback(),
            endpoint_quarantine_total: stats.get_me_endpoint_quarantine_total(),
            kdf_drift_total: stats.get_me_kdf_drift_total(),
            kdf_port_only_drift_total: stats.get_me_kdf_port_only_drift_total(),
            hardswap_pending_reuse_total: stats.get_me_hardswap_pending_reuse_total(),
            hardswap_pending_ttl_expired_total: stats.get_me_hardswap_pending_ttl_expired_total(),
            single_endpoint_outage_enter_total: stats.get_me_single_endpoint_outage_enter_total(),
            single_endpoint_outage_exit_total: stats.get_me_single_endpoint_outage_exit_total(),
            single_endpoint_outage_reconnect_attempt_total: stats
                .get_me_single_endpoint_outage_reconnect_attempt_total(),
            single_endpoint_outage_reconnect_success_total: stats
                .get_me_single_endpoint_outage_reconnect_success_total(),
            single_endpoint_quarantine_bypass_total: stats
                .get_me_single_endpoint_quarantine_bypass_total(),
            single_endpoint_shadow_rotate_total: stats.get_me_single_endpoint_shadow_rotate_total(),
            single_endpoint_shadow_rotate_skipped_quarantine_total: stats
                .get_me_single_endpoint_shadow_rotate_skipped_quarantine_total(),
            floor_mode_switch_total: stats.get_me_floor_mode_switch_total(),
            floor_mode_switch_static_to_adaptive_total: stats
                .get_me_floor_mode_switch_static_to_adaptive_total(),
            floor_mode_switch_adaptive_to_static_total: stats
                .get_me_floor_mode_switch_adaptive_to_static_total(),
        },
        // Writer-pool lifecycle counters (swap/drain/refill/restore).
        pool: ZeroPoolData {
            pool_swap_total: stats.get_pool_swap_total(),
            pool_drain_active: stats.get_pool_drain_active(),
            pool_force_close_total: stats.get_pool_force_close_total(),
            pool_stale_pick_total: stats.get_pool_stale_pick_total(),
            writer_removed_total: stats.get_me_writer_removed_total(),
            writer_removed_unexpected_total: stats.get_me_writer_removed_unexpected_total(),
            refill_triggered_total: stats.get_me_refill_triggered_total(),
            refill_skipped_inflight_total: stats.get_me_refill_skipped_inflight_total(),
            refill_failed_total: stats.get_me_refill_failed_total(),
            writer_restored_same_endpoint_total: stats.get_me_writer_restored_same_endpoint_total(),
            writer_restored_fallback_total: stats.get_me_writer_restored_fallback_total(),
        },
        // Protocol-desync detection counters and frame-count histogram.
        desync: ZeroDesyncData {
            secure_padding_invalid_total: stats.get_secure_padding_invalid(),
            desync_total: stats.get_desync_total(),
            desync_full_logged_total: stats.get_desync_full_logged(),
            desync_suppressed_total: stats.get_desync_suppressed(),
            desync_frames_bucket_0: stats.get_desync_frames_bucket_0(),
            desync_frames_bucket_1_2: stats.get_desync_frames_bucket_1_2(),
            desync_frames_bucket_3_10: stats.get_desync_frames_bucket_3_10(),
            desync_frames_bucket_gt_10: stats.get_desync_frames_bucket_gt_10(),
        },
    }
}
|
||||||
|
|
||||||
|
/// Builds the upstream-connect counter section of the zero payload: attempt,
/// success and failure totals plus attempt-count and connect-duration
/// histograms (bucket boundaries are baked into the `Stats` getter names).
fn build_zero_upstream_data(stats: &Stats) -> ZeroUpstreamData {
    ZeroUpstreamData {
        connect_attempt_total: stats.get_upstream_connect_attempt_total(),
        connect_success_total: stats.get_upstream_connect_success_total(),
        connect_fail_total: stats.get_upstream_connect_fail_total(),
        connect_failfast_hard_error_total: stats.get_upstream_connect_failfast_hard_error_total(),
        // Histogram: how many attempts a successful connect needed.
        connect_attempts_bucket_1: stats.get_upstream_connect_attempts_bucket_1(),
        connect_attempts_bucket_2: stats.get_upstream_connect_attempts_bucket_2(),
        connect_attempts_bucket_3_4: stats.get_upstream_connect_attempts_bucket_3_4(),
        connect_attempts_bucket_gt_4: stats.get_upstream_connect_attempts_bucket_gt_4(),
        // Histogram: connect latency for successful attempts.
        connect_duration_success_bucket_le_100ms: stats
            .get_upstream_connect_duration_success_bucket_le_100ms(),
        connect_duration_success_bucket_101_500ms: stats
            .get_upstream_connect_duration_success_bucket_101_500ms(),
        connect_duration_success_bucket_501_1000ms: stats
            .get_upstream_connect_duration_success_bucket_501_1000ms(),
        connect_duration_success_bucket_gt_1000ms: stats
            .get_upstream_connect_duration_success_bucket_gt_1000ms(),
        // Histogram: connect latency for failed attempts.
        connect_duration_fail_bucket_le_100ms: stats.get_upstream_connect_duration_fail_bucket_le_100ms(),
        connect_duration_fail_bucket_101_500ms: stats
            .get_upstream_connect_duration_fail_bucket_101_500ms(),
        connect_duration_fail_bucket_501_1000ms: stats
            .get_upstream_connect_duration_fail_bucket_501_1000ms(),
        connect_duration_fail_bucket_gt_1000ms: stats
            .get_upstream_connect_duration_fail_bucket_gt_1000ms(),
    }
}
|
||||||
|
|
||||||
|
/// Builds the upstreams API payload.
///
/// The zero-counter section is always present. The detailed summary and
/// per-upstream rows are only filled in when the minimal-runtime feature is
/// enabled AND a non-blocking snapshot of the upstream manager is available;
/// otherwise `reason` explains what is missing.
pub(super) fn build_upstreams_data(shared: &ApiShared, api_cfg: &ApiConfig) -> UpstreamsData {
    let generated_at_epoch_secs = now_epoch_secs();
    let zero = build_zero_upstream_data(&shared.stats);
    // Feature off: counters only, no snapshot data.
    if !api_cfg.minimal_runtime_enabled {
        return UpstreamsData {
            enabled: false,
            reason: Some(FEATURE_DISABLED_REASON),
            generated_at_epoch_secs,
            zero,
            summary: None,
            upstreams: None,
        };
    }

    // Snapshot unavailable right now (non-blocking read failed).
    let Some(snapshot) = shared.upstream_manager.try_api_snapshot() else {
        return UpstreamsData {
            enabled: true,
            reason: Some(SOURCE_UNAVAILABLE_REASON),
            generated_at_epoch_secs,
            zero,
            summary: None,
            upstreams: None,
        };
    };

    let summary = UpstreamSummaryData {
        configured_total: snapshot.summary.configured_total,
        healthy_total: snapshot.summary.healthy_total,
        unhealthy_total: snapshot.summary.unhealthy_total,
        direct_total: snapshot.summary.direct_total,
        socks4_total: snapshot.summary.socks4_total,
        socks5_total: snapshot.summary.socks5_total,
    };
    // One row per upstream, with nested per-DC latency/preference details.
    let upstreams = snapshot
        .upstreams
        .into_iter()
        .map(|upstream| UpstreamStatus {
            upstream_id: upstream.upstream_id,
            route_kind: map_route_kind(upstream.route_kind),
            address: upstream.address,
            weight: upstream.weight,
            scopes: upstream.scopes,
            healthy: upstream.healthy,
            fails: upstream.fails,
            last_check_age_secs: upstream.last_check_age_secs,
            effective_latency_ms: upstream.effective_latency_ms,
            dc: upstream
                .dc
                .into_iter()
                .map(|dc| UpstreamDcStatus {
                    dc: dc.dc,
                    latency_ema_ms: dc.latency_ema_ms,
                    ip_preference: map_ip_preference(dc.ip_preference),
                })
                .collect(),
        })
        .collect();

    UpstreamsData {
        enabled: true,
        reason: None,
        generated_at_epoch_secs,
        zero,
        summary: Some(summary),
        upstreams: Some(upstreams),
    }
}
|
||||||
|
|
||||||
|
/// Builds the combined minimal-runtime API payload.
///
/// Uses the TTL cache via `get_minimal_payload_cached`. When the feature is
/// disabled the payload is omitted entirely; when the source is unavailable a
/// placeholder payload is returned with per-section "unavailable" markers so
/// clients still receive a stable shape.
pub(super) async fn build_minimal_all_data(
    shared: &ApiShared,
    api_cfg: &ApiConfig,
) -> MinimalAllData {
    let now = now_epoch_secs();
    if !api_cfg.minimal_runtime_enabled {
        return MinimalAllData {
            enabled: false,
            reason: Some(FEATURE_DISABLED_REASON),
            generated_at_epoch_secs: now,
            data: None,
        };
    }

    let Some((generated_at_epoch_secs, payload)) =
        get_minimal_payload_cached(shared, api_cfg.minimal_runtime_cache_ttl_ms).await
    else {
        // Source unavailable: emit placeholder sections rather than nothing.
        return MinimalAllData {
            enabled: true,
            reason: Some(SOURCE_UNAVAILABLE_REASON),
            generated_at_epoch_secs: now,
            data: Some(MinimalAllPayload {
                me_writers: disabled_me_writers(now, SOURCE_UNAVAILABLE_REASON),
                dcs: disabled_dcs(now, SOURCE_UNAVAILABLE_REASON),
                me_runtime: None,
                network_path: Vec::new(),
            }),
        };
    };

    MinimalAllData {
        enabled: true,
        reason: None,
        generated_at_epoch_secs,
        data: Some(payload),
    }
}
|
||||||
|
|
||||||
|
pub(super) async fn build_me_writers_data(
|
||||||
|
shared: &ApiShared,
|
||||||
|
api_cfg: &ApiConfig,
|
||||||
|
) -> MeWritersData {
|
||||||
|
let now = now_epoch_secs();
|
||||||
|
if !api_cfg.minimal_runtime_enabled {
|
||||||
|
return disabled_me_writers(now, FEATURE_DISABLED_REASON);
|
||||||
|
}
|
||||||
|
|
||||||
|
let Some((_, payload)) =
|
||||||
|
get_minimal_payload_cached(shared, api_cfg.minimal_runtime_cache_ttl_ms).await
|
||||||
|
else {
|
||||||
|
return disabled_me_writers(now, SOURCE_UNAVAILABLE_REASON);
|
||||||
|
};
|
||||||
|
payload.me_writers
|
||||||
|
}
|
||||||
|
|
||||||
|
pub(super) async fn build_dcs_data(shared: &ApiShared, api_cfg: &ApiConfig) -> DcStatusData {
|
||||||
|
let now = now_epoch_secs();
|
||||||
|
if !api_cfg.minimal_runtime_enabled {
|
||||||
|
return disabled_dcs(now, FEATURE_DISABLED_REASON);
|
||||||
|
}
|
||||||
|
|
||||||
|
let Some((_, payload)) =
|
||||||
|
get_minimal_payload_cached(shared, api_cfg.minimal_runtime_cache_ttl_ms).await
|
||||||
|
else {
|
||||||
|
return disabled_dcs(now, SOURCE_UNAVAILABLE_REASON);
|
||||||
|
};
|
||||||
|
payload.dcs
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Produces the minimal-runtime payload, serving a cached copy when one is
/// still fresh and refreshing the cache after a rebuild.
///
/// Returns `None` when no middle-proxy pool is installed. Note that a cache
/// miss rebuilds outside the cache lock, so concurrent callers may rebuild in
/// parallel; the last writer wins (acceptable for a read-only snapshot).
async fn get_minimal_payload_cached(
    shared: &ApiShared,
    cache_ttl_ms: u64,
) -> Option<(u64, MinimalAllPayload)> {
    // Fast path: serve from cache when caching is enabled and fresh.
    if cache_ttl_ms > 0 {
        let now = Instant::now();
        let cached = shared.minimal_cache.lock().await.clone();
        if let Some(entry) = cached
            && now < entry.expires_at
        {
            return Some((entry.generated_at_epoch_secs, entry.payload));
        }
    }

    // Rebuild from live pool snapshots. Clone the Arc out of the read guard
    // so the lock is not held across the snapshot awaits.
    let pool = shared.me_pool.read().await.clone()?;
    let status = pool.api_status_snapshot().await;
    let runtime = pool.api_runtime_snapshot().await;
    let generated_at_epoch_secs = status.generated_at_epoch_secs;

    // Section 1: writer roster and coverage summary.
    let me_writers = MeWritersData {
        middle_proxy_enabled: true,
        reason: None,
        generated_at_epoch_secs,
        summary: MeWritersSummary {
            configured_dc_groups: status.configured_dc_groups,
            configured_endpoints: status.configured_endpoints,
            available_endpoints: status.available_endpoints,
            available_pct: status.available_pct,
            required_writers: status.required_writers,
            alive_writers: status.alive_writers,
            coverage_pct: status.coverage_pct,
        },
        writers: status
            .writers
            .into_iter()
            .map(|entry| MeWriterStatus {
                writer_id: entry.writer_id,
                dc: entry.dc,
                endpoint: entry.endpoint.to_string(),
                generation: entry.generation,
                state: entry.state,
                draining: entry.draining,
                degraded: entry.degraded,
                bound_clients: entry.bound_clients,
                idle_for_secs: entry.idle_for_secs,
                rtt_ema_ms: entry.rtt_ema_ms,
            })
            .collect(),
    };
    // Section 2: per-DC endpoint coverage, floor settings and load.
    let dcs = DcStatusData {
        middle_proxy_enabled: true,
        reason: None,
        generated_at_epoch_secs,
        dcs: status
            .dcs
            .into_iter()
            .map(|entry| DcStatus {
                dc: entry.dc,
                endpoints: entry
                    .endpoints
                    .into_iter()
                    .map(|value| value.to_string())
                    .collect(),
                endpoint_writers: entry
                    .endpoint_writers
                    .into_iter()
                    .map(|coverage| DcEndpointWriters {
                        endpoint: coverage.endpoint.to_string(),
                        active_writers: coverage.active_writers,
                    })
                    .collect(),
                available_endpoints: entry.available_endpoints,
                available_pct: entry.available_pct,
                required_writers: entry.required_writers,
                floor_min: entry.floor_min,
                floor_target: entry.floor_target,
                floor_max: entry.floor_max,
                floor_capped: entry.floor_capped,
                alive_writers: entry.alive_writers,
                coverage_pct: entry.coverage_pct,
                rtt_ms: entry.rtt_ms,
                load: entry.load,
            })
            .collect(),
    };
    // Section 3: runtime configuration mirror (generations, adaptive floor,
    // keepalive/reconnect knobs, quarantine state).
    let me_runtime = MinimalMeRuntimeData {
        active_generation: runtime.active_generation,
        warm_generation: runtime.warm_generation,
        pending_hardswap_generation: runtime.pending_hardswap_generation,
        pending_hardswap_age_secs: runtime.pending_hardswap_age_secs,
        hardswap_enabled: runtime.hardswap_enabled,
        floor_mode: runtime.floor_mode,
        adaptive_floor_idle_secs: runtime.adaptive_floor_idle_secs,
        adaptive_floor_min_writers_single_endpoint: runtime
            .adaptive_floor_min_writers_single_endpoint,
        adaptive_floor_min_writers_multi_endpoint: runtime
            .adaptive_floor_min_writers_multi_endpoint,
        adaptive_floor_recover_grace_secs: runtime.adaptive_floor_recover_grace_secs,
        adaptive_floor_writers_per_core_total: runtime
            .adaptive_floor_writers_per_core_total,
        adaptive_floor_cpu_cores_override: runtime.adaptive_floor_cpu_cores_override,
        adaptive_floor_max_extra_writers_single_per_core: runtime
            .adaptive_floor_max_extra_writers_single_per_core,
        adaptive_floor_max_extra_writers_multi_per_core: runtime
            .adaptive_floor_max_extra_writers_multi_per_core,
        adaptive_floor_max_active_writers_per_core: runtime
            .adaptive_floor_max_active_writers_per_core,
        adaptive_floor_max_warm_writers_per_core: runtime
            .adaptive_floor_max_warm_writers_per_core,
        adaptive_floor_max_active_writers_global: runtime
            .adaptive_floor_max_active_writers_global,
        adaptive_floor_max_warm_writers_global: runtime
            .adaptive_floor_max_warm_writers_global,
        adaptive_floor_cpu_cores_detected: runtime.adaptive_floor_cpu_cores_detected,
        adaptive_floor_cpu_cores_effective: runtime.adaptive_floor_cpu_cores_effective,
        adaptive_floor_global_cap_raw: runtime.adaptive_floor_global_cap_raw,
        adaptive_floor_global_cap_effective: runtime.adaptive_floor_global_cap_effective,
        adaptive_floor_target_writers_total: runtime.adaptive_floor_target_writers_total,
        adaptive_floor_active_cap_configured: runtime.adaptive_floor_active_cap_configured,
        adaptive_floor_active_cap_effective: runtime.adaptive_floor_active_cap_effective,
        adaptive_floor_warm_cap_configured: runtime.adaptive_floor_warm_cap_configured,
        adaptive_floor_warm_cap_effective: runtime.adaptive_floor_warm_cap_effective,
        adaptive_floor_active_writers_current: runtime.adaptive_floor_active_writers_current,
        adaptive_floor_warm_writers_current: runtime.adaptive_floor_warm_writers_current,
        me_keepalive_enabled: runtime.me_keepalive_enabled,
        me_keepalive_interval_secs: runtime.me_keepalive_interval_secs,
        me_keepalive_jitter_secs: runtime.me_keepalive_jitter_secs,
        me_keepalive_payload_random: runtime.me_keepalive_payload_random,
        rpc_proxy_req_every_secs: runtime.rpc_proxy_req_every_secs,
        me_reconnect_max_concurrent_per_dc: runtime.me_reconnect_max_concurrent_per_dc,
        me_reconnect_backoff_base_ms: runtime.me_reconnect_backoff_base_ms,
        me_reconnect_backoff_cap_ms: runtime.me_reconnect_backoff_cap_ms,
        me_reconnect_fast_retry_count: runtime.me_reconnect_fast_retry_count,
        me_pool_drain_ttl_secs: runtime.me_pool_drain_ttl_secs,
        me_pool_force_close_secs: runtime.me_pool_force_close_secs,
        me_pool_min_fresh_ratio: runtime.me_pool_min_fresh_ratio,
        me_bind_stale_mode: runtime.me_bind_stale_mode,
        me_bind_stale_ttl_secs: runtime.me_bind_stale_ttl_secs,
        me_single_endpoint_shadow_writers: runtime.me_single_endpoint_shadow_writers,
        me_single_endpoint_outage_mode_enabled: runtime.me_single_endpoint_outage_mode_enabled,
        me_single_endpoint_outage_disable_quarantine: runtime
            .me_single_endpoint_outage_disable_quarantine,
        me_single_endpoint_outage_backoff_min_ms: runtime.me_single_endpoint_outage_backoff_min_ms,
        me_single_endpoint_outage_backoff_max_ms: runtime.me_single_endpoint_outage_backoff_max_ms,
        me_single_endpoint_shadow_rotate_every_secs: runtime
            .me_single_endpoint_shadow_rotate_every_secs,
        me_deterministic_writer_sort: runtime.me_deterministic_writer_sort,
        me_writer_pick_mode: runtime.me_writer_pick_mode,
        me_writer_pick_sample_size: runtime.me_writer_pick_sample_size,
        me_socks_kdf_policy: runtime.me_socks_kdf_policy,
        quarantined_endpoints_total: runtime.quarantined_endpoints.len(),
        quarantined_endpoints: runtime
            .quarantined_endpoints
            .into_iter()
            .map(|entry| MinimalQuarantineData {
                endpoint: entry.endpoint.to_string(),
                remaining_ms: entry.remaining_ms,
            })
            .collect(),
    };
    // Section 4: selected network path (v4/v6 address) per DC.
    let network_path = runtime
        .network_path
        .into_iter()
        .map(|entry| MinimalDcPathData {
            dc: entry.dc,
            ip_preference: entry.ip_preference,
            selected_addr_v4: entry.selected_addr_v4.map(|value| value.to_string()),
            selected_addr_v6: entry.selected_addr_v6.map(|value| value.to_string()),
        })
        .collect();

    let payload = MinimalAllPayload {
        me_writers,
        dcs,
        me_runtime: Some(me_runtime),
        network_path,
    };

    // Store the freshly built payload for subsequent calls within the TTL.
    if cache_ttl_ms > 0 {
        let entry = MinimalCacheEntry {
            expires_at: Instant::now() + Duration::from_millis(cache_ttl_ms),
            payload: payload.clone(),
            generated_at_epoch_secs,
        };
        *shared.minimal_cache.lock().await = Some(entry);
    }

    Some((generated_at_epoch_secs, payload))
}
|
||||||
|
|
||||||
|
fn disabled_me_writers(now_epoch_secs: u64, reason: &'static str) -> MeWritersData {
|
||||||
|
MeWritersData {
|
||||||
|
middle_proxy_enabled: false,
|
||||||
|
reason: Some(reason),
|
||||||
|
generated_at_epoch_secs: now_epoch_secs,
|
||||||
|
summary: MeWritersSummary {
|
||||||
|
configured_dc_groups: 0,
|
||||||
|
configured_endpoints: 0,
|
||||||
|
available_endpoints: 0,
|
||||||
|
available_pct: 0.0,
|
||||||
|
required_writers: 0,
|
||||||
|
alive_writers: 0,
|
||||||
|
coverage_pct: 0.0,
|
||||||
|
},
|
||||||
|
writers: Vec::new(),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
fn disabled_dcs(now_epoch_secs: u64, reason: &'static str) -> DcStatusData {
|
||||||
|
DcStatusData {
|
||||||
|
middle_proxy_enabled: false,
|
||||||
|
reason: Some(reason),
|
||||||
|
generated_at_epoch_secs: now_epoch_secs,
|
||||||
|
dcs: Vec::new(),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
fn map_route_kind(value: UpstreamRouteKind) -> &'static str {
|
||||||
|
match value {
|
||||||
|
UpstreamRouteKind::Direct => "direct",
|
||||||
|
UpstreamRouteKind::Socks4 => "socks4",
|
||||||
|
UpstreamRouteKind::Socks5 => "socks5",
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
fn map_ip_preference(value: IpPreference) -> &'static str {
|
||||||
|
match value {
|
||||||
|
IpPreference::Unknown => "unknown",
|
||||||
|
IpPreference::PreferV6 => "prefer_v6",
|
||||||
|
IpPreference::PreferV4 => "prefer_v4",
|
||||||
|
IpPreference::BothWork => "both_work",
|
||||||
|
IpPreference::Unavailable => "unavailable",
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Current wall-clock time as whole seconds since the Unix epoch; yields 0
/// when the system clock is set before the epoch.
fn now_epoch_secs() -> u64 {
    SystemTime::now()
        .duration_since(UNIX_EPOCH)
        .map(|elapsed| elapsed.as_secs())
        .unwrap_or(0)
}
|
||||||
66
src/api/runtime_watch.rs
Normal file
66
src/api/runtime_watch.rs
Normal file
@@ -0,0 +1,66 @@
|
|||||||
|
use std::sync::Arc;
|
||||||
|
use std::sync::atomic::Ordering;
|
||||||
|
use std::time::{SystemTime, UNIX_EPOCH};
|
||||||
|
|
||||||
|
use tokio::sync::watch;
|
||||||
|
|
||||||
|
use crate::config::ProxyConfig;
|
||||||
|
|
||||||
|
use super::ApiRuntimeState;
|
||||||
|
use super::events::ApiEventStore;
|
||||||
|
|
||||||
|
pub(super) fn spawn_runtime_watchers(
|
||||||
|
config_rx: watch::Receiver<Arc<ProxyConfig>>,
|
||||||
|
admission_rx: watch::Receiver<bool>,
|
||||||
|
runtime_state: Arc<ApiRuntimeState>,
|
||||||
|
runtime_events: Arc<ApiEventStore>,
|
||||||
|
) {
|
||||||
|
let mut config_rx_reload = config_rx;
|
||||||
|
let runtime_state_reload = runtime_state.clone();
|
||||||
|
let runtime_events_reload = runtime_events.clone();
|
||||||
|
tokio::spawn(async move {
|
||||||
|
loop {
|
||||||
|
if config_rx_reload.changed().await.is_err() {
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
runtime_state_reload
|
||||||
|
.config_reload_count
|
||||||
|
.fetch_add(1, Ordering::Relaxed);
|
||||||
|
runtime_state_reload
|
||||||
|
.last_config_reload_epoch_secs
|
||||||
|
.store(now_epoch_secs(), Ordering::Relaxed);
|
||||||
|
runtime_events_reload.record("config.reload.applied", "config receiver updated");
|
||||||
|
}
|
||||||
|
});
|
||||||
|
|
||||||
|
let mut admission_rx_watch = admission_rx;
|
||||||
|
tokio::spawn(async move {
|
||||||
|
runtime_state
|
||||||
|
.admission_open
|
||||||
|
.store(*admission_rx_watch.borrow(), Ordering::Relaxed);
|
||||||
|
runtime_events.record(
|
||||||
|
"admission.state",
|
||||||
|
format!("accepting_new_connections={}", *admission_rx_watch.borrow()),
|
||||||
|
);
|
||||||
|
loop {
|
||||||
|
if admission_rx_watch.changed().await.is_err() {
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
let admission_open = *admission_rx_watch.borrow();
|
||||||
|
runtime_state
|
||||||
|
.admission_open
|
||||||
|
.store(admission_open, Ordering::Relaxed);
|
||||||
|
runtime_events.record(
|
||||||
|
"admission.state",
|
||||||
|
format!("accepting_new_connections={}", admission_open),
|
||||||
|
);
|
||||||
|
}
|
||||||
|
});
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Current wall-clock time as whole seconds since the Unix epoch; a
/// pre-epoch system clock yields 0.
fn now_epoch_secs() -> u64 {
    let elapsed = SystemTime::now()
        .duration_since(UNIX_EPOCH)
        .unwrap_or(Duration::ZERO);
    elapsed.as_secs()
}
|
||||||
305
src/api/runtime_zero.rs
Normal file
305
src/api/runtime_zero.rs
Normal file
@@ -0,0 +1,305 @@
|
|||||||
|
use std::sync::atomic::Ordering;
|
||||||
|
|
||||||
|
use serde::Serialize;
|
||||||
|
|
||||||
|
use crate::config::{MeFloorMode, MeWriterPickMode, ProxyConfig, UserMaxUniqueIpsMode};
|
||||||
|
use crate::proxy::route_mode::RelayRouteMode;
|
||||||
|
|
||||||
|
use super::ApiShared;
|
||||||
|
use super::runtime_init::build_runtime_startup_summary;
|
||||||
|
|
||||||
|
#[derive(Serialize)]
|
||||||
|
pub(super) struct SystemInfoData {
|
||||||
|
pub(super) version: String,
|
||||||
|
pub(super) target_arch: String,
|
||||||
|
pub(super) target_os: String,
|
||||||
|
pub(super) build_profile: String,
|
||||||
|
#[serde(skip_serializing_if = "Option::is_none")]
|
||||||
|
pub(super) git_commit: Option<String>,
|
||||||
|
#[serde(skip_serializing_if = "Option::is_none")]
|
||||||
|
pub(super) build_time_utc: Option<String>,
|
||||||
|
#[serde(skip_serializing_if = "Option::is_none")]
|
||||||
|
pub(super) rustc_version: Option<String>,
|
||||||
|
pub(super) process_started_at_epoch_secs: u64,
|
||||||
|
pub(super) uptime_seconds: f64,
|
||||||
|
pub(super) config_path: String,
|
||||||
|
pub(super) config_hash: String,
|
||||||
|
pub(super) config_reload_count: u64,
|
||||||
|
#[serde(skip_serializing_if = "Option::is_none")]
|
||||||
|
pub(super) last_config_reload_epoch_secs: Option<u64>,
|
||||||
|
}
|
||||||
|
|
||||||
|
#[derive(Serialize)]
|
||||||
|
pub(super) struct RuntimeGatesData {
|
||||||
|
pub(super) accepting_new_connections: bool,
|
||||||
|
pub(super) conditional_cast_enabled: bool,
|
||||||
|
pub(super) me_runtime_ready: bool,
|
||||||
|
pub(super) me2dc_fallback_enabled: bool,
|
||||||
|
pub(super) use_middle_proxy: bool,
|
||||||
|
pub(super) route_mode: &'static str,
|
||||||
|
pub(super) reroute_active: bool,
|
||||||
|
#[serde(skip_serializing_if = "Option::is_none")]
|
||||||
|
pub(super) reroute_to_direct_at_epoch_secs: Option<u64>,
|
||||||
|
pub(super) startup_status: &'static str,
|
||||||
|
pub(super) startup_stage: String,
|
||||||
|
pub(super) startup_progress_pct: f64,
|
||||||
|
}
|
||||||
|
|
||||||
|
#[derive(Serialize)]
|
||||||
|
pub(super) struct EffectiveTimeoutLimits {
|
||||||
|
pub(super) client_handshake_secs: u64,
|
||||||
|
pub(super) tg_connect_secs: u64,
|
||||||
|
pub(super) client_keepalive_secs: u64,
|
||||||
|
pub(super) client_ack_secs: u64,
|
||||||
|
pub(super) me_one_retry: u8,
|
||||||
|
pub(super) me_one_timeout_ms: u64,
|
||||||
|
}
|
||||||
|
|
||||||
|
#[derive(Serialize)]
|
||||||
|
pub(super) struct EffectiveUpstreamLimits {
|
||||||
|
pub(super) connect_retry_attempts: u32,
|
||||||
|
pub(super) connect_retry_backoff_ms: u64,
|
||||||
|
pub(super) connect_budget_ms: u64,
|
||||||
|
pub(super) unhealthy_fail_threshold: u32,
|
||||||
|
pub(super) connect_failfast_hard_errors: bool,
|
||||||
|
}
|
||||||
|
|
||||||
|
#[derive(Serialize)]
|
||||||
|
pub(super) struct EffectiveMiddleProxyLimits {
|
||||||
|
pub(super) floor_mode: &'static str,
|
||||||
|
pub(super) adaptive_floor_idle_secs: u64,
|
||||||
|
pub(super) adaptive_floor_min_writers_single_endpoint: u8,
|
||||||
|
pub(super) adaptive_floor_min_writers_multi_endpoint: u8,
|
||||||
|
pub(super) adaptive_floor_recover_grace_secs: u64,
|
||||||
|
pub(super) adaptive_floor_writers_per_core_total: u16,
|
||||||
|
pub(super) adaptive_floor_cpu_cores_override: u16,
|
||||||
|
pub(super) adaptive_floor_max_extra_writers_single_per_core: u16,
|
||||||
|
pub(super) adaptive_floor_max_extra_writers_multi_per_core: u16,
|
||||||
|
pub(super) adaptive_floor_max_active_writers_per_core: u16,
|
||||||
|
pub(super) adaptive_floor_max_warm_writers_per_core: u16,
|
||||||
|
pub(super) adaptive_floor_max_active_writers_global: u32,
|
||||||
|
pub(super) adaptive_floor_max_warm_writers_global: u32,
|
||||||
|
pub(super) reconnect_max_concurrent_per_dc: u32,
|
||||||
|
pub(super) reconnect_backoff_base_ms: u64,
|
||||||
|
pub(super) reconnect_backoff_cap_ms: u64,
|
||||||
|
pub(super) reconnect_fast_retry_count: u32,
|
||||||
|
pub(super) writer_pick_mode: &'static str,
|
||||||
|
pub(super) writer_pick_sample_size: u8,
|
||||||
|
pub(super) me2dc_fallback: bool,
|
||||||
|
}
|
||||||
|
|
||||||
|
#[derive(Serialize)]
|
||||||
|
pub(super) struct EffectiveUserIpPolicyLimits {
|
||||||
|
pub(super) mode: &'static str,
|
||||||
|
pub(super) window_secs: u64,
|
||||||
|
}
|
||||||
|
|
||||||
|
#[derive(Serialize)]
|
||||||
|
pub(super) struct EffectiveLimitsData {
|
||||||
|
pub(super) update_every_secs: u64,
|
||||||
|
pub(super) me_reinit_every_secs: u64,
|
||||||
|
pub(super) me_pool_force_close_secs: u64,
|
||||||
|
pub(super) timeouts: EffectiveTimeoutLimits,
|
||||||
|
pub(super) upstream: EffectiveUpstreamLimits,
|
||||||
|
pub(super) middle_proxy: EffectiveMiddleProxyLimits,
|
||||||
|
pub(super) user_ip_policy: EffectiveUserIpPolicyLimits,
|
||||||
|
}
|
||||||
|
|
||||||
|
#[derive(Serialize)]
|
||||||
|
pub(super) struct SecurityPostureData {
|
||||||
|
pub(super) api_read_only: bool,
|
||||||
|
pub(super) api_whitelist_enabled: bool,
|
||||||
|
pub(super) api_whitelist_entries: usize,
|
||||||
|
pub(super) api_auth_header_enabled: bool,
|
||||||
|
pub(super) proxy_protocol_enabled: bool,
|
||||||
|
pub(super) log_level: String,
|
||||||
|
pub(super) telemetry_core_enabled: bool,
|
||||||
|
pub(super) telemetry_user_enabled: bool,
|
||||||
|
pub(super) telemetry_me_level: String,
|
||||||
|
}
|
||||||
|
|
||||||
|
pub(super) fn build_system_info_data(
|
||||||
|
shared: &ApiShared,
|
||||||
|
_cfg: &ProxyConfig,
|
||||||
|
revision: &str,
|
||||||
|
) -> SystemInfoData {
|
||||||
|
let last_reload_epoch_secs = shared
|
||||||
|
.runtime_state
|
||||||
|
.last_config_reload_epoch_secs
|
||||||
|
.load(Ordering::Relaxed);
|
||||||
|
let last_config_reload_epoch_secs = (last_reload_epoch_secs > 0).then_some(last_reload_epoch_secs);
|
||||||
|
|
||||||
|
let git_commit = option_env!("TELEMT_GIT_COMMIT")
|
||||||
|
.or(option_env!("VERGEN_GIT_SHA"))
|
||||||
|
.or(option_env!("GIT_COMMIT"))
|
||||||
|
.map(ToString::to_string);
|
||||||
|
let build_time_utc = option_env!("BUILD_TIME_UTC")
|
||||||
|
.or(option_env!("VERGEN_BUILD_TIMESTAMP"))
|
||||||
|
.map(ToString::to_string);
|
||||||
|
let rustc_version = option_env!("RUSTC_VERSION")
|
||||||
|
.or(option_env!("VERGEN_RUSTC_SEMVER"))
|
||||||
|
.map(ToString::to_string);
|
||||||
|
|
||||||
|
SystemInfoData {
|
||||||
|
version: env!("CARGO_PKG_VERSION").to_string(),
|
||||||
|
target_arch: std::env::consts::ARCH.to_string(),
|
||||||
|
target_os: std::env::consts::OS.to_string(),
|
||||||
|
build_profile: option_env!("PROFILE").unwrap_or("unknown").to_string(),
|
||||||
|
git_commit,
|
||||||
|
build_time_utc,
|
||||||
|
rustc_version,
|
||||||
|
process_started_at_epoch_secs: shared.runtime_state.process_started_at_epoch_secs,
|
||||||
|
uptime_seconds: shared.stats.uptime_secs(),
|
||||||
|
config_path: shared.config_path.display().to_string(),
|
||||||
|
config_hash: revision.to_string(),
|
||||||
|
config_reload_count: shared.runtime_state.config_reload_count.load(Ordering::Relaxed),
|
||||||
|
last_config_reload_epoch_secs,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
pub(super) async fn build_runtime_gates_data(
|
||||||
|
shared: &ApiShared,
|
||||||
|
cfg: &ProxyConfig,
|
||||||
|
) -> RuntimeGatesData {
|
||||||
|
let startup_summary = build_runtime_startup_summary(shared).await;
|
||||||
|
let route_state = shared.route_runtime.snapshot();
|
||||||
|
let route_mode = route_state.mode.as_str();
|
||||||
|
let reroute_active = cfg.general.use_middle_proxy
|
||||||
|
&& cfg.general.me2dc_fallback
|
||||||
|
&& matches!(route_state.mode, RelayRouteMode::Direct);
|
||||||
|
let reroute_to_direct_at_epoch_secs = if reroute_active {
|
||||||
|
shared.route_runtime.direct_since_epoch_secs()
|
||||||
|
} else {
|
||||||
|
None
|
||||||
|
};
|
||||||
|
let me_runtime_ready = if !cfg.general.use_middle_proxy {
|
||||||
|
true
|
||||||
|
} else {
|
||||||
|
shared
|
||||||
|
.me_pool
|
||||||
|
.read()
|
||||||
|
.await
|
||||||
|
.as_ref()
|
||||||
|
.map(|pool| pool.is_runtime_ready())
|
||||||
|
.unwrap_or(false)
|
||||||
|
};
|
||||||
|
|
||||||
|
RuntimeGatesData {
|
||||||
|
accepting_new_connections: shared.runtime_state.admission_open.load(Ordering::Relaxed),
|
||||||
|
conditional_cast_enabled: cfg.general.use_middle_proxy,
|
||||||
|
me_runtime_ready,
|
||||||
|
me2dc_fallback_enabled: cfg.general.me2dc_fallback,
|
||||||
|
use_middle_proxy: cfg.general.use_middle_proxy,
|
||||||
|
route_mode,
|
||||||
|
reroute_active,
|
||||||
|
reroute_to_direct_at_epoch_secs,
|
||||||
|
startup_status: startup_summary.status,
|
||||||
|
startup_stage: startup_summary.stage,
|
||||||
|
startup_progress_pct: startup_summary.progress_pct,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
pub(super) fn build_limits_effective_data(cfg: &ProxyConfig) -> EffectiveLimitsData {
|
||||||
|
EffectiveLimitsData {
|
||||||
|
update_every_secs: cfg.general.effective_update_every_secs(),
|
||||||
|
me_reinit_every_secs: cfg.general.effective_me_reinit_every_secs(),
|
||||||
|
me_pool_force_close_secs: cfg.general.effective_me_pool_force_close_secs(),
|
||||||
|
timeouts: EffectiveTimeoutLimits {
|
||||||
|
client_handshake_secs: cfg.timeouts.client_handshake,
|
||||||
|
tg_connect_secs: cfg.timeouts.tg_connect,
|
||||||
|
client_keepalive_secs: cfg.timeouts.client_keepalive,
|
||||||
|
client_ack_secs: cfg.timeouts.client_ack,
|
||||||
|
me_one_retry: cfg.timeouts.me_one_retry,
|
||||||
|
me_one_timeout_ms: cfg.timeouts.me_one_timeout_ms,
|
||||||
|
},
|
||||||
|
upstream: EffectiveUpstreamLimits {
|
||||||
|
connect_retry_attempts: cfg.general.upstream_connect_retry_attempts,
|
||||||
|
connect_retry_backoff_ms: cfg.general.upstream_connect_retry_backoff_ms,
|
||||||
|
connect_budget_ms: cfg.general.upstream_connect_budget_ms,
|
||||||
|
unhealthy_fail_threshold: cfg.general.upstream_unhealthy_fail_threshold,
|
||||||
|
connect_failfast_hard_errors: cfg.general.upstream_connect_failfast_hard_errors,
|
||||||
|
},
|
||||||
|
middle_proxy: EffectiveMiddleProxyLimits {
|
||||||
|
floor_mode: me_floor_mode_label(cfg.general.me_floor_mode),
|
||||||
|
adaptive_floor_idle_secs: cfg.general.me_adaptive_floor_idle_secs,
|
||||||
|
adaptive_floor_min_writers_single_endpoint: cfg
|
||||||
|
.general
|
||||||
|
.me_adaptive_floor_min_writers_single_endpoint,
|
||||||
|
adaptive_floor_min_writers_multi_endpoint: cfg
|
||||||
|
.general
|
||||||
|
.me_adaptive_floor_min_writers_multi_endpoint,
|
||||||
|
adaptive_floor_recover_grace_secs: cfg.general.me_adaptive_floor_recover_grace_secs,
|
||||||
|
adaptive_floor_writers_per_core_total: cfg
|
||||||
|
.general
|
||||||
|
.me_adaptive_floor_writers_per_core_total,
|
||||||
|
adaptive_floor_cpu_cores_override: cfg
|
||||||
|
.general
|
||||||
|
.me_adaptive_floor_cpu_cores_override,
|
||||||
|
adaptive_floor_max_extra_writers_single_per_core: cfg
|
||||||
|
.general
|
||||||
|
.me_adaptive_floor_max_extra_writers_single_per_core,
|
||||||
|
adaptive_floor_max_extra_writers_multi_per_core: cfg
|
||||||
|
.general
|
||||||
|
.me_adaptive_floor_max_extra_writers_multi_per_core,
|
||||||
|
adaptive_floor_max_active_writers_per_core: cfg
|
||||||
|
.general
|
||||||
|
.me_adaptive_floor_max_active_writers_per_core,
|
||||||
|
adaptive_floor_max_warm_writers_per_core: cfg
|
||||||
|
.general
|
||||||
|
.me_adaptive_floor_max_warm_writers_per_core,
|
||||||
|
adaptive_floor_max_active_writers_global: cfg
|
||||||
|
.general
|
||||||
|
.me_adaptive_floor_max_active_writers_global,
|
||||||
|
adaptive_floor_max_warm_writers_global: cfg
|
||||||
|
.general
|
||||||
|
.me_adaptive_floor_max_warm_writers_global,
|
||||||
|
reconnect_max_concurrent_per_dc: cfg.general.me_reconnect_max_concurrent_per_dc,
|
||||||
|
reconnect_backoff_base_ms: cfg.general.me_reconnect_backoff_base_ms,
|
||||||
|
reconnect_backoff_cap_ms: cfg.general.me_reconnect_backoff_cap_ms,
|
||||||
|
reconnect_fast_retry_count: cfg.general.me_reconnect_fast_retry_count,
|
||||||
|
writer_pick_mode: me_writer_pick_mode_label(cfg.general.me_writer_pick_mode),
|
||||||
|
writer_pick_sample_size: cfg.general.me_writer_pick_sample_size,
|
||||||
|
me2dc_fallback: cfg.general.me2dc_fallback,
|
||||||
|
},
|
||||||
|
user_ip_policy: EffectiveUserIpPolicyLimits {
|
||||||
|
mode: user_max_unique_ips_mode_label(cfg.access.user_max_unique_ips_mode),
|
||||||
|
window_secs: cfg.access.user_max_unique_ips_window_secs,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
pub(super) fn build_security_posture_data(cfg: &ProxyConfig) -> SecurityPostureData {
|
||||||
|
SecurityPostureData {
|
||||||
|
api_read_only: cfg.server.api.read_only,
|
||||||
|
api_whitelist_enabled: !cfg.server.api.whitelist.is_empty(),
|
||||||
|
api_whitelist_entries: cfg.server.api.whitelist.len(),
|
||||||
|
api_auth_header_enabled: !cfg.server.api.auth_header.is_empty(),
|
||||||
|
proxy_protocol_enabled: cfg.server.proxy_protocol,
|
||||||
|
log_level: cfg.general.log_level.to_string(),
|
||||||
|
telemetry_core_enabled: cfg.general.telemetry.core_enabled,
|
||||||
|
telemetry_user_enabled: cfg.general.telemetry.user_enabled,
|
||||||
|
telemetry_me_level: cfg.general.telemetry.me_level.to_string(),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
fn user_max_unique_ips_mode_label(mode: UserMaxUniqueIpsMode) -> &'static str {
|
||||||
|
match mode {
|
||||||
|
UserMaxUniqueIpsMode::ActiveWindow => "active_window",
|
||||||
|
UserMaxUniqueIpsMode::TimeWindow => "time_window",
|
||||||
|
UserMaxUniqueIpsMode::Combined => "combined",
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
fn me_floor_mode_label(mode: MeFloorMode) -> &'static str {
|
||||||
|
match mode {
|
||||||
|
MeFloorMode::Static => "static",
|
||||||
|
MeFloorMode::Adaptive => "adaptive",
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
fn me_writer_pick_mode_label(mode: MeWriterPickMode) -> &'static str {
|
||||||
|
match mode {
|
||||||
|
MeWriterPickMode::SortedRr => "sorted_rr",
|
||||||
|
MeWriterPickMode::P2c => "p2c",
|
||||||
|
}
|
||||||
|
}
|
||||||
551
src/api/users.rs
Normal file
551
src/api/users.rs
Normal file
@@ -0,0 +1,551 @@
|
|||||||
|
use std::net::IpAddr;
|
||||||
|
|
||||||
|
use hyper::StatusCode;
|
||||||
|
|
||||||
|
use crate::config::ProxyConfig;
|
||||||
|
use crate::ip_tracker::UserIpTracker;
|
||||||
|
use crate::stats::Stats;
|
||||||
|
|
||||||
|
use super::ApiShared;
|
||||||
|
use super::config_store::{
|
||||||
|
AccessSection, ensure_expected_revision, load_config_from_disk, save_access_sections_to_disk,
|
||||||
|
save_config_to_disk,
|
||||||
|
};
|
||||||
|
use super::model::{
|
||||||
|
ApiFailure, CreateUserRequest, CreateUserResponse, PatchUserRequest, RotateSecretRequest,
|
||||||
|
UserInfo, UserLinks, is_valid_ad_tag, is_valid_user_secret, is_valid_username,
|
||||||
|
parse_optional_expiration, random_user_secret,
|
||||||
|
};
|
||||||
|
|
||||||
|
pub(super) async fn create_user(
|
||||||
|
body: CreateUserRequest,
|
||||||
|
expected_revision: Option<String>,
|
||||||
|
shared: &ApiShared,
|
||||||
|
) -> Result<(CreateUserResponse, String), ApiFailure> {
|
||||||
|
let touches_user_ad_tags = body.user_ad_tag.is_some();
|
||||||
|
let touches_user_max_tcp_conns = body.max_tcp_conns.is_some();
|
||||||
|
let touches_user_expirations = body.expiration_rfc3339.is_some();
|
||||||
|
let touches_user_data_quota = body.data_quota_bytes.is_some();
|
||||||
|
let touches_user_max_unique_ips = body.max_unique_ips.is_some();
|
||||||
|
|
||||||
|
if !is_valid_username(&body.username) {
|
||||||
|
return Err(ApiFailure::bad_request(
|
||||||
|
"username must match [A-Za-z0-9_.-] and be 1..64 chars",
|
||||||
|
));
|
||||||
|
}
|
||||||
|
|
||||||
|
let secret = match body.secret {
|
||||||
|
Some(secret) => {
|
||||||
|
if !is_valid_user_secret(&secret) {
|
||||||
|
return Err(ApiFailure::bad_request(
|
||||||
|
"secret must be exactly 32 hex characters",
|
||||||
|
));
|
||||||
|
}
|
||||||
|
secret
|
||||||
|
}
|
||||||
|
None => random_user_secret(),
|
||||||
|
};
|
||||||
|
|
||||||
|
if let Some(ad_tag) = body.user_ad_tag.as_ref() && !is_valid_ad_tag(ad_tag) {
|
||||||
|
return Err(ApiFailure::bad_request(
|
||||||
|
"user_ad_tag must be exactly 32 hex characters",
|
||||||
|
));
|
||||||
|
}
|
||||||
|
|
||||||
|
let expiration = parse_optional_expiration(body.expiration_rfc3339.as_deref())?;
|
||||||
|
let _guard = shared.mutation_lock.lock().await;
|
||||||
|
let mut cfg = load_config_from_disk(&shared.config_path).await?;
|
||||||
|
ensure_expected_revision(&shared.config_path, expected_revision.as_deref()).await?;
|
||||||
|
|
||||||
|
if cfg.access.users.contains_key(&body.username) {
|
||||||
|
return Err(ApiFailure::new(
|
||||||
|
StatusCode::CONFLICT,
|
||||||
|
"user_exists",
|
||||||
|
"User already exists",
|
||||||
|
));
|
||||||
|
}
|
||||||
|
|
||||||
|
cfg.access.users.insert(body.username.clone(), secret.clone());
|
||||||
|
if let Some(ad_tag) = body.user_ad_tag {
|
||||||
|
cfg.access.user_ad_tags.insert(body.username.clone(), ad_tag);
|
||||||
|
}
|
||||||
|
if let Some(limit) = body.max_tcp_conns {
|
||||||
|
cfg.access.user_max_tcp_conns.insert(body.username.clone(), limit);
|
||||||
|
}
|
||||||
|
if let Some(expiration) = expiration {
|
||||||
|
cfg.access
|
||||||
|
.user_expirations
|
||||||
|
.insert(body.username.clone(), expiration);
|
||||||
|
}
|
||||||
|
if let Some(quota) = body.data_quota_bytes {
|
||||||
|
cfg.access.user_data_quota.insert(body.username.clone(), quota);
|
||||||
|
}
|
||||||
|
|
||||||
|
let updated_limit = body.max_unique_ips;
|
||||||
|
if let Some(limit) = updated_limit {
|
||||||
|
cfg.access
|
||||||
|
.user_max_unique_ips
|
||||||
|
.insert(body.username.clone(), limit);
|
||||||
|
}
|
||||||
|
|
||||||
|
cfg.validate()
|
||||||
|
.map_err(|e| ApiFailure::bad_request(format!("config validation failed: {}", e)))?;
|
||||||
|
|
||||||
|
let mut touched_sections = vec![AccessSection::Users];
|
||||||
|
if touches_user_ad_tags {
|
||||||
|
touched_sections.push(AccessSection::UserAdTags);
|
||||||
|
}
|
||||||
|
if touches_user_max_tcp_conns {
|
||||||
|
touched_sections.push(AccessSection::UserMaxTcpConns);
|
||||||
|
}
|
||||||
|
if touches_user_expirations {
|
||||||
|
touched_sections.push(AccessSection::UserExpirations);
|
||||||
|
}
|
||||||
|
if touches_user_data_quota {
|
||||||
|
touched_sections.push(AccessSection::UserDataQuota);
|
||||||
|
}
|
||||||
|
if touches_user_max_unique_ips {
|
||||||
|
touched_sections.push(AccessSection::UserMaxUniqueIps);
|
||||||
|
}
|
||||||
|
|
||||||
|
let revision = save_access_sections_to_disk(&shared.config_path, &cfg, &touched_sections).await?;
|
||||||
|
drop(_guard);
|
||||||
|
|
||||||
|
if let Some(limit) = updated_limit {
|
||||||
|
shared.ip_tracker.set_user_limit(&body.username, limit).await;
|
||||||
|
}
|
||||||
|
let (detected_ip_v4, detected_ip_v6) = shared.detected_link_ips();
|
||||||
|
|
||||||
|
let users = users_from_config(
|
||||||
|
&cfg,
|
||||||
|
&shared.stats,
|
||||||
|
&shared.ip_tracker,
|
||||||
|
detected_ip_v4,
|
||||||
|
detected_ip_v6,
|
||||||
|
)
|
||||||
|
.await;
|
||||||
|
let user = users
|
||||||
|
.into_iter()
|
||||||
|
.find(|entry| entry.username == body.username)
|
||||||
|
.unwrap_or(UserInfo {
|
||||||
|
username: body.username.clone(),
|
||||||
|
user_ad_tag: None,
|
||||||
|
max_tcp_conns: None,
|
||||||
|
expiration_rfc3339: None,
|
||||||
|
data_quota_bytes: None,
|
||||||
|
max_unique_ips: updated_limit,
|
||||||
|
current_connections: 0,
|
||||||
|
active_unique_ips: 0,
|
||||||
|
active_unique_ips_list: Vec::new(),
|
||||||
|
recent_unique_ips: 0,
|
||||||
|
recent_unique_ips_list: Vec::new(),
|
||||||
|
total_octets: 0,
|
||||||
|
links: build_user_links(
|
||||||
|
&cfg,
|
||||||
|
&secret,
|
||||||
|
detected_ip_v4,
|
||||||
|
detected_ip_v6,
|
||||||
|
),
|
||||||
|
});
|
||||||
|
|
||||||
|
Ok((CreateUserResponse { user, secret }, revision))
|
||||||
|
}
|
||||||
|
|
||||||
|
pub(super) async fn patch_user(
|
||||||
|
user: &str,
|
||||||
|
body: PatchUserRequest,
|
||||||
|
expected_revision: Option<String>,
|
||||||
|
shared: &ApiShared,
|
||||||
|
) -> Result<(UserInfo, String), ApiFailure> {
|
||||||
|
if let Some(secret) = body.secret.as_ref() && !is_valid_user_secret(secret) {
|
||||||
|
return Err(ApiFailure::bad_request(
|
||||||
|
"secret must be exactly 32 hex characters",
|
||||||
|
));
|
||||||
|
}
|
||||||
|
if let Some(ad_tag) = body.user_ad_tag.as_ref() && !is_valid_ad_tag(ad_tag) {
|
||||||
|
return Err(ApiFailure::bad_request(
|
||||||
|
"user_ad_tag must be exactly 32 hex characters",
|
||||||
|
));
|
||||||
|
}
|
||||||
|
let expiration = parse_optional_expiration(body.expiration_rfc3339.as_deref())?;
|
||||||
|
let _guard = shared.mutation_lock.lock().await;
|
||||||
|
let mut cfg = load_config_from_disk(&shared.config_path).await?;
|
||||||
|
ensure_expected_revision(&shared.config_path, expected_revision.as_deref()).await?;
|
||||||
|
|
||||||
|
if !cfg.access.users.contains_key(user) {
|
||||||
|
return Err(ApiFailure::new(
|
||||||
|
StatusCode::NOT_FOUND,
|
||||||
|
"not_found",
|
||||||
|
"User not found",
|
||||||
|
));
|
||||||
|
}
|
||||||
|
|
||||||
|
if let Some(secret) = body.secret {
|
||||||
|
cfg.access.users.insert(user.to_string(), secret);
|
||||||
|
}
|
||||||
|
if let Some(ad_tag) = body.user_ad_tag {
|
||||||
|
cfg.access.user_ad_tags.insert(user.to_string(), ad_tag);
|
||||||
|
}
|
||||||
|
if let Some(limit) = body.max_tcp_conns {
|
||||||
|
cfg.access.user_max_tcp_conns.insert(user.to_string(), limit);
|
||||||
|
}
|
||||||
|
if let Some(expiration) = expiration {
|
||||||
|
cfg.access.user_expirations.insert(user.to_string(), expiration);
|
||||||
|
}
|
||||||
|
if let Some(quota) = body.data_quota_bytes {
|
||||||
|
cfg.access.user_data_quota.insert(user.to_string(), quota);
|
||||||
|
}
|
||||||
|
|
||||||
|
let mut updated_limit = None;
|
||||||
|
if let Some(limit) = body.max_unique_ips {
|
||||||
|
cfg.access.user_max_unique_ips.insert(user.to_string(), limit);
|
||||||
|
updated_limit = Some(limit);
|
||||||
|
}
|
||||||
|
|
||||||
|
cfg.validate()
|
||||||
|
.map_err(|e| ApiFailure::bad_request(format!("config validation failed: {}", e)))?;
|
||||||
|
|
||||||
|
let revision = save_config_to_disk(&shared.config_path, &cfg).await?;
|
||||||
|
drop(_guard);
|
||||||
|
if let Some(limit) = updated_limit {
|
||||||
|
shared.ip_tracker.set_user_limit(user, limit).await;
|
||||||
|
}
|
||||||
|
let (detected_ip_v4, detected_ip_v6) = shared.detected_link_ips();
|
||||||
|
let users = users_from_config(
|
||||||
|
&cfg,
|
||||||
|
&shared.stats,
|
||||||
|
&shared.ip_tracker,
|
||||||
|
detected_ip_v4,
|
||||||
|
detected_ip_v6,
|
||||||
|
)
|
||||||
|
.await;
|
||||||
|
let user_info = users
|
||||||
|
.into_iter()
|
||||||
|
.find(|entry| entry.username == user)
|
||||||
|
.ok_or_else(|| ApiFailure::internal("failed to build updated user view"))?;
|
||||||
|
|
||||||
|
Ok((user_info, revision))
|
||||||
|
}
|
||||||
|
|
||||||
|
pub(super) async fn rotate_secret(
|
||||||
|
user: &str,
|
||||||
|
body: RotateSecretRequest,
|
||||||
|
expected_revision: Option<String>,
|
||||||
|
shared: &ApiShared,
|
||||||
|
) -> Result<(CreateUserResponse, String), ApiFailure> {
|
||||||
|
let secret = body.secret.unwrap_or_else(random_user_secret);
|
||||||
|
if !is_valid_user_secret(&secret) {
|
||||||
|
return Err(ApiFailure::bad_request(
|
||||||
|
"secret must be exactly 32 hex characters",
|
||||||
|
));
|
||||||
|
}
|
||||||
|
|
||||||
|
let _guard = shared.mutation_lock.lock().await;
|
||||||
|
let mut cfg = load_config_from_disk(&shared.config_path).await?;
|
||||||
|
ensure_expected_revision(&shared.config_path, expected_revision.as_deref()).await?;
|
||||||
|
|
||||||
|
if !cfg.access.users.contains_key(user) {
|
||||||
|
return Err(ApiFailure::new(
|
||||||
|
StatusCode::NOT_FOUND,
|
||||||
|
"not_found",
|
||||||
|
"User not found",
|
||||||
|
));
|
||||||
|
}
|
||||||
|
|
||||||
|
cfg.access.users.insert(user.to_string(), secret.clone());
|
||||||
|
cfg.validate()
|
||||||
|
.map_err(|e| ApiFailure::bad_request(format!("config validation failed: {}", e)))?;
|
||||||
|
let touched_sections = [
|
||||||
|
AccessSection::Users,
|
||||||
|
AccessSection::UserAdTags,
|
||||||
|
AccessSection::UserMaxTcpConns,
|
||||||
|
AccessSection::UserExpirations,
|
||||||
|
AccessSection::UserDataQuota,
|
||||||
|
AccessSection::UserMaxUniqueIps,
|
||||||
|
];
|
||||||
|
let revision = save_access_sections_to_disk(&shared.config_path, &cfg, &touched_sections).await?;
|
||||||
|
drop(_guard);
|
||||||
|
|
||||||
|
let (detected_ip_v4, detected_ip_v6) = shared.detected_link_ips();
|
||||||
|
let users = users_from_config(
|
||||||
|
&cfg,
|
||||||
|
&shared.stats,
|
||||||
|
&shared.ip_tracker,
|
||||||
|
detected_ip_v4,
|
||||||
|
detected_ip_v6,
|
||||||
|
)
|
||||||
|
.await;
|
||||||
|
let user_info = users
|
||||||
|
.into_iter()
|
||||||
|
.find(|entry| entry.username == user)
|
||||||
|
.ok_or_else(|| ApiFailure::internal("failed to build updated user view"))?;
|
||||||
|
|
||||||
|
Ok((
|
||||||
|
CreateUserResponse {
|
||||||
|
user: user_info,
|
||||||
|
secret,
|
||||||
|
},
|
||||||
|
revision,
|
||||||
|
))
|
||||||
|
}
|
||||||
|
|
||||||
|
pub(super) async fn delete_user(
|
||||||
|
user: &str,
|
||||||
|
expected_revision: Option<String>,
|
||||||
|
shared: &ApiShared,
|
||||||
|
) -> Result<(String, String), ApiFailure> {
|
||||||
|
let _guard = shared.mutation_lock.lock().await;
|
||||||
|
let mut cfg = load_config_from_disk(&shared.config_path).await?;
|
||||||
|
ensure_expected_revision(&shared.config_path, expected_revision.as_deref()).await?;
|
||||||
|
|
||||||
|
if !cfg.access.users.contains_key(user) {
|
||||||
|
return Err(ApiFailure::new(
|
||||||
|
StatusCode::NOT_FOUND,
|
||||||
|
"not_found",
|
||||||
|
"User not found",
|
||||||
|
));
|
||||||
|
}
|
||||||
|
if cfg.access.users.len() <= 1 {
|
||||||
|
return Err(ApiFailure::new(
|
||||||
|
StatusCode::CONFLICT,
|
||||||
|
"last_user_forbidden",
|
||||||
|
"Cannot delete the last configured user",
|
||||||
|
));
|
||||||
|
}
|
||||||
|
|
||||||
|
cfg.access.users.remove(user);
|
||||||
|
cfg.access.user_ad_tags.remove(user);
|
||||||
|
cfg.access.user_max_tcp_conns.remove(user);
|
||||||
|
cfg.access.user_expirations.remove(user);
|
||||||
|
cfg.access.user_data_quota.remove(user);
|
||||||
|
cfg.access.user_max_unique_ips.remove(user);
|
||||||
|
|
||||||
|
cfg.validate()
|
||||||
|
.map_err(|e| ApiFailure::bad_request(format!("config validation failed: {}", e)))?;
|
||||||
|
let touched_sections = [
|
||||||
|
AccessSection::Users,
|
||||||
|
AccessSection::UserAdTags,
|
||||||
|
AccessSection::UserMaxTcpConns,
|
||||||
|
AccessSection::UserExpirations,
|
||||||
|
AccessSection::UserDataQuota,
|
||||||
|
AccessSection::UserMaxUniqueIps,
|
||||||
|
];
|
||||||
|
let revision = save_access_sections_to_disk(&shared.config_path, &cfg, &touched_sections).await?;
|
||||||
|
drop(_guard);
|
||||||
|
shared.ip_tracker.remove_user_limit(user).await;
|
||||||
|
shared.ip_tracker.clear_user_ips(user).await;
|
||||||
|
|
||||||
|
Ok((user.to_string(), revision))
|
||||||
|
}
|
||||||
|
|
||||||
|
pub(super) async fn users_from_config(
|
||||||
|
cfg: &ProxyConfig,
|
||||||
|
stats: &Stats,
|
||||||
|
ip_tracker: &UserIpTracker,
|
||||||
|
startup_detected_ip_v4: Option<IpAddr>,
|
||||||
|
startup_detected_ip_v6: Option<IpAddr>,
|
||||||
|
) -> Vec<UserInfo> {
|
||||||
|
let mut names = cfg.access.users.keys().cloned().collect::<Vec<_>>();
|
||||||
|
names.sort();
|
||||||
|
let active_ip_lists = ip_tracker.get_active_ips_for_users(&names).await;
|
||||||
|
let recent_ip_lists = ip_tracker.get_recent_ips_for_users(&names).await;
|
||||||
|
|
||||||
|
let mut users = Vec::with_capacity(names.len());
|
||||||
|
for username in names {
|
||||||
|
let active_ip_list = active_ip_lists
|
||||||
|
.get(&username)
|
||||||
|
.cloned()
|
||||||
|
.unwrap_or_else(Vec::new);
|
||||||
|
let recent_ip_list = recent_ip_lists
|
||||||
|
.get(&username)
|
||||||
|
.cloned()
|
||||||
|
.unwrap_or_else(Vec::new);
|
||||||
|
let links = cfg
|
||||||
|
.access
|
||||||
|
.users
|
||||||
|
.get(&username)
|
||||||
|
.map(|secret| {
|
||||||
|
build_user_links(
|
||||||
|
cfg,
|
||||||
|
secret,
|
||||||
|
startup_detected_ip_v4,
|
||||||
|
startup_detected_ip_v6,
|
||||||
|
)
|
||||||
|
})
|
||||||
|
.unwrap_or(UserLinks {
|
||||||
|
classic: Vec::new(),
|
||||||
|
secure: Vec::new(),
|
||||||
|
tls: Vec::new(),
|
||||||
|
});
|
||||||
|
users.push(UserInfo {
|
||||||
|
user_ad_tag: cfg.access.user_ad_tags.get(&username).cloned(),
|
||||||
|
max_tcp_conns: cfg.access.user_max_tcp_conns.get(&username).copied(),
|
||||||
|
expiration_rfc3339: cfg
|
||||||
|
.access
|
||||||
|
.user_expirations
|
||||||
|
.get(&username)
|
||||||
|
.map(chrono::DateTime::<chrono::Utc>::to_rfc3339),
|
||||||
|
data_quota_bytes: cfg.access.user_data_quota.get(&username).copied(),
|
||||||
|
max_unique_ips: cfg.access.user_max_unique_ips.get(&username).copied(),
|
||||||
|
current_connections: stats.get_user_curr_connects(&username),
|
||||||
|
active_unique_ips: active_ip_list.len(),
|
||||||
|
active_unique_ips_list: active_ip_list,
|
||||||
|
recent_unique_ips: recent_ip_list.len(),
|
||||||
|
recent_unique_ips_list: recent_ip_list,
|
||||||
|
total_octets: stats.get_user_total_octets(&username),
|
||||||
|
links,
|
||||||
|
username,
|
||||||
|
});
|
||||||
|
}
|
||||||
|
users
|
||||||
|
}
|
||||||
|
|
||||||
|
fn build_user_links(
|
||||||
|
cfg: &ProxyConfig,
|
||||||
|
secret: &str,
|
||||||
|
startup_detected_ip_v4: Option<IpAddr>,
|
||||||
|
startup_detected_ip_v6: Option<IpAddr>,
|
||||||
|
) -> UserLinks {
|
||||||
|
let hosts = resolve_link_hosts(cfg, startup_detected_ip_v4, startup_detected_ip_v6);
|
||||||
|
let port = cfg.general.links.public_port.unwrap_or(cfg.server.port);
|
||||||
|
let tls_domains = resolve_tls_domains(cfg);
|
||||||
|
|
||||||
|
let mut classic = Vec::new();
|
||||||
|
let mut secure = Vec::new();
|
||||||
|
let mut tls = Vec::new();
|
||||||
|
|
||||||
|
for host in &hosts {
|
||||||
|
if cfg.general.modes.classic {
|
||||||
|
classic.push(format!(
|
||||||
|
"tg://proxy?server={}&port={}&secret={}",
|
||||||
|
host, port, secret
|
||||||
|
));
|
||||||
|
}
|
||||||
|
if cfg.general.modes.secure {
|
||||||
|
secure.push(format!(
|
||||||
|
"tg://proxy?server={}&port={}&secret=dd{}",
|
||||||
|
host, port, secret
|
||||||
|
));
|
||||||
|
}
|
||||||
|
if cfg.general.modes.tls {
|
||||||
|
for domain in &tls_domains {
|
||||||
|
let domain_hex = hex::encode(domain);
|
||||||
|
tls.push(format!(
|
||||||
|
"tg://proxy?server={}&port={}&secret=ee{}{}",
|
||||||
|
host, port, secret, domain_hex
|
||||||
|
));
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
UserLinks {
|
||||||
|
classic,
|
||||||
|
secure,
|
||||||
|
tls,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
fn resolve_link_hosts(
|
||||||
|
cfg: &ProxyConfig,
|
||||||
|
startup_detected_ip_v4: Option<IpAddr>,
|
||||||
|
startup_detected_ip_v6: Option<IpAddr>,
|
||||||
|
) -> Vec<String> {
|
||||||
|
if let Some(host) = cfg
|
||||||
|
.general
|
||||||
|
.links
|
||||||
|
.public_host
|
||||||
|
.as_deref()
|
||||||
|
.map(str::trim)
|
||||||
|
.filter(|value| !value.is_empty())
|
||||||
|
{
|
||||||
|
return vec![host.to_string()];
|
||||||
|
}
|
||||||
|
|
||||||
|
let mut hosts = Vec::new();
|
||||||
|
for listener in &cfg.server.listeners {
|
||||||
|
if let Some(host) = listener
|
||||||
|
.announce
|
||||||
|
.as_deref()
|
||||||
|
.map(str::trim)
|
||||||
|
.filter(|value| !value.is_empty())
|
||||||
|
{
|
||||||
|
push_unique_host(&mut hosts, host);
|
||||||
|
continue;
|
||||||
|
}
|
||||||
|
if let Some(ip) = listener.announce_ip {
|
||||||
|
if !ip.is_unspecified() {
|
||||||
|
push_unique_host(&mut hosts, &ip.to_string());
|
||||||
|
continue;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if listener.ip.is_unspecified() {
|
||||||
|
let detected_ip = if listener.ip.is_ipv4() {
|
||||||
|
startup_detected_ip_v4
|
||||||
|
} else {
|
||||||
|
startup_detected_ip_v6
|
||||||
|
};
|
||||||
|
if let Some(ip) = detected_ip {
|
||||||
|
push_unique_host(&mut hosts, &ip.to_string());
|
||||||
|
} else {
|
||||||
|
push_unique_host(&mut hosts, &listener.ip.to_string());
|
||||||
|
}
|
||||||
|
continue;
|
||||||
|
}
|
||||||
|
push_unique_host(&mut hosts, &listener.ip.to_string());
|
||||||
|
}
|
||||||
|
|
||||||
|
if !hosts.is_empty() {
|
||||||
|
return hosts;
|
||||||
|
}
|
||||||
|
|
||||||
|
if let Some(ip) = startup_detected_ip_v4.or(startup_detected_ip_v6) {
|
||||||
|
return vec![ip.to_string()];
|
||||||
|
}
|
||||||
|
|
||||||
|
if let Some(host) = cfg.server.listen_addr_ipv4.as_deref() {
|
||||||
|
push_host_from_legacy_listen(&mut hosts, host);
|
||||||
|
}
|
||||||
|
if let Some(host) = cfg.server.listen_addr_ipv6.as_deref() {
|
||||||
|
push_host_from_legacy_listen(&mut hosts, host);
|
||||||
|
}
|
||||||
|
if !hosts.is_empty() {
|
||||||
|
return hosts;
|
||||||
|
}
|
||||||
|
|
||||||
|
vec!["UNKNOWN".to_string()]
|
||||||
|
}
|
||||||
|
|
||||||
|
fn push_host_from_legacy_listen(hosts: &mut Vec<String>, raw: &str) {
|
||||||
|
let candidate = raw.trim();
|
||||||
|
if candidate.is_empty() {
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
match candidate.parse::<IpAddr>() {
|
||||||
|
Ok(ip) if ip.is_unspecified() => {}
|
||||||
|
Ok(ip) => push_unique_host(hosts, &ip.to_string()),
|
||||||
|
Err(_) => push_unique_host(hosts, candidate),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
fn push_unique_host(hosts: &mut Vec<String>, candidate: &str) {
|
||||||
|
if !hosts.iter().any(|existing| existing == candidate) {
|
||||||
|
hosts.push(candidate.to_string());
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
fn resolve_tls_domains(cfg: &ProxyConfig) -> Vec<&str> {
|
||||||
|
let mut domains = Vec::with_capacity(1 + cfg.censorship.tls_domains.len());
|
||||||
|
let primary = cfg.censorship.tls_domain.as_str();
|
||||||
|
if !primary.is_empty() {
|
||||||
|
domains.push(primary);
|
||||||
|
}
|
||||||
|
for domain in &cfg.censorship.tls_domains {
|
||||||
|
let value = domain.as_str();
|
||||||
|
if value.is_empty() || domains.contains(&value) {
|
||||||
|
continue;
|
||||||
|
}
|
||||||
|
domains.push(value);
|
||||||
|
}
|
||||||
|
domains
|
||||||
|
}
|
||||||
@@ -196,7 +196,10 @@ use_middle_proxy = false
|
|||||||
log_level = "normal"
|
log_level = "normal"
|
||||||
desync_all_full = false
|
desync_all_full = false
|
||||||
update_every = 43200
|
update_every = 43200
|
||||||
me_reinit_drain_timeout_secs = 300
|
hardswap = false
|
||||||
|
me_pool_drain_ttl_secs = 90
|
||||||
|
me_pool_min_fresh_ratio = 0.8
|
||||||
|
me_reinit_drain_timeout_secs = 120
|
||||||
|
|
||||||
[network]
|
[network]
|
||||||
ipv4 = true
|
ipv4 = true
|
||||||
|
|||||||
@@ -1,9 +1,49 @@
|
|||||||
use std::net::IpAddr;
|
|
||||||
use std::collections::HashMap;
|
use std::collections::HashMap;
|
||||||
use ipnetwork::IpNetwork;
|
use ipnetwork::IpNetwork;
|
||||||
use serde::Deserialize;
|
use serde::Deserialize;
|
||||||
|
|
||||||
// Helper defaults kept private to the config module.
|
// Helper defaults kept private to the config module.
|
||||||
|
const DEFAULT_NETWORK_IPV6: Option<bool> = Some(false);
|
||||||
|
const DEFAULT_STUN_TCP_FALLBACK: bool = true;
|
||||||
|
const DEFAULT_MIDDLE_PROXY_WARM_STANDBY: usize = 16;
|
||||||
|
const DEFAULT_ME_RECONNECT_MAX_CONCURRENT_PER_DC: u32 = 8;
|
||||||
|
const DEFAULT_ME_RECONNECT_FAST_RETRY_COUNT: u32 = 16;
|
||||||
|
const DEFAULT_ME_SINGLE_ENDPOINT_SHADOW_WRITERS: u8 = 2;
|
||||||
|
const DEFAULT_ME_ADAPTIVE_FLOOR_IDLE_SECS: u64 = 90;
|
||||||
|
const DEFAULT_ME_ADAPTIVE_FLOOR_MIN_WRITERS_SINGLE_ENDPOINT: u8 = 1;
|
||||||
|
const DEFAULT_ME_ADAPTIVE_FLOOR_MIN_WRITERS_MULTI_ENDPOINT: u8 = 1;
|
||||||
|
const DEFAULT_ME_ADAPTIVE_FLOOR_RECOVER_GRACE_SECS: u64 = 180;
|
||||||
|
const DEFAULT_ME_ADAPTIVE_FLOOR_WRITERS_PER_CORE_TOTAL: u16 = 48;
|
||||||
|
const DEFAULT_ME_ADAPTIVE_FLOOR_CPU_CORES_OVERRIDE: u16 = 0;
|
||||||
|
const DEFAULT_ME_ADAPTIVE_FLOOR_MAX_EXTRA_WRITERS_SINGLE_PER_CORE: u16 = 1;
|
||||||
|
const DEFAULT_ME_ADAPTIVE_FLOOR_MAX_EXTRA_WRITERS_MULTI_PER_CORE: u16 = 2;
|
||||||
|
const DEFAULT_ME_ADAPTIVE_FLOOR_MAX_ACTIVE_WRITERS_PER_CORE: u16 = 64;
|
||||||
|
const DEFAULT_ME_ADAPTIVE_FLOOR_MAX_WARM_WRITERS_PER_CORE: u16 = 64;
|
||||||
|
const DEFAULT_ME_ADAPTIVE_FLOOR_MAX_ACTIVE_WRITERS_GLOBAL: u32 = 256;
|
||||||
|
const DEFAULT_ME_ADAPTIVE_FLOOR_MAX_WARM_WRITERS_GLOBAL: u32 = 256;
|
||||||
|
const DEFAULT_ME_WRITER_CMD_CHANNEL_CAPACITY: usize = 4096;
|
||||||
|
const DEFAULT_ME_ROUTE_CHANNEL_CAPACITY: usize = 768;
|
||||||
|
const DEFAULT_ME_C2ME_CHANNEL_CAPACITY: usize = 1024;
|
||||||
|
const DEFAULT_ME_READER_ROUTE_DATA_WAIT_MS: u64 = 2;
|
||||||
|
const DEFAULT_ME_D2C_FLUSH_BATCH_MAX_FRAMES: usize = 32;
|
||||||
|
const DEFAULT_ME_D2C_FLUSH_BATCH_MAX_BYTES: usize = 128 * 1024;
|
||||||
|
const DEFAULT_ME_D2C_FLUSH_BATCH_MAX_DELAY_US: u64 = 1500;
|
||||||
|
const DEFAULT_ME_D2C_ACK_FLUSH_IMMEDIATE: bool = false;
|
||||||
|
const DEFAULT_DIRECT_RELAY_COPY_BUF_C2S_BYTES: usize = 64 * 1024;
|
||||||
|
const DEFAULT_DIRECT_RELAY_COPY_BUF_S2C_BYTES: usize = 256 * 1024;
|
||||||
|
const DEFAULT_ME_WRITER_PICK_SAMPLE_SIZE: u8 = 3;
|
||||||
|
const DEFAULT_ME_HEALTH_INTERVAL_MS_UNHEALTHY: u64 = 1000;
|
||||||
|
const DEFAULT_ME_HEALTH_INTERVAL_MS_HEALTHY: u64 = 3000;
|
||||||
|
const DEFAULT_ME_ADMISSION_POLL_MS: u64 = 1000;
|
||||||
|
const DEFAULT_ME_WARN_RATE_LIMIT_MS: u64 = 5000;
|
||||||
|
const DEFAULT_USER_MAX_UNIQUE_IPS_WINDOW_SECS: u64 = 30;
|
||||||
|
const DEFAULT_UPSTREAM_CONNECT_RETRY_ATTEMPTS: u32 = 2;
|
||||||
|
const DEFAULT_UPSTREAM_UNHEALTHY_FAIL_THRESHOLD: u32 = 5;
|
||||||
|
const DEFAULT_UPSTREAM_CONNECT_BUDGET_MS: u64 = 3000;
|
||||||
|
const DEFAULT_LISTEN_ADDR_IPV6: &str = "::";
|
||||||
|
const DEFAULT_ACCESS_USER: &str = "default";
|
||||||
|
const DEFAULT_ACCESS_SECRET: &str = "00000000000000000000000000000000";
|
||||||
|
|
||||||
pub(crate) fn default_true() -> bool {
|
pub(crate) fn default_true() -> bool {
|
||||||
true
|
true
|
||||||
}
|
}
|
||||||
@@ -13,7 +53,7 @@ pub(crate) fn default_port() -> u16 {
|
|||||||
}
|
}
|
||||||
|
|
||||||
pub(crate) fn default_tls_domain() -> String {
|
pub(crate) fn default_tls_domain() -> String {
|
||||||
"www.google.com".to_string()
|
"petrovich.ru".to_string()
|
||||||
}
|
}
|
||||||
|
|
||||||
pub(crate) fn default_mask_port() -> u16 {
|
pub(crate) fn default_mask_port() -> u16 {
|
||||||
@@ -37,7 +77,7 @@ pub(crate) fn default_replay_window_secs() -> u64 {
|
|||||||
}
|
}
|
||||||
|
|
||||||
pub(crate) fn default_handshake_timeout() -> u64 {
|
pub(crate) fn default_handshake_timeout() -> u64 {
|
||||||
15
|
30
|
||||||
}
|
}
|
||||||
|
|
||||||
pub(crate) fn default_connect_timeout() -> u64 {
|
pub(crate) fn default_connect_timeout() -> u64 {
|
||||||
@@ -52,17 +92,21 @@ pub(crate) fn default_ack_timeout() -> u64 {
|
|||||||
300
|
300
|
||||||
}
|
}
|
||||||
pub(crate) fn default_me_one_retry() -> u8 {
|
pub(crate) fn default_me_one_retry() -> u8 {
|
||||||
3
|
12
|
||||||
}
|
}
|
||||||
|
|
||||||
pub(crate) fn default_me_one_timeout() -> u64 {
|
pub(crate) fn default_me_one_timeout() -> u64 {
|
||||||
1500
|
1200
|
||||||
}
|
}
|
||||||
|
|
||||||
pub(crate) fn default_listen_addr() -> String {
|
pub(crate) fn default_listen_addr() -> String {
|
||||||
"0.0.0.0".to_string()
|
"0.0.0.0".to_string()
|
||||||
}
|
}
|
||||||
|
|
||||||
|
pub(crate) fn default_listen_addr_ipv4() -> Option<String> {
|
||||||
|
Some(default_listen_addr())
|
||||||
|
}
|
||||||
|
|
||||||
pub(crate) fn default_weight() -> u16 {
|
pub(crate) fn default_weight() -> u16 {
|
||||||
1
|
1
|
||||||
}
|
}
|
||||||
@@ -74,24 +118,101 @@ pub(crate) fn default_metrics_whitelist() -> Vec<IpNetwork> {
|
|||||||
]
|
]
|
||||||
}
|
}
|
||||||
|
|
||||||
|
pub(crate) fn default_api_listen() -> String {
|
||||||
|
"0.0.0.0:9091".to_string()
|
||||||
|
}
|
||||||
|
|
||||||
|
pub(crate) fn default_api_whitelist() -> Vec<IpNetwork> {
|
||||||
|
vec!["127.0.0.0/8".parse().unwrap()]
|
||||||
|
}
|
||||||
|
|
||||||
|
pub(crate) fn default_api_request_body_limit_bytes() -> usize {
|
||||||
|
64 * 1024
|
||||||
|
}
|
||||||
|
|
||||||
|
pub(crate) fn default_api_minimal_runtime_enabled() -> bool {
|
||||||
|
true
|
||||||
|
}
|
||||||
|
|
||||||
|
pub(crate) fn default_api_minimal_runtime_cache_ttl_ms() -> u64 {
|
||||||
|
1000
|
||||||
|
}
|
||||||
|
|
||||||
|
pub(crate) fn default_api_runtime_edge_enabled() -> bool { false }
|
||||||
|
pub(crate) fn default_api_runtime_edge_cache_ttl_ms() -> u64 { 1000 }
|
||||||
|
pub(crate) fn default_api_runtime_edge_top_n() -> usize { 10 }
|
||||||
|
pub(crate) fn default_api_runtime_edge_events_capacity() -> usize { 256 }
|
||||||
|
|
||||||
|
pub(crate) fn default_proxy_protocol_header_timeout_ms() -> u64 {
|
||||||
|
500
|
||||||
|
}
|
||||||
|
|
||||||
pub(crate) fn default_prefer_4() -> u8 {
|
pub(crate) fn default_prefer_4() -> u8 {
|
||||||
4
|
4
|
||||||
}
|
}
|
||||||
|
|
||||||
|
pub(crate) fn default_network_ipv6() -> Option<bool> {
|
||||||
|
DEFAULT_NETWORK_IPV6
|
||||||
|
}
|
||||||
|
|
||||||
|
pub(crate) fn default_stun_tcp_fallback() -> bool {
|
||||||
|
DEFAULT_STUN_TCP_FALLBACK
|
||||||
|
}
|
||||||
|
|
||||||
pub(crate) fn default_unknown_dc_log_path() -> Option<String> {
|
pub(crate) fn default_unknown_dc_log_path() -> Option<String> {
|
||||||
Some("unknown-dc.txt".to_string())
|
Some("unknown-dc.txt".to_string())
|
||||||
}
|
}
|
||||||
|
|
||||||
|
pub(crate) fn default_unknown_dc_file_log_enabled() -> bool {
|
||||||
|
false
|
||||||
|
}
|
||||||
|
|
||||||
pub(crate) fn default_pool_size() -> usize {
|
pub(crate) fn default_pool_size() -> usize {
|
||||||
2
|
8
|
||||||
|
}
|
||||||
|
|
||||||
|
pub(crate) fn default_proxy_secret_path() -> Option<String> {
|
||||||
|
Some("proxy-secret".to_string())
|
||||||
|
}
|
||||||
|
|
||||||
|
pub(crate) fn default_proxy_config_v4_cache_path() -> Option<String> {
|
||||||
|
Some("cache/proxy-config-v4.txt".to_string())
|
||||||
|
}
|
||||||
|
|
||||||
|
pub(crate) fn default_proxy_config_v6_cache_path() -> Option<String> {
|
||||||
|
Some("cache/proxy-config-v6.txt".to_string())
|
||||||
|
}
|
||||||
|
|
||||||
|
pub(crate) fn default_middle_proxy_nat_stun() -> Option<String> {
|
||||||
|
None
|
||||||
|
}
|
||||||
|
|
||||||
|
pub(crate) fn default_middle_proxy_nat_stun_servers() -> Vec<String> {
|
||||||
|
Vec::new()
|
||||||
|
}
|
||||||
|
|
||||||
|
pub(crate) fn default_stun_nat_probe_concurrency() -> usize {
|
||||||
|
8
|
||||||
|
}
|
||||||
|
|
||||||
|
pub(crate) fn default_middle_proxy_warm_standby() -> usize {
|
||||||
|
DEFAULT_MIDDLE_PROXY_WARM_STANDBY
|
||||||
|
}
|
||||||
|
|
||||||
|
pub(crate) fn default_me_init_retry_attempts() -> u32 {
|
||||||
|
0
|
||||||
|
}
|
||||||
|
|
||||||
|
pub(crate) fn default_me2dc_fallback() -> bool {
|
||||||
|
true
|
||||||
}
|
}
|
||||||
|
|
||||||
pub(crate) fn default_keepalive_interval() -> u64 {
|
pub(crate) fn default_keepalive_interval() -> u64 {
|
||||||
25
|
8
|
||||||
}
|
}
|
||||||
|
|
||||||
pub(crate) fn default_keepalive_jitter() -> u64 {
|
pub(crate) fn default_keepalive_jitter() -> u64 {
|
||||||
5
|
2
|
||||||
}
|
}
|
||||||
|
|
||||||
pub(crate) fn default_warmup_step_delay_ms() -> u64 {
|
pub(crate) fn default_warmup_step_delay_ms() -> u64 {
|
||||||
@@ -110,6 +231,170 @@ pub(crate) fn default_reconnect_backoff_cap_ms() -> u64 {
|
|||||||
30_000
|
30_000
|
||||||
}
|
}
|
||||||
|
|
||||||
|
pub(crate) fn default_me_reconnect_max_concurrent_per_dc() -> u32 {
|
||||||
|
DEFAULT_ME_RECONNECT_MAX_CONCURRENT_PER_DC
|
||||||
|
}
|
||||||
|
|
||||||
|
pub(crate) fn default_me_reconnect_fast_retry_count() -> u32 {
|
||||||
|
DEFAULT_ME_RECONNECT_FAST_RETRY_COUNT
|
||||||
|
}
|
||||||
|
|
||||||
|
pub(crate) fn default_me_single_endpoint_shadow_writers() -> u8 {
|
||||||
|
DEFAULT_ME_SINGLE_ENDPOINT_SHADOW_WRITERS
|
||||||
|
}
|
||||||
|
|
||||||
|
pub(crate) fn default_me_single_endpoint_outage_mode_enabled() -> bool {
|
||||||
|
true
|
||||||
|
}
|
||||||
|
|
||||||
|
pub(crate) fn default_me_single_endpoint_outage_disable_quarantine() -> bool {
|
||||||
|
true
|
||||||
|
}
|
||||||
|
|
||||||
|
pub(crate) fn default_me_single_endpoint_outage_backoff_min_ms() -> u64 {
|
||||||
|
250
|
||||||
|
}
|
||||||
|
|
||||||
|
pub(crate) fn default_me_single_endpoint_outage_backoff_max_ms() -> u64 {
|
||||||
|
3000
|
||||||
|
}
|
||||||
|
|
||||||
|
pub(crate) fn default_me_single_endpoint_shadow_rotate_every_secs() -> u64 {
|
||||||
|
900
|
||||||
|
}
|
||||||
|
|
||||||
|
pub(crate) fn default_me_adaptive_floor_idle_secs() -> u64 {
|
||||||
|
DEFAULT_ME_ADAPTIVE_FLOOR_IDLE_SECS
|
||||||
|
}
|
||||||
|
|
||||||
|
pub(crate) fn default_me_adaptive_floor_min_writers_single_endpoint() -> u8 {
|
||||||
|
DEFAULT_ME_ADAPTIVE_FLOOR_MIN_WRITERS_SINGLE_ENDPOINT
|
||||||
|
}
|
||||||
|
|
||||||
|
pub(crate) fn default_me_adaptive_floor_min_writers_multi_endpoint() -> u8 {
|
||||||
|
DEFAULT_ME_ADAPTIVE_FLOOR_MIN_WRITERS_MULTI_ENDPOINT
|
||||||
|
}
|
||||||
|
|
||||||
|
pub(crate) fn default_me_adaptive_floor_recover_grace_secs() -> u64 {
|
||||||
|
DEFAULT_ME_ADAPTIVE_FLOOR_RECOVER_GRACE_SECS
|
||||||
|
}
|
||||||
|
|
||||||
|
pub(crate) fn default_me_adaptive_floor_writers_per_core_total() -> u16 {
|
||||||
|
DEFAULT_ME_ADAPTIVE_FLOOR_WRITERS_PER_CORE_TOTAL
|
||||||
|
}
|
||||||
|
|
||||||
|
pub(crate) fn default_me_adaptive_floor_cpu_cores_override() -> u16 {
|
||||||
|
DEFAULT_ME_ADAPTIVE_FLOOR_CPU_CORES_OVERRIDE
|
||||||
|
}
|
||||||
|
|
||||||
|
pub(crate) fn default_me_adaptive_floor_max_extra_writers_single_per_core() -> u16 {
|
||||||
|
DEFAULT_ME_ADAPTIVE_FLOOR_MAX_EXTRA_WRITERS_SINGLE_PER_CORE
|
||||||
|
}
|
||||||
|
|
||||||
|
pub(crate) fn default_me_adaptive_floor_max_extra_writers_multi_per_core() -> u16 {
|
||||||
|
DEFAULT_ME_ADAPTIVE_FLOOR_MAX_EXTRA_WRITERS_MULTI_PER_CORE
|
||||||
|
}
|
||||||
|
|
||||||
|
pub(crate) fn default_me_adaptive_floor_max_active_writers_per_core() -> u16 {
|
||||||
|
DEFAULT_ME_ADAPTIVE_FLOOR_MAX_ACTIVE_WRITERS_PER_CORE
|
||||||
|
}
|
||||||
|
|
||||||
|
pub(crate) fn default_me_adaptive_floor_max_warm_writers_per_core() -> u16 {
|
||||||
|
DEFAULT_ME_ADAPTIVE_FLOOR_MAX_WARM_WRITERS_PER_CORE
|
||||||
|
}
|
||||||
|
|
||||||
|
pub(crate) fn default_me_adaptive_floor_max_active_writers_global() -> u32 {
|
||||||
|
DEFAULT_ME_ADAPTIVE_FLOOR_MAX_ACTIVE_WRITERS_GLOBAL
|
||||||
|
}
|
||||||
|
|
||||||
|
pub(crate) fn default_me_adaptive_floor_max_warm_writers_global() -> u32 {
|
||||||
|
DEFAULT_ME_ADAPTIVE_FLOOR_MAX_WARM_WRITERS_GLOBAL
|
||||||
|
}
|
||||||
|
|
||||||
|
pub(crate) fn default_me_writer_cmd_channel_capacity() -> usize {
|
||||||
|
DEFAULT_ME_WRITER_CMD_CHANNEL_CAPACITY
|
||||||
|
}
|
||||||
|
|
||||||
|
pub(crate) fn default_me_route_channel_capacity() -> usize {
|
||||||
|
DEFAULT_ME_ROUTE_CHANNEL_CAPACITY
|
||||||
|
}
|
||||||
|
|
||||||
|
pub(crate) fn default_me_c2me_channel_capacity() -> usize {
|
||||||
|
DEFAULT_ME_C2ME_CHANNEL_CAPACITY
|
||||||
|
}
|
||||||
|
|
||||||
|
pub(crate) fn default_me_reader_route_data_wait_ms() -> u64 {
|
||||||
|
DEFAULT_ME_READER_ROUTE_DATA_WAIT_MS
|
||||||
|
}
|
||||||
|
|
||||||
|
pub(crate) fn default_me_d2c_flush_batch_max_frames() -> usize {
|
||||||
|
DEFAULT_ME_D2C_FLUSH_BATCH_MAX_FRAMES
|
||||||
|
}
|
||||||
|
|
||||||
|
pub(crate) fn default_me_d2c_flush_batch_max_bytes() -> usize {
|
||||||
|
DEFAULT_ME_D2C_FLUSH_BATCH_MAX_BYTES
|
||||||
|
}
|
||||||
|
|
||||||
|
pub(crate) fn default_me_d2c_flush_batch_max_delay_us() -> u64 {
|
||||||
|
DEFAULT_ME_D2C_FLUSH_BATCH_MAX_DELAY_US
|
||||||
|
}
|
||||||
|
|
||||||
|
pub(crate) fn default_me_d2c_ack_flush_immediate() -> bool {
|
||||||
|
DEFAULT_ME_D2C_ACK_FLUSH_IMMEDIATE
|
||||||
|
}
|
||||||
|
|
||||||
|
pub(crate) fn default_direct_relay_copy_buf_c2s_bytes() -> usize {
|
||||||
|
DEFAULT_DIRECT_RELAY_COPY_BUF_C2S_BYTES
|
||||||
|
}
|
||||||
|
|
||||||
|
pub(crate) fn default_direct_relay_copy_buf_s2c_bytes() -> usize {
|
||||||
|
DEFAULT_DIRECT_RELAY_COPY_BUF_S2C_BYTES
|
||||||
|
}
|
||||||
|
|
||||||
|
pub(crate) fn default_me_writer_pick_sample_size() -> u8 {
|
||||||
|
DEFAULT_ME_WRITER_PICK_SAMPLE_SIZE
|
||||||
|
}
|
||||||
|
|
||||||
|
pub(crate) fn default_me_health_interval_ms_unhealthy() -> u64 {
|
||||||
|
DEFAULT_ME_HEALTH_INTERVAL_MS_UNHEALTHY
|
||||||
|
}
|
||||||
|
|
||||||
|
pub(crate) fn default_me_health_interval_ms_healthy() -> u64 {
|
||||||
|
DEFAULT_ME_HEALTH_INTERVAL_MS_HEALTHY
|
||||||
|
}
|
||||||
|
|
||||||
|
pub(crate) fn default_me_admission_poll_ms() -> u64 {
|
||||||
|
DEFAULT_ME_ADMISSION_POLL_MS
|
||||||
|
}
|
||||||
|
|
||||||
|
pub(crate) fn default_me_warn_rate_limit_ms() -> u64 {
|
||||||
|
DEFAULT_ME_WARN_RATE_LIMIT_MS
|
||||||
|
}
|
||||||
|
|
||||||
|
pub(crate) fn default_upstream_connect_retry_attempts() -> u32 {
|
||||||
|
DEFAULT_UPSTREAM_CONNECT_RETRY_ATTEMPTS
|
||||||
|
}
|
||||||
|
|
||||||
|
pub(crate) fn default_upstream_connect_retry_backoff_ms() -> u64 {
|
||||||
|
100
|
||||||
|
}
|
||||||
|
|
||||||
|
pub(crate) fn default_upstream_unhealthy_fail_threshold() -> u32 {
|
||||||
|
DEFAULT_UPSTREAM_UNHEALTHY_FAIL_THRESHOLD
|
||||||
|
}
|
||||||
|
|
||||||
|
pub(crate) fn default_upstream_connect_budget_ms() -> u64 {
|
||||||
|
DEFAULT_UPSTREAM_CONNECT_BUDGET_MS
|
||||||
|
}
|
||||||
|
|
||||||
|
pub(crate) fn default_upstream_connect_failfast_hard_errors() -> bool {
|
||||||
|
false
|
||||||
|
}
|
||||||
|
|
||||||
|
pub(crate) fn default_rpc_proxy_req_every() -> u64 {
|
||||||
|
0
|
||||||
|
}
|
||||||
|
|
||||||
pub(crate) fn default_crypto_pending_buffer() -> usize {
|
pub(crate) fn default_crypto_pending_buffer() -> usize {
|
||||||
256 * 1024
|
256 * 1024
|
||||||
}
|
}
|
||||||
@@ -122,6 +407,42 @@ pub(crate) fn default_desync_all_full() -> bool {
|
|||||||
false
|
false
|
||||||
}
|
}
|
||||||
|
|
||||||
|
pub(crate) fn default_me_route_backpressure_base_timeout_ms() -> u64 {
|
||||||
|
25
|
||||||
|
}
|
||||||
|
|
||||||
|
pub(crate) fn default_me_route_backpressure_high_timeout_ms() -> u64 {
|
||||||
|
120
|
||||||
|
}
|
||||||
|
|
||||||
|
pub(crate) fn default_me_route_backpressure_high_watermark_pct() -> u8 {
|
||||||
|
80
|
||||||
|
}
|
||||||
|
|
||||||
|
pub(crate) fn default_me_route_no_writer_wait_ms() -> u64 {
|
||||||
|
250
|
||||||
|
}
|
||||||
|
|
||||||
|
pub(crate) fn default_me_route_inline_recovery_attempts() -> u32 {
|
||||||
|
3
|
||||||
|
}
|
||||||
|
|
||||||
|
pub(crate) fn default_me_route_inline_recovery_wait_ms() -> u64 {
|
||||||
|
3000
|
||||||
|
}
|
||||||
|
|
||||||
|
pub(crate) fn default_beobachten_minutes() -> u64 {
|
||||||
|
10
|
||||||
|
}
|
||||||
|
|
||||||
|
pub(crate) fn default_beobachten_flush_secs() -> u64 {
|
||||||
|
15
|
||||||
|
}
|
||||||
|
|
||||||
|
pub(crate) fn default_beobachten_file() -> String {
|
||||||
|
"cache/beobachten.txt".to_string()
|
||||||
|
}
|
||||||
|
|
||||||
pub(crate) fn default_tls_new_session_tickets() -> u8 {
|
pub(crate) fn default_tls_new_session_tickets() -> u8 {
|
||||||
0
|
0
|
||||||
}
|
}
|
||||||
@@ -144,10 +465,18 @@ pub(crate) fn default_alpn_enforce() -> bool {
|
|||||||
|
|
||||||
pub(crate) fn default_stun_servers() -> Vec<String> {
|
pub(crate) fn default_stun_servers() -> Vec<String> {
|
||||||
vec![
|
vec![
|
||||||
|
"stun.l.google.com:5349".to_string(),
|
||||||
|
"stun1.l.google.com:3478".to_string(),
|
||||||
|
"stun.gmx.net:3478".to_string(),
|
||||||
"stun.l.google.com:19302".to_string(),
|
"stun.l.google.com:19302".to_string(),
|
||||||
|
"stun.1und1.de:3478".to_string(),
|
||||||
"stun1.l.google.com:19302".to_string(),
|
"stun1.l.google.com:19302".to_string(),
|
||||||
"stun2.l.google.com:19302".to_string(),
|
"stun2.l.google.com:19302".to_string(),
|
||||||
|
"stun3.l.google.com:19302".to_string(),
|
||||||
|
"stun4.l.google.com:19302".to_string(),
|
||||||
|
"stun.services.mozilla.com:3478".to_string(),
|
||||||
"stun.stunprotocol.org:3478".to_string(),
|
"stun.stunprotocol.org:3478".to_string(),
|
||||||
|
"stun.nextcloud.com:3478".to_string(),
|
||||||
"stun.voip.eutelia.it:3478".to_string(),
|
"stun.voip.eutelia.it:3478".to_string(),
|
||||||
]
|
]
|
||||||
}
|
}
|
||||||
@@ -164,19 +493,111 @@ pub(crate) fn default_cache_public_ip_path() -> String {
|
|||||||
}
|
}
|
||||||
|
|
||||||
pub(crate) fn default_proxy_secret_reload_secs() -> u64 {
|
pub(crate) fn default_proxy_secret_reload_secs() -> u64 {
|
||||||
12 * 60 * 60
|
60 * 60
|
||||||
}
|
}
|
||||||
|
|
||||||
pub(crate) fn default_proxy_config_reload_secs() -> u64 {
|
pub(crate) fn default_proxy_config_reload_secs() -> u64 {
|
||||||
12 * 60 * 60
|
60 * 60
|
||||||
}
|
}
|
||||||
|
|
||||||
pub(crate) fn default_update_every_secs() -> u64 {
|
pub(crate) fn default_update_every_secs() -> u64 {
|
||||||
2 * 60 * 60
|
5 * 60
|
||||||
|
}
|
||||||
|
|
||||||
|
pub(crate) fn default_update_every() -> Option<u64> {
|
||||||
|
Some(default_update_every_secs())
|
||||||
|
}
|
||||||
|
|
||||||
|
pub(crate) fn default_me_reinit_every_secs() -> u64 {
|
||||||
|
15 * 60
|
||||||
|
}
|
||||||
|
|
||||||
|
pub(crate) fn default_me_reinit_singleflight() -> bool {
|
||||||
|
true
|
||||||
|
}
|
||||||
|
|
||||||
|
pub(crate) fn default_me_reinit_trigger_channel() -> usize {
|
||||||
|
64
|
||||||
|
}
|
||||||
|
|
||||||
|
pub(crate) fn default_me_reinit_coalesce_window_ms() -> u64 {
|
||||||
|
200
|
||||||
|
}
|
||||||
|
|
||||||
|
pub(crate) fn default_me_hardswap_warmup_delay_min_ms() -> u64 {
|
||||||
|
1000
|
||||||
|
}
|
||||||
|
|
||||||
|
pub(crate) fn default_me_hardswap_warmup_delay_max_ms() -> u64 {
|
||||||
|
2000
|
||||||
|
}
|
||||||
|
|
||||||
|
pub(crate) fn default_me_hardswap_warmup_extra_passes() -> u8 {
|
||||||
|
3
|
||||||
|
}
|
||||||
|
|
||||||
|
pub(crate) fn default_me_hardswap_warmup_pass_backoff_base_ms() -> u64 {
|
||||||
|
500
|
||||||
|
}
|
||||||
|
|
||||||
|
pub(crate) fn default_me_config_stable_snapshots() -> u8 {
|
||||||
|
2
|
||||||
|
}
|
||||||
|
|
||||||
|
pub(crate) fn default_me_config_apply_cooldown_secs() -> u64 {
|
||||||
|
300
|
||||||
|
}
|
||||||
|
|
||||||
|
pub(crate) fn default_me_snapshot_require_http_2xx() -> bool {
|
||||||
|
true
|
||||||
|
}
|
||||||
|
|
||||||
|
pub(crate) fn default_me_snapshot_reject_empty_map() -> bool {
|
||||||
|
true
|
||||||
|
}
|
||||||
|
|
||||||
|
pub(crate) fn default_me_snapshot_min_proxy_for_lines() -> u32 {
|
||||||
|
1
|
||||||
|
}
|
||||||
|
|
||||||
|
pub(crate) fn default_proxy_secret_stable_snapshots() -> u8 {
|
||||||
|
2
|
||||||
|
}
|
||||||
|
|
||||||
|
pub(crate) fn default_proxy_secret_rotate_runtime() -> bool {
|
||||||
|
true
|
||||||
|
}
|
||||||
|
|
||||||
|
pub(crate) fn default_me_secret_atomic_snapshot() -> bool {
|
||||||
|
true
|
||||||
|
}
|
||||||
|
|
||||||
|
pub(crate) fn default_proxy_secret_len_max() -> usize {
|
||||||
|
256
|
||||||
}
|
}
|
||||||
|
|
||||||
pub(crate) fn default_me_reinit_drain_timeout_secs() -> u64 {
|
pub(crate) fn default_me_reinit_drain_timeout_secs() -> u64 {
|
||||||
300
|
120
|
||||||
|
}
|
||||||
|
|
||||||
|
pub(crate) fn default_me_pool_drain_ttl_secs() -> u64 {
|
||||||
|
90
|
||||||
|
}
|
||||||
|
|
||||||
|
pub(crate) fn default_me_bind_stale_ttl_secs() -> u64 {
|
||||||
|
default_me_pool_drain_ttl_secs()
|
||||||
|
}
|
||||||
|
|
||||||
|
pub(crate) fn default_me_pool_min_fresh_ratio() -> f32 {
|
||||||
|
0.8
|
||||||
|
}
|
||||||
|
|
||||||
|
pub(crate) fn default_me_deterministic_writer_sort() -> bool {
|
||||||
|
true
|
||||||
|
}
|
||||||
|
|
||||||
|
pub(crate) fn default_hardswap() -> bool {
|
||||||
|
true
|
||||||
}
|
}
|
||||||
|
|
||||||
pub(crate) fn default_ntp_check() -> bool {
|
pub(crate) fn default_ntp_check() -> bool {
|
||||||
@@ -195,6 +616,25 @@ pub(crate) fn default_degradation_min_unavailable_dc_groups() -> u8 {
|
|||||||
2
|
2
|
||||||
}
|
}
|
||||||
|
|
||||||
|
pub(crate) fn default_listen_addr_ipv6() -> String {
|
||||||
|
DEFAULT_LISTEN_ADDR_IPV6.to_string()
|
||||||
|
}
|
||||||
|
|
||||||
|
pub(crate) fn default_listen_addr_ipv6_opt() -> Option<String> {
|
||||||
|
Some(default_listen_addr_ipv6())
|
||||||
|
}
|
||||||
|
|
||||||
|
pub(crate) fn default_access_users() -> HashMap<String, String> {
|
||||||
|
HashMap::from([(
|
||||||
|
DEFAULT_ACCESS_USER.to_string(),
|
||||||
|
DEFAULT_ACCESS_SECRET.to_string(),
|
||||||
|
)])
|
||||||
|
}
|
||||||
|
|
||||||
|
pub(crate) fn default_user_max_unique_ips_window_secs() -> u64 {
|
||||||
|
DEFAULT_USER_MAX_UNIQUE_IPS_WINDOW_SECS
|
||||||
|
}
|
||||||
|
|
||||||
// Custom deserializer helpers
|
// Custom deserializer helpers
|
||||||
|
|
||||||
#[derive(Deserialize)]
|
#[derive(Deserialize)]
|
||||||
|
|||||||
File diff suppressed because it is too large
Load Diff
1561
src/config/load.rs
1561
src/config/load.rs
File diff suppressed because it is too large
Load Diff
File diff suppressed because it is too large
Load Diff
@@ -11,6 +11,8 @@
|
|||||||
//! `HandshakeSuccess`, `ObfuscationParams`) are responsible for
|
//! `HandshakeSuccess`, `ObfuscationParams`) are responsible for
|
||||||
//! zeroizing their own copies.
|
//! zeroizing their own copies.
|
||||||
|
|
||||||
|
#![allow(dead_code)]
|
||||||
|
|
||||||
use aes::Aes256;
|
use aes::Aes256;
|
||||||
use ctr::{Ctr128BE, cipher::{KeyIvInit, StreamCipher}};
|
use ctr::{Ctr128BE, cipher::{KeyIvInit, StreamCipher}};
|
||||||
use zeroize::Zeroize;
|
use zeroize::Zeroize;
|
||||||
@@ -147,7 +149,7 @@ impl AesCbc {
|
|||||||
///
|
///
|
||||||
/// CBC Encryption: C[i] = AES_Encrypt(P[i] XOR C[i-1]), where C[-1] = IV
|
/// CBC Encryption: C[i] = AES_Encrypt(P[i] XOR C[i-1]), where C[-1] = IV
|
||||||
pub fn encrypt(&self, data: &[u8]) -> Result<Vec<u8>> {
|
pub fn encrypt(&self, data: &[u8]) -> Result<Vec<u8>> {
|
||||||
if data.len() % Self::BLOCK_SIZE != 0 {
|
if !data.len().is_multiple_of(Self::BLOCK_SIZE) {
|
||||||
return Err(ProxyError::Crypto(
|
return Err(ProxyError::Crypto(
|
||||||
format!("CBC data must be aligned to 16 bytes, got {}", data.len())
|
format!("CBC data must be aligned to 16 bytes, got {}", data.len())
|
||||||
));
|
));
|
||||||
@@ -178,7 +180,7 @@ impl AesCbc {
|
|||||||
///
|
///
|
||||||
/// CBC Decryption: P[i] = AES_Decrypt(C[i]) XOR C[i-1], where C[-1] = IV
|
/// CBC Decryption: P[i] = AES_Decrypt(C[i]) XOR C[i-1], where C[-1] = IV
|
||||||
pub fn decrypt(&self, data: &[u8]) -> Result<Vec<u8>> {
|
pub fn decrypt(&self, data: &[u8]) -> Result<Vec<u8>> {
|
||||||
if data.len() % Self::BLOCK_SIZE != 0 {
|
if !data.len().is_multiple_of(Self::BLOCK_SIZE) {
|
||||||
return Err(ProxyError::Crypto(
|
return Err(ProxyError::Crypto(
|
||||||
format!("CBC data must be aligned to 16 bytes, got {}", data.len())
|
format!("CBC data must be aligned to 16 bytes, got {}", data.len())
|
||||||
));
|
));
|
||||||
@@ -207,7 +209,7 @@ impl AesCbc {
|
|||||||
|
|
||||||
/// Encrypt data in-place
|
/// Encrypt data in-place
|
||||||
pub fn encrypt_in_place(&self, data: &mut [u8]) -> Result<()> {
|
pub fn encrypt_in_place(&self, data: &mut [u8]) -> Result<()> {
|
||||||
if data.len() % Self::BLOCK_SIZE != 0 {
|
if !data.len().is_multiple_of(Self::BLOCK_SIZE) {
|
||||||
return Err(ProxyError::Crypto(
|
return Err(ProxyError::Crypto(
|
||||||
format!("CBC data must be aligned to 16 bytes, got {}", data.len())
|
format!("CBC data must be aligned to 16 bytes, got {}", data.len())
|
||||||
));
|
));
|
||||||
@@ -240,7 +242,7 @@ impl AesCbc {
|
|||||||
|
|
||||||
/// Decrypt data in-place
|
/// Decrypt data in-place
|
||||||
pub fn decrypt_in_place(&self, data: &mut [u8]) -> Result<()> {
|
pub fn decrypt_in_place(&self, data: &mut [u8]) -> Result<()> {
|
||||||
if data.len() % Self::BLOCK_SIZE != 0 {
|
if !data.len().is_multiple_of(Self::BLOCK_SIZE) {
|
||||||
return Err(ProxyError::Crypto(
|
return Err(ProxyError::Crypto(
|
||||||
format!("CBC data must be aligned to 16 bytes, got {}", data.len())
|
format!("CBC data must be aligned to 16 bytes, got {}", data.len())
|
||||||
));
|
));
|
||||||
|
|||||||
@@ -64,6 +64,7 @@ pub fn crc32c(data: &[u8]) -> u32 {
|
|||||||
///
|
///
|
||||||
/// Returned buffer layout (IPv4):
|
/// Returned buffer layout (IPv4):
|
||||||
/// nonce_srv | nonce_clt | clt_ts | srv_ip | clt_port | purpose | clt_ip | srv_port | secret | nonce_srv | [clt_v6 | srv_v6] | nonce_clt
|
/// nonce_srv | nonce_clt | clt_ts | srv_ip | clt_port | purpose | clt_ip | srv_port | secret | nonce_srv | [clt_v6 | srv_v6] | nonce_clt
|
||||||
|
#[allow(clippy::too_many_arguments)]
|
||||||
pub fn build_middleproxy_prekey(
|
pub fn build_middleproxy_prekey(
|
||||||
nonce_srv: &[u8; 16],
|
nonce_srv: &[u8; 16],
|
||||||
nonce_clt: &[u8; 16],
|
nonce_clt: &[u8; 16],
|
||||||
@@ -108,6 +109,7 @@ pub fn build_middleproxy_prekey(
|
|||||||
/// Uses MD5 + SHA-1 as mandated by the Telegram Middle Proxy protocol.
|
/// Uses MD5 + SHA-1 as mandated by the Telegram Middle Proxy protocol.
|
||||||
/// These algorithms are NOT replaceable here — changing them would break
|
/// These algorithms are NOT replaceable here — changing them would break
|
||||||
/// interoperability with Telegram's middle proxy infrastructure.
|
/// interoperability with Telegram's middle proxy infrastructure.
|
||||||
|
#[allow(clippy::too_many_arguments)]
|
||||||
pub fn derive_middleproxy_keys(
|
pub fn derive_middleproxy_keys(
|
||||||
nonce_srv: &[u8; 16],
|
nonce_srv: &[u8; 16],
|
||||||
nonce_clt: &[u8; 16],
|
nonce_clt: &[u8; 16],
|
||||||
|
|||||||
@@ -6,7 +6,6 @@ pub mod random;
|
|||||||
|
|
||||||
pub use aes::{AesCtr, AesCbc};
|
pub use aes::{AesCtr, AesCbc};
|
||||||
pub use hash::{
|
pub use hash::{
|
||||||
build_middleproxy_prekey, crc32, crc32c, derive_middleproxy_keys, md5, sha1, sha256,
|
build_middleproxy_prekey, crc32, crc32c, derive_middleproxy_keys, sha256, sha256_hmac,
|
||||||
sha256_hmac,
|
|
||||||
};
|
};
|
||||||
pub use random::SecureRandom;
|
pub use random::SecureRandom;
|
||||||
|
|||||||
@@ -1,5 +1,8 @@
|
|||||||
//! Pseudorandom
|
//! Pseudorandom
|
||||||
|
|
||||||
|
#![allow(deprecated)]
|
||||||
|
#![allow(dead_code)]
|
||||||
|
|
||||||
use rand::{Rng, RngCore, SeedableRng};
|
use rand::{Rng, RngCore, SeedableRng};
|
||||||
use rand::rngs::StdRng;
|
use rand::rngs::StdRng;
|
||||||
use parking_lot::Mutex;
|
use parking_lot::Mutex;
|
||||||
@@ -18,6 +21,7 @@ struct SecureRandomInner {
|
|||||||
rng: StdRng,
|
rng: StdRng,
|
||||||
cipher: AesCtr,
|
cipher: AesCtr,
|
||||||
buffer: Vec<u8>,
|
buffer: Vec<u8>,
|
||||||
|
buffer_start: usize,
|
||||||
}
|
}
|
||||||
|
|
||||||
impl Drop for SecureRandomInner {
|
impl Drop for SecureRandomInner {
|
||||||
@@ -45,6 +49,7 @@ impl SecureRandom {
|
|||||||
rng,
|
rng,
|
||||||
cipher,
|
cipher,
|
||||||
buffer: Vec::with_capacity(1024),
|
buffer: Vec::with_capacity(1024),
|
||||||
|
buffer_start: 0,
|
||||||
}),
|
}),
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -56,16 +61,29 @@ impl SecureRandom {
|
|||||||
|
|
||||||
let mut written = 0usize;
|
let mut written = 0usize;
|
||||||
while written < out.len() {
|
while written < out.len() {
|
||||||
|
if inner.buffer_start >= inner.buffer.len() {
|
||||||
|
inner.buffer.clear();
|
||||||
|
inner.buffer_start = 0;
|
||||||
|
}
|
||||||
|
|
||||||
if inner.buffer.is_empty() {
|
if inner.buffer.is_empty() {
|
||||||
let mut chunk = vec![0u8; CHUNK_SIZE];
|
let mut chunk = vec![0u8; CHUNK_SIZE];
|
||||||
inner.rng.fill_bytes(&mut chunk);
|
inner.rng.fill_bytes(&mut chunk);
|
||||||
inner.cipher.apply(&mut chunk);
|
inner.cipher.apply(&mut chunk);
|
||||||
inner.buffer.extend_from_slice(&chunk);
|
inner.buffer.extend_from_slice(&chunk);
|
||||||
|
inner.buffer_start = 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
let take = (out.len() - written).min(inner.buffer.len());
|
let available = inner.buffer.len().saturating_sub(inner.buffer_start);
|
||||||
out[written..written + take].copy_from_slice(&inner.buffer[..take]);
|
let take = (out.len() - written).min(available);
|
||||||
inner.buffer.drain(..take);
|
let start = inner.buffer_start;
|
||||||
|
let end = start + take;
|
||||||
|
out[written..written + take].copy_from_slice(&inner.buffer[start..end]);
|
||||||
|
inner.buffer_start = end;
|
||||||
|
if inner.buffer_start >= inner.buffer.len() {
|
||||||
|
inner.buffer.clear();
|
||||||
|
inner.buffer_start = 0;
|
||||||
|
}
|
||||||
written += take;
|
written += take;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -92,7 +110,7 @@ impl SecureRandom {
|
|||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
let bytes_needed = (k + 7) / 8;
|
let bytes_needed = k.div_ceil(8);
|
||||||
let bytes = self.bytes(bytes_needed.min(8));
|
let bytes = self.bytes(bytes_needed.min(8));
|
||||||
|
|
||||||
let mut result = 0u64;
|
let mut result = 0u64;
|
||||||
|
|||||||
13
src/error.rs
13
src/error.rs
@@ -1,5 +1,7 @@
|
|||||||
//! Error Types
|
//! Error Types
|
||||||
|
|
||||||
|
#![allow(dead_code)]
|
||||||
|
|
||||||
use std::fmt;
|
use std::fmt;
|
||||||
use std::net::SocketAddr;
|
use std::net::SocketAddr;
|
||||||
use thiserror::Error;
|
use thiserror::Error;
|
||||||
@@ -89,7 +91,7 @@ impl From<StreamError> for std::io::Error {
|
|||||||
std::io::Error::new(std::io::ErrorKind::UnexpectedEof, err)
|
std::io::Error::new(std::io::ErrorKind::UnexpectedEof, err)
|
||||||
}
|
}
|
||||||
StreamError::Poisoned { .. } => {
|
StreamError::Poisoned { .. } => {
|
||||||
std::io::Error::new(std::io::ErrorKind::Other, err)
|
std::io::Error::other(err)
|
||||||
}
|
}
|
||||||
StreamError::BufferOverflow { .. } => {
|
StreamError::BufferOverflow { .. } => {
|
||||||
std::io::Error::new(std::io::ErrorKind::OutOfMemory, err)
|
std::io::Error::new(std::io::ErrorKind::OutOfMemory, err)
|
||||||
@@ -98,7 +100,7 @@ impl From<StreamError> for std::io::Error {
|
|||||||
std::io::Error::new(std::io::ErrorKind::InvalidData, err)
|
std::io::Error::new(std::io::ErrorKind::InvalidData, err)
|
||||||
}
|
}
|
||||||
StreamError::PartialRead { .. } | StreamError::PartialWrite { .. } => {
|
StreamError::PartialRead { .. } | StreamError::PartialWrite { .. } => {
|
||||||
std::io::Error::new(std::io::ErrorKind::Other, err)
|
std::io::Error::other(err)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -133,12 +135,7 @@ impl Recoverable for StreamError {
|
|||||||
}
|
}
|
||||||
|
|
||||||
fn can_continue(&self) -> bool {
|
fn can_continue(&self) -> bool {
|
||||||
match self {
|
!matches!(self, Self::Poisoned { .. } | Self::UnexpectedEof | Self::BufferOverflow { .. })
|
||||||
Self::Poisoned { .. } => false,
|
|
||||||
Self::UnexpectedEof => false,
|
|
||||||
Self::BufferOverflow { .. } => false,
|
|
||||||
_ => true,
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
@@ -1,226 +1,302 @@
|
|||||||
// src/ip_tracker.rs
|
// IP address tracking and per-user unique IP limiting.
|
||||||
// Модуль для отслеживания и ограничения уникальных IP-адресов пользователей
|
|
||||||
|
|
||||||
use std::collections::{HashMap, HashSet};
|
#![allow(dead_code)]
|
||||||
|
|
||||||
|
use std::collections::HashMap;
|
||||||
use std::net::IpAddr;
|
use std::net::IpAddr;
|
||||||
use std::sync::Arc;
|
use std::sync::Arc;
|
||||||
|
use std::sync::atomic::{AtomicU64, Ordering};
|
||||||
|
use std::time::{Duration, Instant};
|
||||||
|
|
||||||
use tokio::sync::RwLock;
|
use tokio::sync::RwLock;
|
||||||
|
|
||||||
/// Трекер уникальных IP-адресов для каждого пользователя MTProxy
|
use crate::config::UserMaxUniqueIpsMode;
|
||||||
///
|
|
||||||
/// Предоставляет thread-safe механизм для:
|
|
||||||
/// - Отслеживания активных IP-адресов каждого пользователя
|
|
||||||
/// - Ограничения количества уникальных IP на пользователя
|
|
||||||
/// - Автоматической очистки при отключении клиентов
|
|
||||||
#[derive(Debug, Clone)]
|
#[derive(Debug, Clone)]
|
||||||
pub struct UserIpTracker {
|
pub struct UserIpTracker {
|
||||||
/// Маппинг: Имя пользователя -> Множество активных IP-адресов
|
active_ips: Arc<RwLock<HashMap<String, HashMap<IpAddr, usize>>>>,
|
||||||
active_ips: Arc<RwLock<HashMap<String, HashSet<IpAddr>>>>,
|
recent_ips: Arc<RwLock<HashMap<String, HashMap<IpAddr, Instant>>>>,
|
||||||
|
|
||||||
/// Маппинг: Имя пользователя -> Максимально разрешенное количество уникальных IP
|
|
||||||
max_ips: Arc<RwLock<HashMap<String, usize>>>,
|
max_ips: Arc<RwLock<HashMap<String, usize>>>,
|
||||||
|
limit_mode: Arc<RwLock<UserMaxUniqueIpsMode>>,
|
||||||
|
limit_window: Arc<RwLock<Duration>>,
|
||||||
|
last_compact_epoch_secs: Arc<AtomicU64>,
|
||||||
}
|
}
|
||||||
|
|
||||||
impl UserIpTracker {
|
impl UserIpTracker {
|
||||||
/// Создать новый пустой трекер
|
|
||||||
pub fn new() -> Self {
|
pub fn new() -> Self {
|
||||||
Self {
|
Self {
|
||||||
active_ips: Arc::new(RwLock::new(HashMap::new())),
|
active_ips: Arc::new(RwLock::new(HashMap::new())),
|
||||||
|
recent_ips: Arc::new(RwLock::new(HashMap::new())),
|
||||||
max_ips: Arc::new(RwLock::new(HashMap::new())),
|
max_ips: Arc::new(RwLock::new(HashMap::new())),
|
||||||
|
limit_mode: Arc::new(RwLock::new(UserMaxUniqueIpsMode::ActiveWindow)),
|
||||||
|
limit_window: Arc::new(RwLock::new(Duration::from_secs(30))),
|
||||||
|
last_compact_epoch_secs: Arc::new(AtomicU64::new(0)),
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Установить лимит уникальных IP для конкретного пользователя
|
fn now_epoch_secs() -> u64 {
|
||||||
///
|
std::time::SystemTime::now()
|
||||||
/// # Arguments
|
.duration_since(std::time::UNIX_EPOCH)
|
||||||
/// * `username` - Имя пользователя
|
.unwrap_or_default()
|
||||||
/// * `max_ips` - Максимальное количество одновременно активных IP-адресов
|
.as_secs()
|
||||||
|
}
|
||||||
|
|
||||||
|
async fn maybe_compact_empty_users(&self) {
|
||||||
|
const COMPACT_INTERVAL_SECS: u64 = 60;
|
||||||
|
let now_epoch_secs = Self::now_epoch_secs();
|
||||||
|
let last_compact_epoch_secs = self.last_compact_epoch_secs.load(Ordering::Relaxed);
|
||||||
|
if now_epoch_secs.saturating_sub(last_compact_epoch_secs) < COMPACT_INTERVAL_SECS {
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
if self
|
||||||
|
.last_compact_epoch_secs
|
||||||
|
.compare_exchange(
|
||||||
|
last_compact_epoch_secs,
|
||||||
|
now_epoch_secs,
|
||||||
|
Ordering::AcqRel,
|
||||||
|
Ordering::Relaxed,
|
||||||
|
)
|
||||||
|
.is_err()
|
||||||
|
{
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
let mut active_ips = self.active_ips.write().await;
|
||||||
|
let mut recent_ips = self.recent_ips.write().await;
|
||||||
|
let mut users = Vec::<String>::with_capacity(active_ips.len().saturating_add(recent_ips.len()));
|
||||||
|
users.extend(active_ips.keys().cloned());
|
||||||
|
for user in recent_ips.keys() {
|
||||||
|
if !active_ips.contains_key(user) {
|
||||||
|
users.push(user.clone());
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
for user in users {
|
||||||
|
let active_empty = active_ips.get(&user).map(|ips| ips.is_empty()).unwrap_or(true);
|
||||||
|
let recent_empty = recent_ips.get(&user).map(|ips| ips.is_empty()).unwrap_or(true);
|
||||||
|
if active_empty && recent_empty {
|
||||||
|
active_ips.remove(&user);
|
||||||
|
recent_ips.remove(&user);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
pub async fn set_limit_policy(&self, mode: UserMaxUniqueIpsMode, window_secs: u64) {
|
||||||
|
{
|
||||||
|
let mut current_mode = self.limit_mode.write().await;
|
||||||
|
*current_mode = mode;
|
||||||
|
}
|
||||||
|
let mut current_window = self.limit_window.write().await;
|
||||||
|
*current_window = Duration::from_secs(window_secs.max(1));
|
||||||
|
}
|
||||||
|
|
||||||
pub async fn set_user_limit(&self, username: &str, max_ips: usize) {
|
pub async fn set_user_limit(&self, username: &str, max_ips: usize) {
|
||||||
let mut limits = self.max_ips.write().await;
|
let mut limits = self.max_ips.write().await;
|
||||||
limits.insert(username.to_string(), max_ips);
|
limits.insert(username.to_string(), max_ips);
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Загрузить лимиты из конфигурации
|
pub async fn remove_user_limit(&self, username: &str) {
|
||||||
///
|
let mut limits = self.max_ips.write().await;
|
||||||
/// # Arguments
|
limits.remove(username);
|
||||||
/// * `limits` - HashMap с лимитами из config.toml
|
}
|
||||||
|
|
||||||
pub async fn load_limits(&self, limits: &HashMap<String, usize>) {
|
pub async fn load_limits(&self, limits: &HashMap<String, usize>) {
|
||||||
let mut max_ips = self.max_ips.write().await;
|
let mut max_ips = self.max_ips.write().await;
|
||||||
for (user, limit) in limits {
|
max_ips.clone_from(limits);
|
||||||
max_ips.insert(user.clone(), *limit);
|
}
|
||||||
}
|
|
||||||
|
fn prune_recent(user_recent: &mut HashMap<IpAddr, Instant>, now: Instant, window: Duration) {
|
||||||
|
if user_recent.is_empty() {
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
user_recent.retain(|_, seen_at| now.duration_since(*seen_at) <= window);
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Проверить, может ли пользователь подключиться с данного IP-адреса
|
|
||||||
/// и добавить IP в список активных, если проверка успешна
|
|
||||||
///
|
|
||||||
/// # Arguments
|
|
||||||
/// * `username` - Имя пользователя
|
|
||||||
/// * `ip` - IP-адрес клиента
|
|
||||||
///
|
|
||||||
/// # Returns
|
|
||||||
/// * `Ok(())` - Подключение разрешено, IP добавлен в активные
|
|
||||||
/// * `Err(String)` - Подключение отклонено с описанием причины
|
|
||||||
pub async fn check_and_add(&self, username: &str, ip: IpAddr) -> Result<(), String> {
|
pub async fn check_and_add(&self, username: &str, ip: IpAddr) -> Result<(), String> {
|
||||||
// Получаем лимит для пользователя
|
self.maybe_compact_empty_users().await;
|
||||||
|
let limit = {
|
||||||
let max_ips = self.max_ips.read().await;
|
let max_ips = self.max_ips.read().await;
|
||||||
let limit = match max_ips.get(username) {
|
max_ips.get(username).copied()
|
||||||
Some(limit) => *limit,
|
|
||||||
None => {
|
|
||||||
// Если лимит не задан - разрешаем безлимитный доступ
|
|
||||||
drop(max_ips);
|
|
||||||
let mut active_ips = self.active_ips.write().await;
|
|
||||||
let user_ips = active_ips
|
|
||||||
.entry(username.to_string())
|
|
||||||
.or_insert_with(HashSet::new);
|
|
||||||
user_ips.insert(ip);
|
|
||||||
return Ok(());
|
|
||||||
}
|
|
||||||
};
|
};
|
||||||
drop(max_ips);
|
let mode = *self.limit_mode.read().await;
|
||||||
|
let window = *self.limit_window.read().await;
|
||||||
|
let now = Instant::now();
|
||||||
|
|
||||||
// Проверяем и обновляем активные IP
|
|
||||||
let mut active_ips = self.active_ips.write().await;
|
let mut active_ips = self.active_ips.write().await;
|
||||||
let user_ips = active_ips
|
let user_active = active_ips
|
||||||
.entry(username.to_string())
|
.entry(username.to_string())
|
||||||
.or_insert_with(HashSet::new);
|
.or_insert_with(HashMap::new);
|
||||||
|
|
||||||
// Если IP уже есть в списке - это повторное подключение, разрешаем
|
let mut recent_ips = self.recent_ips.write().await;
|
||||||
if user_ips.contains(&ip) {
|
let user_recent = recent_ips
|
||||||
|
.entry(username.to_string())
|
||||||
|
.or_insert_with(HashMap::new);
|
||||||
|
Self::prune_recent(user_recent, now, window);
|
||||||
|
|
||||||
|
if let Some(count) = user_active.get_mut(&ip) {
|
||||||
|
*count = count.saturating_add(1);
|
||||||
|
user_recent.insert(ip, now);
|
||||||
return Ok(());
|
return Ok(());
|
||||||
}
|
}
|
||||||
|
|
||||||
// Проверяем, не превышен ли лимит
|
if let Some(limit) = limit {
|
||||||
if user_ips.len() >= limit {
|
let active_limit_reached = user_active.len() >= limit;
|
||||||
|
let recent_limit_reached = user_recent.len() >= limit;
|
||||||
|
let deny = match mode {
|
||||||
|
UserMaxUniqueIpsMode::ActiveWindow => active_limit_reached,
|
||||||
|
UserMaxUniqueIpsMode::TimeWindow => recent_limit_reached,
|
||||||
|
UserMaxUniqueIpsMode::Combined => active_limit_reached || recent_limit_reached,
|
||||||
|
};
|
||||||
|
|
||||||
|
if deny {
|
||||||
return Err(format!(
|
return Err(format!(
|
||||||
"IP limit reached for user '{}': {}/{} unique IPs already connected",
|
"IP limit reached for user '{}': active={}/{} recent={}/{} mode={:?}",
|
||||||
username,
|
username,
|
||||||
user_ips.len(),
|
user_active.len(),
|
||||||
limit
|
limit,
|
||||||
|
user_recent.len(),
|
||||||
|
limit,
|
||||||
|
mode
|
||||||
));
|
));
|
||||||
}
|
}
|
||||||
|
}
|
||||||
|
|
||||||
// Лимит не превышен - добавляем новый IP
|
user_active.insert(ip, 1);
|
||||||
user_ips.insert(ip);
|
user_recent.insert(ip, now);
|
||||||
Ok(())
|
Ok(())
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Удалить IP-адрес из списка активных при отключении клиента
|
|
||||||
///
|
|
||||||
/// # Arguments
|
|
||||||
/// * `username` - Имя пользователя
|
|
||||||
/// * `ip` - IP-адрес отключившегося клиента
|
|
||||||
pub async fn remove_ip(&self, username: &str, ip: IpAddr) {
|
pub async fn remove_ip(&self, username: &str, ip: IpAddr) {
|
||||||
|
self.maybe_compact_empty_users().await;
|
||||||
let mut active_ips = self.active_ips.write().await;
|
let mut active_ips = self.active_ips.write().await;
|
||||||
|
|
||||||
if let Some(user_ips) = active_ips.get_mut(username) {
|
if let Some(user_ips) = active_ips.get_mut(username) {
|
||||||
|
if let Some(count) = user_ips.get_mut(&ip) {
|
||||||
|
if *count > 1 {
|
||||||
|
*count -= 1;
|
||||||
|
} else {
|
||||||
user_ips.remove(&ip);
|
user_ips.remove(&ip);
|
||||||
|
}
|
||||||
// Если у пользователя не осталось активных IP - удаляем запись
|
}
|
||||||
// для экономии памяти
|
|
||||||
if user_ips.is_empty() {
|
if user_ips.is_empty() {
|
||||||
active_ips.remove(username);
|
active_ips.remove(username);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Получить текущее количество активных IP-адресов для пользователя
|
pub async fn get_recent_counts_for_users(&self, users: &[String]) -> HashMap<String, usize> {
|
||||||
///
|
let window = *self.limit_window.read().await;
|
||||||
/// # Arguments
|
let now = Instant::now();
|
||||||
/// * `username` - Имя пользователя
|
let recent_ips = self.recent_ips.read().await;
|
||||||
///
|
|
||||||
/// # Returns
|
let mut counts = HashMap::with_capacity(users.len());
|
||||||
/// Количество уникальных активных IP-адресов
|
for user in users {
|
||||||
pub async fn get_active_ip_count(&self, username: &str) -> usize {
|
let count = if let Some(user_recent) = recent_ips.get(user) {
|
||||||
let active_ips = self.active_ips.read().await;
|
user_recent
|
||||||
active_ips
|
.values()
|
||||||
.get(username)
|
.filter(|seen_at| now.duration_since(**seen_at) <= window)
|
||||||
.map(|ips| ips.len())
|
.count()
|
||||||
.unwrap_or(0)
|
} else {
|
||||||
|
0
|
||||||
|
};
|
||||||
|
counts.insert(user.clone(), count);
|
||||||
|
}
|
||||||
|
counts
|
||||||
|
}
|
||||||
|
|
||||||
|
pub async fn get_active_ips_for_users(&self, users: &[String]) -> HashMap<String, Vec<IpAddr>> {
|
||||||
|
let active_ips = self.active_ips.read().await;
|
||||||
|
let mut out = HashMap::with_capacity(users.len());
|
||||||
|
for user in users {
|
||||||
|
let mut ips = active_ips
|
||||||
|
.get(user)
|
||||||
|
.map(|per_ip| per_ip.keys().copied().collect::<Vec<_>>())
|
||||||
|
.unwrap_or_else(Vec::new);
|
||||||
|
ips.sort();
|
||||||
|
out.insert(user.clone(), ips);
|
||||||
|
}
|
||||||
|
out
|
||||||
|
}
|
||||||
|
|
||||||
|
pub async fn get_recent_ips_for_users(&self, users: &[String]) -> HashMap<String, Vec<IpAddr>> {
|
||||||
|
let window = *self.limit_window.read().await;
|
||||||
|
let now = Instant::now();
|
||||||
|
let recent_ips = self.recent_ips.read().await;
|
||||||
|
|
||||||
|
let mut out = HashMap::with_capacity(users.len());
|
||||||
|
for user in users {
|
||||||
|
let mut ips = if let Some(user_recent) = recent_ips.get(user) {
|
||||||
|
user_recent
|
||||||
|
.iter()
|
||||||
|
.filter(|(_, seen_at)| now.duration_since(**seen_at) <= window)
|
||||||
|
.map(|(ip, _)| *ip)
|
||||||
|
.collect::<Vec<_>>()
|
||||||
|
} else {
|
||||||
|
Vec::new()
|
||||||
|
};
|
||||||
|
ips.sort();
|
||||||
|
out.insert(user.clone(), ips);
|
||||||
|
}
|
||||||
|
out
|
||||||
|
}
|
||||||
|
|
||||||
|
pub async fn get_active_ip_count(&self, username: &str) -> usize {
|
||||||
|
let active_ips = self.active_ips.read().await;
|
||||||
|
active_ips.get(username).map(|ips| ips.len()).unwrap_or(0)
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Получить список всех активных IP-адресов для пользователя
|
|
||||||
///
|
|
||||||
/// # Arguments
|
|
||||||
/// * `username` - Имя пользователя
|
|
||||||
///
|
|
||||||
/// # Returns
|
|
||||||
/// Вектор с активными IP-адресами
|
|
||||||
pub async fn get_active_ips(&self, username: &str) -> Vec<IpAddr> {
|
pub async fn get_active_ips(&self, username: &str) -> Vec<IpAddr> {
|
||||||
let active_ips = self.active_ips.read().await;
|
let active_ips = self.active_ips.read().await;
|
||||||
active_ips
|
active_ips
|
||||||
.get(username)
|
.get(username)
|
||||||
.map(|ips| ips.iter().copied().collect())
|
.map(|ips| ips.keys().copied().collect())
|
||||||
.unwrap_or_else(Vec::new)
|
.unwrap_or_else(Vec::new)
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Получить статистику по всем пользователям
|
|
||||||
///
|
|
||||||
/// # Returns
|
|
||||||
/// Вектор кортежей: (имя_пользователя, количество_активных_IP, лимит)
|
|
||||||
pub async fn get_stats(&self) -> Vec<(String, usize, usize)> {
|
pub async fn get_stats(&self) -> Vec<(String, usize, usize)> {
|
||||||
let active_ips = self.active_ips.read().await;
|
let active_ips = self.active_ips.read().await;
|
||||||
let max_ips = self.max_ips.read().await;
|
let max_ips = self.max_ips.read().await;
|
||||||
|
|
||||||
let mut stats = Vec::new();
|
let mut stats = Vec::new();
|
||||||
|
|
||||||
// Собираем статистику по пользователям с активными подключениями
|
|
||||||
for (username, user_ips) in active_ips.iter() {
|
for (username, user_ips) in active_ips.iter() {
|
||||||
let limit = max_ips.get(username).copied().unwrap_or(0);
|
let limit = max_ips.get(username).copied().unwrap_or(0);
|
||||||
stats.push((username.clone(), user_ips.len(), limit));
|
stats.push((username.clone(), user_ips.len(), limit));
|
||||||
}
|
}
|
||||||
|
|
||||||
stats.sort_by(|a, b| a.0.cmp(&b.0)); // Сортируем по имени пользователя
|
stats.sort_by(|a, b| a.0.cmp(&b.0));
|
||||||
stats
|
stats
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Очистить все активные IP для пользователя (при необходимости)
|
|
||||||
///
|
|
||||||
/// # Arguments
|
|
||||||
/// * `username` - Имя пользователя
|
|
||||||
pub async fn clear_user_ips(&self, username: &str) {
|
pub async fn clear_user_ips(&self, username: &str) {
|
||||||
let mut active_ips = self.active_ips.write().await;
|
let mut active_ips = self.active_ips.write().await;
|
||||||
active_ips.remove(username);
|
active_ips.remove(username);
|
||||||
|
drop(active_ips);
|
||||||
|
|
||||||
|
let mut recent_ips = self.recent_ips.write().await;
|
||||||
|
recent_ips.remove(username);
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Очистить всю статистику (использовать с осторожностью!)
|
|
||||||
pub async fn clear_all(&self) {
|
pub async fn clear_all(&self) {
|
||||||
let mut active_ips = self.active_ips.write().await;
|
let mut active_ips = self.active_ips.write().await;
|
||||||
active_ips.clear();
|
active_ips.clear();
|
||||||
|
drop(active_ips);
|
||||||
|
|
||||||
|
let mut recent_ips = self.recent_ips.write().await;
|
||||||
|
recent_ips.clear();
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Проверить, подключен ли пользователь с данного IP
|
|
||||||
///
|
|
||||||
/// # Arguments
|
|
||||||
/// * `username` - Имя пользователя
|
|
||||||
/// * `ip` - IP-адрес для проверки
|
|
||||||
///
|
|
||||||
/// # Returns
|
|
||||||
/// `true` если IP активен, `false` если нет
|
|
||||||
pub async fn is_ip_active(&self, username: &str, ip: IpAddr) -> bool {
|
pub async fn is_ip_active(&self, username: &str, ip: IpAddr) -> bool {
|
||||||
let active_ips = self.active_ips.read().await;
|
let active_ips = self.active_ips.read().await;
|
||||||
active_ips
|
active_ips
|
||||||
.get(username)
|
.get(username)
|
||||||
.map(|ips| ips.contains(&ip))
|
.map(|ips| ips.contains_key(&ip))
|
||||||
.unwrap_or(false)
|
.unwrap_or(false)
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Получить лимит для пользователя
|
|
||||||
///
|
|
||||||
/// # Arguments
|
|
||||||
/// * `username` - Имя пользователя
|
|
||||||
///
|
|
||||||
/// # Returns
|
|
||||||
/// Лимит IP-адресов или None, если лимит не установлен
|
|
||||||
pub async fn get_user_limit(&self, username: &str) -> Option<usize> {
|
pub async fn get_user_limit(&self, username: &str) -> Option<usize> {
|
||||||
let max_ips = self.max_ips.read().await;
|
let max_ips = self.max_ips.read().await;
|
||||||
max_ips.get(username).copied()
|
max_ips.get(username).copied()
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Форматировать статистику в читаемый текст
|
|
||||||
///
|
|
||||||
/// # Returns
|
|
||||||
/// Строка со статистикой для логов или мониторинга
|
|
||||||
pub async fn format_stats(&self) -> String {
|
pub async fn format_stats(&self) -> String {
|
||||||
let stats = self.get_stats().await;
|
let stats = self.get_stats().await;
|
||||||
|
|
||||||
@@ -236,12 +312,16 @@ impl UserIpTracker {
|
|||||||
"User: {:<20} Active IPs: {}/{}\n",
|
"User: {:<20} Active IPs: {}/{}\n",
|
||||||
username,
|
username,
|
||||||
active_count,
|
active_count,
|
||||||
if limit > 0 { limit.to_string() } else { "unlimited".to_string() }
|
if limit > 0 {
|
||||||
|
limit.to_string()
|
||||||
|
} else {
|
||||||
|
"unlimited".to_string()
|
||||||
|
}
|
||||||
));
|
));
|
||||||
|
|
||||||
let ips = self.get_active_ips(&username).await;
|
let ips = self.get_active_ips(&username).await;
|
||||||
for ip in ips {
|
for ip in ips {
|
||||||
output.push_str(&format!(" └─ {}\n", ip));
|
output.push_str(&format!(" - {}\n", ip));
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -255,10 +335,6 @@ impl Default for UserIpTracker {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// ============================================================================
|
|
||||||
// ТЕСТЫ
|
|
||||||
// ============================================================================
|
|
||||||
|
|
||||||
#[cfg(test)]
|
#[cfg(test)]
|
||||||
mod tests {
|
mod tests {
|
||||||
use super::*;
|
use super::*;
|
||||||
@@ -281,17 +357,33 @@ mod tests {
|
|||||||
let ip2 = test_ipv4(192, 168, 1, 2);
|
let ip2 = test_ipv4(192, 168, 1, 2);
|
||||||
let ip3 = test_ipv4(192, 168, 1, 3);
|
let ip3 = test_ipv4(192, 168, 1, 3);
|
||||||
|
|
||||||
// Первые два IP должны быть приняты
|
|
||||||
assert!(tracker.check_and_add("test_user", ip1).await.is_ok());
|
assert!(tracker.check_and_add("test_user", ip1).await.is_ok());
|
||||||
assert!(tracker.check_and_add("test_user", ip2).await.is_ok());
|
assert!(tracker.check_and_add("test_user", ip2).await.is_ok());
|
||||||
|
|
||||||
// Третий IP должен быть отклонен
|
|
||||||
assert!(tracker.check_and_add("test_user", ip3).await.is_err());
|
assert!(tracker.check_and_add("test_user", ip3).await.is_err());
|
||||||
|
|
||||||
// Проверяем счетчик
|
|
||||||
assert_eq!(tracker.get_active_ip_count("test_user").await, 2);
|
assert_eq!(tracker.get_active_ip_count("test_user").await, 2);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
#[tokio::test]
|
||||||
|
async fn test_active_window_rejects_new_ip_and_keeps_existing_session() {
|
||||||
|
let tracker = UserIpTracker::new();
|
||||||
|
tracker.set_user_limit("test_user", 1).await;
|
||||||
|
tracker
|
||||||
|
.set_limit_policy(UserMaxUniqueIpsMode::ActiveWindow, 30)
|
||||||
|
.await;
|
||||||
|
|
||||||
|
let ip1 = test_ipv4(10, 10, 10, 1);
|
||||||
|
let ip2 = test_ipv4(10, 10, 10, 2);
|
||||||
|
|
||||||
|
assert!(tracker.check_and_add("test_user", ip1).await.is_ok());
|
||||||
|
assert!(tracker.is_ip_active("test_user", ip1).await);
|
||||||
|
assert!(tracker.check_and_add("test_user", ip2).await.is_err());
|
||||||
|
|
||||||
|
// Existing session remains active; only new unique IP is denied.
|
||||||
|
assert!(tracker.is_ip_active("test_user", ip1).await);
|
||||||
|
assert_eq!(tracker.get_active_ip_count("test_user").await, 1);
|
||||||
|
}
|
||||||
|
|
||||||
#[tokio::test]
|
#[tokio::test]
|
||||||
async fn test_reconnection_from_same_ip() {
|
async fn test_reconnection_from_same_ip() {
|
||||||
let tracker = UserIpTracker::new();
|
let tracker = UserIpTracker::new();
|
||||||
@@ -299,16 +391,29 @@ mod tests {
|
|||||||
|
|
||||||
let ip1 = test_ipv4(192, 168, 1, 1);
|
let ip1 = test_ipv4(192, 168, 1, 1);
|
||||||
|
|
||||||
// Первое подключение
|
|
||||||
assert!(tracker.check_and_add("test_user", ip1).await.is_ok());
|
assert!(tracker.check_and_add("test_user", ip1).await.is_ok());
|
||||||
|
|
||||||
// Повторное подключение с того же IP должно пройти
|
|
||||||
assert!(tracker.check_and_add("test_user", ip1).await.is_ok());
|
assert!(tracker.check_and_add("test_user", ip1).await.is_ok());
|
||||||
|
|
||||||
// Счетчик не должен увеличиться
|
|
||||||
assert_eq!(tracker.get_active_ip_count("test_user").await, 1);
|
assert_eq!(tracker.get_active_ip_count("test_user").await, 1);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
#[tokio::test]
|
||||||
|
async fn test_same_ip_disconnect_keeps_active_while_other_session_alive() {
|
||||||
|
let tracker = UserIpTracker::new();
|
||||||
|
tracker.set_user_limit("test_user", 2).await;
|
||||||
|
|
||||||
|
let ip1 = test_ipv4(192, 168, 1, 1);
|
||||||
|
|
||||||
|
assert!(tracker.check_and_add("test_user", ip1).await.is_ok());
|
||||||
|
assert!(tracker.check_and_add("test_user", ip1).await.is_ok());
|
||||||
|
assert_eq!(tracker.get_active_ip_count("test_user").await, 1);
|
||||||
|
|
||||||
|
tracker.remove_ip("test_user", ip1).await;
|
||||||
|
assert_eq!(tracker.get_active_ip_count("test_user").await, 1);
|
||||||
|
|
||||||
|
tracker.remove_ip("test_user", ip1).await;
|
||||||
|
assert_eq!(tracker.get_active_ip_count("test_user").await, 0);
|
||||||
|
}
|
||||||
|
|
||||||
#[tokio::test]
|
#[tokio::test]
|
||||||
async fn test_ip_removal() {
|
async fn test_ip_removal() {
|
||||||
let tracker = UserIpTracker::new();
|
let tracker = UserIpTracker::new();
|
||||||
@@ -318,32 +423,24 @@ mod tests {
|
|||||||
let ip2 = test_ipv4(192, 168, 1, 2);
|
let ip2 = test_ipv4(192, 168, 1, 2);
|
||||||
let ip3 = test_ipv4(192, 168, 1, 3);
|
let ip3 = test_ipv4(192, 168, 1, 3);
|
||||||
|
|
||||||
// Добавляем два IP
|
|
||||||
assert!(tracker.check_and_add("test_user", ip1).await.is_ok());
|
assert!(tracker.check_and_add("test_user", ip1).await.is_ok());
|
||||||
assert!(tracker.check_and_add("test_user", ip2).await.is_ok());
|
assert!(tracker.check_and_add("test_user", ip2).await.is_ok());
|
||||||
|
|
||||||
// Третий не должен пройти
|
|
||||||
assert!(tracker.check_and_add("test_user", ip3).await.is_err());
|
assert!(tracker.check_and_add("test_user", ip3).await.is_err());
|
||||||
|
|
||||||
// Удаляем первый IP
|
|
||||||
tracker.remove_ip("test_user", ip1).await;
|
tracker.remove_ip("test_user", ip1).await;
|
||||||
|
|
||||||
// Теперь третий должен пройти
|
|
||||||
assert!(tracker.check_and_add("test_user", ip3).await.is_ok());
|
assert!(tracker.check_and_add("test_user", ip3).await.is_ok());
|
||||||
|
|
||||||
assert_eq!(tracker.get_active_ip_count("test_user").await, 2);
|
assert_eq!(tracker.get_active_ip_count("test_user").await, 2);
|
||||||
}
|
}
|
||||||
|
|
||||||
#[tokio::test]
|
#[tokio::test]
|
||||||
async fn test_no_limit() {
|
async fn test_no_limit() {
|
||||||
let tracker = UserIpTracker::new();
|
let tracker = UserIpTracker::new();
|
||||||
// Не устанавливаем лимит для test_user
|
|
||||||
|
|
||||||
let ip1 = test_ipv4(192, 168, 1, 1);
|
let ip1 = test_ipv4(192, 168, 1, 1);
|
||||||
let ip2 = test_ipv4(192, 168, 1, 2);
|
let ip2 = test_ipv4(192, 168, 1, 2);
|
||||||
let ip3 = test_ipv4(192, 168, 1, 3);
|
let ip3 = test_ipv4(192, 168, 1, 3);
|
||||||
|
|
||||||
// Без лимита все IP должны проходить
|
|
||||||
assert!(tracker.check_and_add("test_user", ip1).await.is_ok());
|
assert!(tracker.check_and_add("test_user", ip1).await.is_ok());
|
||||||
assert!(tracker.check_and_add("test_user", ip2).await.is_ok());
|
assert!(tracker.check_and_add("test_user", ip2).await.is_ok());
|
||||||
assert!(tracker.check_and_add("test_user", ip3).await.is_ok());
|
assert!(tracker.check_and_add("test_user", ip3).await.is_ok());
|
||||||
@@ -360,11 +457,9 @@ mod tests {
|
|||||||
let ip1 = test_ipv4(192, 168, 1, 1);
|
let ip1 = test_ipv4(192, 168, 1, 1);
|
||||||
let ip2 = test_ipv4(192, 168, 1, 2);
|
let ip2 = test_ipv4(192, 168, 1, 2);
|
||||||
|
|
||||||
// user1 может использовать 2 IP
|
|
||||||
assert!(tracker.check_and_add("user1", ip1).await.is_ok());
|
assert!(tracker.check_and_add("user1", ip1).await.is_ok());
|
||||||
assert!(tracker.check_and_add("user1", ip2).await.is_ok());
|
assert!(tracker.check_and_add("user1", ip2).await.is_ok());
|
||||||
|
|
||||||
// user2 может использовать только 1 IP
|
|
||||||
assert!(tracker.check_and_add("user2", ip1).await.is_ok());
|
assert!(tracker.check_and_add("user2", ip1).await.is_ok());
|
||||||
assert!(tracker.check_and_add("user2", ip2).await.is_err());
|
assert!(tracker.check_and_add("user2", ip2).await.is_err());
|
||||||
}
|
}
|
||||||
@@ -377,7 +472,6 @@ mod tests {
|
|||||||
let ipv4 = test_ipv4(192, 168, 1, 1);
|
let ipv4 = test_ipv4(192, 168, 1, 1);
|
||||||
let ipv6 = test_ipv6();
|
let ipv6 = test_ipv6();
|
||||||
|
|
||||||
// Должны работать оба типа адресов
|
|
||||||
assert!(tracker.check_and_add("test_user", ipv4).await.is_ok());
|
assert!(tracker.check_and_add("test_user", ipv4).await.is_ok());
|
||||||
assert!(tracker.check_and_add("test_user", ipv6).await.is_ok());
|
assert!(tracker.check_and_add("test_user", ipv6).await.is_ok());
|
||||||
|
|
||||||
@@ -416,7 +510,6 @@ mod tests {
|
|||||||
let stats = tracker.get_stats().await;
|
let stats = tracker.get_stats().await;
|
||||||
assert_eq!(stats.len(), 2);
|
assert_eq!(stats.len(), 2);
|
||||||
|
|
||||||
// Проверяем наличие обоих пользователей в статистике
|
|
||||||
assert!(stats.iter().any(|(name, _, _)| name == "user1"));
|
assert!(stats.iter().any(|(name, _, _)| name == "user1"));
|
||||||
assert!(stats.iter().any(|(name, _, _)| name == "user2"));
|
assert!(stats.iter().any(|(name, _, _)| name == "user2"));
|
||||||
}
|
}
|
||||||
@@ -459,4 +552,74 @@ mod tests {
|
|||||||
assert_eq!(tracker.get_user_limit("user2").await, Some(3));
|
assert_eq!(tracker.get_user_limit("user2").await, Some(3));
|
||||||
assert_eq!(tracker.get_user_limit("user3").await, None);
|
assert_eq!(tracker.get_user_limit("user3").await, None);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
#[tokio::test]
|
||||||
|
async fn test_load_limits_replaces_previous_map() {
|
||||||
|
let tracker = UserIpTracker::new();
|
||||||
|
|
||||||
|
let mut first = HashMap::new();
|
||||||
|
first.insert("user1".to_string(), 2);
|
||||||
|
first.insert("user2".to_string(), 3);
|
||||||
|
tracker.load_limits(&first).await;
|
||||||
|
|
||||||
|
let mut second = HashMap::new();
|
||||||
|
second.insert("user2".to_string(), 5);
|
||||||
|
tracker.load_limits(&second).await;
|
||||||
|
|
||||||
|
assert_eq!(tracker.get_user_limit("user1").await, None);
|
||||||
|
assert_eq!(tracker.get_user_limit("user2").await, Some(5));
|
||||||
|
}
|
||||||
|
|
||||||
|
#[tokio::test]
|
||||||
|
async fn test_time_window_mode_blocks_recent_ip_churn() {
|
||||||
|
let tracker = UserIpTracker::new();
|
||||||
|
tracker.set_user_limit("test_user", 1).await;
|
||||||
|
tracker
|
||||||
|
.set_limit_policy(UserMaxUniqueIpsMode::TimeWindow, 30)
|
||||||
|
.await;
|
||||||
|
|
||||||
|
let ip1 = test_ipv4(10, 0, 0, 1);
|
||||||
|
let ip2 = test_ipv4(10, 0, 0, 2);
|
||||||
|
|
||||||
|
assert!(tracker.check_and_add("test_user", ip1).await.is_ok());
|
||||||
|
tracker.remove_ip("test_user", ip1).await;
|
||||||
|
assert!(tracker.check_and_add("test_user", ip2).await.is_err());
|
||||||
|
}
|
||||||
|
|
||||||
|
#[tokio::test]
|
||||||
|
async fn test_combined_mode_enforces_active_and_recent_limits() {
|
||||||
|
let tracker = UserIpTracker::new();
|
||||||
|
tracker.set_user_limit("test_user", 1).await;
|
||||||
|
tracker
|
||||||
|
.set_limit_policy(UserMaxUniqueIpsMode::Combined, 30)
|
||||||
|
.await;
|
||||||
|
|
||||||
|
let ip1 = test_ipv4(10, 0, 1, 1);
|
||||||
|
let ip2 = test_ipv4(10, 0, 1, 2);
|
||||||
|
|
||||||
|
assert!(tracker.check_and_add("test_user", ip1).await.is_ok());
|
||||||
|
assert!(tracker.check_and_add("test_user", ip2).await.is_err());
|
||||||
|
|
||||||
|
tracker.remove_ip("test_user", ip1).await;
|
||||||
|
assert!(tracker.check_and_add("test_user", ip2).await.is_err());
|
||||||
|
}
|
||||||
|
|
||||||
|
#[tokio::test]
|
||||||
|
async fn test_time_window_expires() {
|
||||||
|
let tracker = UserIpTracker::new();
|
||||||
|
tracker.set_user_limit("test_user", 1).await;
|
||||||
|
tracker
|
||||||
|
.set_limit_policy(UserMaxUniqueIpsMode::TimeWindow, 1)
|
||||||
|
.await;
|
||||||
|
|
||||||
|
let ip1 = test_ipv4(10, 1, 0, 1);
|
||||||
|
let ip2 = test_ipv4(10, 1, 0, 2);
|
||||||
|
|
||||||
|
assert!(tracker.check_and_add("test_user", ip1).await.is_ok());
|
||||||
|
tracker.remove_ip("test_user", ip1).await;
|
||||||
|
assert!(tracker.check_and_add("test_user", ip2).await.is_err());
|
||||||
|
|
||||||
|
tokio::time::sleep(Duration::from_millis(1100)).await;
|
||||||
|
assert!(tracker.check_and_add("test_user", ip2).await.is_ok());
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|||||||
130
src/maestro/admission.rs
Normal file
130
src/maestro/admission.rs
Normal file
@@ -0,0 +1,130 @@
|
|||||||
|
use std::sync::Arc;
|
||||||
|
use std::time::{Duration, Instant};
|
||||||
|
|
||||||
|
use tokio::sync::watch;
|
||||||
|
use tracing::{info, warn};
|
||||||
|
|
||||||
|
use crate::config::ProxyConfig;
|
||||||
|
use crate::proxy::route_mode::{RelayRouteMode, RouteRuntimeController};
|
||||||
|
use crate::transport::middle_proxy::MePool;
|
||||||
|
|
||||||
|
const STARTUP_FALLBACK_AFTER: Duration = Duration::from_secs(80);
|
||||||
|
const RUNTIME_FALLBACK_AFTER: Duration = Duration::from_secs(6);
|
||||||
|
|
||||||
|
pub(crate) async fn configure_admission_gate(
|
||||||
|
config: &Arc<ProxyConfig>,
|
||||||
|
me_pool: Option<Arc<MePool>>,
|
||||||
|
route_runtime: Arc<RouteRuntimeController>,
|
||||||
|
admission_tx: &watch::Sender<bool>,
|
||||||
|
config_rx: watch::Receiver<Arc<ProxyConfig>>,
|
||||||
|
) {
|
||||||
|
if config.general.use_middle_proxy {
|
||||||
|
if let Some(pool) = me_pool.as_ref() {
|
||||||
|
let initial_ready = pool.admission_ready_conditional_cast().await;
|
||||||
|
admission_tx.send_replace(initial_ready);
|
||||||
|
let _ = route_runtime.set_mode(RelayRouteMode::Middle);
|
||||||
|
if initial_ready {
|
||||||
|
info!("Conditional-admission gate: open / ME pool READY");
|
||||||
|
} else {
|
||||||
|
warn!("Conditional-admission gate: closed / ME pool is NOT ready)");
|
||||||
|
}
|
||||||
|
|
||||||
|
let pool_for_gate = pool.clone();
|
||||||
|
let admission_tx_gate = admission_tx.clone();
|
||||||
|
let route_runtime_gate = route_runtime.clone();
|
||||||
|
let mut config_rx_gate = config_rx.clone();
|
||||||
|
let mut admission_poll_ms = config.general.me_admission_poll_ms.max(1);
|
||||||
|
let mut fallback_enabled = config.general.me2dc_fallback;
|
||||||
|
tokio::spawn(async move {
|
||||||
|
let mut gate_open = initial_ready;
|
||||||
|
let mut route_mode = RelayRouteMode::Middle;
|
||||||
|
let mut ready_observed = initial_ready;
|
||||||
|
let mut not_ready_since = if initial_ready {
|
||||||
|
None
|
||||||
|
} else {
|
||||||
|
Some(Instant::now())
|
||||||
|
};
|
||||||
|
loop {
|
||||||
|
tokio::select! {
|
||||||
|
changed = config_rx_gate.changed() => {
|
||||||
|
if changed.is_err() {
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
let cfg = config_rx_gate.borrow_and_update().clone();
|
||||||
|
admission_poll_ms = cfg.general.me_admission_poll_ms.max(1);
|
||||||
|
fallback_enabled = cfg.general.me2dc_fallback;
|
||||||
|
continue;
|
||||||
|
}
|
||||||
|
_ = tokio::time::sleep(Duration::from_millis(admission_poll_ms)) => {}
|
||||||
|
}
|
||||||
|
let ready = pool_for_gate.admission_ready_conditional_cast().await;
|
||||||
|
let now = Instant::now();
|
||||||
|
let (next_gate_open, next_route_mode, next_fallback_active) = if ready {
|
||||||
|
ready_observed = true;
|
||||||
|
not_ready_since = None;
|
||||||
|
(true, RelayRouteMode::Middle, false)
|
||||||
|
} else {
|
||||||
|
let not_ready_started_at = *not_ready_since.get_or_insert(now);
|
||||||
|
let not_ready_for = now.saturating_duration_since(not_ready_started_at);
|
||||||
|
let fallback_after = if ready_observed {
|
||||||
|
RUNTIME_FALLBACK_AFTER
|
||||||
|
} else {
|
||||||
|
STARTUP_FALLBACK_AFTER
|
||||||
|
};
|
||||||
|
if fallback_enabled && not_ready_for > fallback_after {
|
||||||
|
(true, RelayRouteMode::Direct, true)
|
||||||
|
} else {
|
||||||
|
(false, RelayRouteMode::Middle, false)
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
if next_route_mode != route_mode {
|
||||||
|
route_mode = next_route_mode;
|
||||||
|
if let Some(snapshot) = route_runtime_gate.set_mode(route_mode) {
|
||||||
|
if matches!(route_mode, RelayRouteMode::Middle) {
|
||||||
|
info!(
|
||||||
|
target_mode = route_mode.as_str(),
|
||||||
|
cutover_generation = snapshot.generation,
|
||||||
|
"Middle-End routing restored for new sessions"
|
||||||
|
);
|
||||||
|
} else {
|
||||||
|
let fallback_after = if ready_observed {
|
||||||
|
RUNTIME_FALLBACK_AFTER
|
||||||
|
} else {
|
||||||
|
STARTUP_FALLBACK_AFTER
|
||||||
|
};
|
||||||
|
warn!(
|
||||||
|
target_mode = route_mode.as_str(),
|
||||||
|
cutover_generation = snapshot.generation,
|
||||||
|
grace_secs = fallback_after.as_secs(),
|
||||||
|
"ME pool stayed not-ready beyond grace; routing new sessions via Direct-DC"
|
||||||
|
);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if next_gate_open != gate_open {
|
||||||
|
gate_open = next_gate_open;
|
||||||
|
admission_tx_gate.send_replace(gate_open);
|
||||||
|
if gate_open {
|
||||||
|
if next_fallback_active {
|
||||||
|
warn!("Conditional-admission gate opened in ME fallback mode");
|
||||||
|
} else {
|
||||||
|
info!("Conditional-admission gate opened / ME pool READY");
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
warn!("Conditional-admission gate closed / ME pool is NOT ready");
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
});
|
||||||
|
} else {
|
||||||
|
admission_tx.send_replace(false);
|
||||||
|
let _ = route_runtime.set_mode(RelayRouteMode::Direct);
|
||||||
|
warn!("Conditional-admission gate: closed / ME pool is UNAVAILABLE");
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
admission_tx.send_replace(true);
|
||||||
|
let _ = route_runtime.set_mode(RelayRouteMode::Direct);
|
||||||
|
}
|
||||||
|
}
|
||||||
220
src/maestro/connectivity.rs
Normal file
220
src/maestro/connectivity.rs
Normal file
@@ -0,0 +1,220 @@
|
|||||||
|
use std::sync::Arc;
|
||||||
|
use std::time::Instant;
|
||||||
|
|
||||||
|
use tokio::sync::RwLock;
|
||||||
|
use tracing::info;
|
||||||
|
|
||||||
|
use crate::config::ProxyConfig;
|
||||||
|
use crate::crypto::SecureRandom;
|
||||||
|
use crate::network::probe::NetworkDecision;
|
||||||
|
use crate::startup::{
|
||||||
|
COMPONENT_DC_CONNECTIVITY_PING, COMPONENT_ME_CONNECTIVITY_PING, COMPONENT_RUNTIME_READY,
|
||||||
|
StartupTracker,
|
||||||
|
};
|
||||||
|
use crate::transport::middle_proxy::{
|
||||||
|
MePingFamily, MePingSample, MePool, format_me_route, format_sample_line, run_me_ping,
|
||||||
|
};
|
||||||
|
use crate::transport::UpstreamManager;
|
||||||
|
|
||||||
|
pub(crate) async fn run_startup_connectivity(
|
||||||
|
config: &Arc<ProxyConfig>,
|
||||||
|
me_pool: &Option<Arc<MePool>>,
|
||||||
|
rng: Arc<SecureRandom>,
|
||||||
|
startup_tracker: &Arc<StartupTracker>,
|
||||||
|
upstream_manager: Arc<UpstreamManager>,
|
||||||
|
prefer_ipv6: bool,
|
||||||
|
decision: &NetworkDecision,
|
||||||
|
process_started_at: Instant,
|
||||||
|
api_me_pool: Arc<RwLock<Option<Arc<MePool>>>>,
|
||||||
|
) {
|
||||||
|
if me_pool.is_some() {
|
||||||
|
startup_tracker
|
||||||
|
.start_component(
|
||||||
|
COMPONENT_ME_CONNECTIVITY_PING,
|
||||||
|
Some("run startup ME connectivity check".to_string()),
|
||||||
|
)
|
||||||
|
.await;
|
||||||
|
} else {
|
||||||
|
startup_tracker
|
||||||
|
.skip_component(
|
||||||
|
COMPONENT_ME_CONNECTIVITY_PING,
|
||||||
|
Some("ME pool is not available".to_string()),
|
||||||
|
)
|
||||||
|
.await;
|
||||||
|
}
|
||||||
|
if let Some(pool) = me_pool {
|
||||||
|
let me_results = run_me_ping(pool, &rng).await;
|
||||||
|
|
||||||
|
let v4_ok = me_results.iter().any(|r| {
|
||||||
|
matches!(r.family, MePingFamily::V4)
|
||||||
|
&& r.samples.iter().any(|s| s.error.is_none() && s.handshake_ms.is_some())
|
||||||
|
});
|
||||||
|
let v6_ok = me_results.iter().any(|r| {
|
||||||
|
matches!(r.family, MePingFamily::V6)
|
||||||
|
&& r.samples.iter().any(|s| s.error.is_none() && s.handshake_ms.is_some())
|
||||||
|
});
|
||||||
|
|
||||||
|
info!("================= Telegram ME Connectivity =================");
|
||||||
|
if v4_ok && v6_ok {
|
||||||
|
info!(" IPv4 and IPv6 available");
|
||||||
|
} else if v4_ok {
|
||||||
|
info!(" IPv4 only / IPv6 unavailable");
|
||||||
|
} else if v6_ok {
|
||||||
|
info!(" IPv6 only / IPv4 unavailable");
|
||||||
|
} else {
|
||||||
|
info!(" No ME connectivity");
|
||||||
|
}
|
||||||
|
let me_route =
|
||||||
|
format_me_route(&config.upstreams, &me_results, prefer_ipv6, v4_ok, v6_ok).await;
|
||||||
|
info!(" via {}", me_route);
|
||||||
|
info!("============================================================");
|
||||||
|
|
||||||
|
use std::collections::BTreeMap;
|
||||||
|
let mut grouped: BTreeMap<i32, Vec<MePingSample>> = BTreeMap::new();
|
||||||
|
for report in me_results {
|
||||||
|
for s in report.samples {
|
||||||
|
grouped.entry(s.dc).or_default().push(s);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
let family_order = if prefer_ipv6 {
|
||||||
|
vec![MePingFamily::V6, MePingFamily::V4]
|
||||||
|
} else {
|
||||||
|
vec![MePingFamily::V4, MePingFamily::V6]
|
||||||
|
};
|
||||||
|
|
||||||
|
for (dc, samples) in grouped {
|
||||||
|
for family in &family_order {
|
||||||
|
let fam_samples: Vec<&MePingSample> = samples
|
||||||
|
.iter()
|
||||||
|
.filter(|s| matches!(s.family, f if &f == family))
|
||||||
|
.collect();
|
||||||
|
if fam_samples.is_empty() {
|
||||||
|
continue;
|
||||||
|
}
|
||||||
|
|
||||||
|
let fam_label = match family {
|
||||||
|
MePingFamily::V4 => "IPv4",
|
||||||
|
MePingFamily::V6 => "IPv6",
|
||||||
|
};
|
||||||
|
info!(" DC{} [{}]", dc, fam_label);
|
||||||
|
for sample in fam_samples {
|
||||||
|
let line = format_sample_line(sample);
|
||||||
|
info!("{}", line);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
info!("============================================================");
|
||||||
|
startup_tracker
|
||||||
|
.complete_component(
|
||||||
|
COMPONENT_ME_CONNECTIVITY_PING,
|
||||||
|
Some("startup ME connectivity check completed".to_string()),
|
||||||
|
)
|
||||||
|
.await;
|
||||||
|
}
|
||||||
|
|
||||||
|
info!("================= Telegram DC Connectivity =================");
|
||||||
|
startup_tracker
|
||||||
|
.start_component(
|
||||||
|
COMPONENT_DC_CONNECTIVITY_PING,
|
||||||
|
Some("run startup DC connectivity check".to_string()),
|
||||||
|
)
|
||||||
|
.await;
|
||||||
|
|
||||||
|
let ping_results = upstream_manager
|
||||||
|
.ping_all_dcs(
|
||||||
|
prefer_ipv6,
|
||||||
|
&config.dc_overrides,
|
||||||
|
decision.ipv4_dc,
|
||||||
|
decision.ipv6_dc,
|
||||||
|
)
|
||||||
|
.await;
|
||||||
|
|
||||||
|
for upstream_result in &ping_results {
|
||||||
|
let v6_works = upstream_result.v6_results.iter().any(|r| r.rtt_ms.is_some());
|
||||||
|
let v4_works = upstream_result.v4_results.iter().any(|r| r.rtt_ms.is_some());
|
||||||
|
|
||||||
|
if upstream_result.both_available {
|
||||||
|
if prefer_ipv6 {
|
||||||
|
info!(" IPv6 in use / IPv4 is fallback");
|
||||||
|
} else {
|
||||||
|
info!(" IPv4 in use / IPv6 is fallback");
|
||||||
|
}
|
||||||
|
} else if v6_works && !v4_works {
|
||||||
|
info!(" IPv6 only / IPv4 unavailable");
|
||||||
|
} else if v4_works && !v6_works {
|
||||||
|
info!(" IPv4 only / IPv6 unavailable");
|
||||||
|
} else if !v6_works && !v4_works {
|
||||||
|
info!(" No DC connectivity");
|
||||||
|
}
|
||||||
|
|
||||||
|
info!(" via {}", upstream_result.upstream_name);
|
||||||
|
info!("============================================================");
|
||||||
|
|
||||||
|
if v6_works {
|
||||||
|
for dc in &upstream_result.v6_results {
|
||||||
|
let addr_str = format!("{}:{}", dc.dc_addr.ip(), dc.dc_addr.port());
|
||||||
|
match &dc.rtt_ms {
|
||||||
|
Some(rtt) => {
|
||||||
|
info!(" DC{} [IPv6] {} - {:.0} ms", dc.dc_idx, addr_str, rtt);
|
||||||
|
}
|
||||||
|
None => {
|
||||||
|
let err = dc.error.as_deref().unwrap_or("fail");
|
||||||
|
info!(" DC{} [IPv6] {} - FAIL ({})", dc.dc_idx, addr_str, err);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
info!("============================================================");
|
||||||
|
}
|
||||||
|
|
||||||
|
if v4_works {
|
||||||
|
for dc in &upstream_result.v4_results {
|
||||||
|
let addr_str = format!("{}:{}", dc.dc_addr.ip(), dc.dc_addr.port());
|
||||||
|
match &dc.rtt_ms {
|
||||||
|
Some(rtt) => {
|
||||||
|
info!(
|
||||||
|
" DC{} [IPv4] {}\t\t\t\t{:.0} ms",
|
||||||
|
dc.dc_idx, addr_str, rtt
|
||||||
|
);
|
||||||
|
}
|
||||||
|
None => {
|
||||||
|
let err = dc.error.as_deref().unwrap_or("fail");
|
||||||
|
info!(
|
||||||
|
" DC{} [IPv4] {}:\t\t\t\tFAIL ({})",
|
||||||
|
dc.dc_idx, addr_str, err
|
||||||
|
);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
info!("============================================================");
|
||||||
|
}
|
||||||
|
}
|
||||||
|
startup_tracker
|
||||||
|
.complete_component(
|
||||||
|
COMPONENT_DC_CONNECTIVITY_PING,
|
||||||
|
Some("startup DC connectivity check completed".to_string()),
|
||||||
|
)
|
||||||
|
.await;
|
||||||
|
|
||||||
|
let initialized_secs = process_started_at.elapsed().as_secs();
|
||||||
|
let second_suffix = if initialized_secs == 1 { "" } else { "s" };
|
||||||
|
startup_tracker
|
||||||
|
.start_component(
|
||||||
|
COMPONENT_RUNTIME_READY,
|
||||||
|
Some("finalize startup runtime state".to_string()),
|
||||||
|
)
|
||||||
|
.await;
|
||||||
|
info!("===================== Telegram Startup =====================");
|
||||||
|
info!(
|
||||||
|
" DC/ME Initialized in {} second{}",
|
||||||
|
initialized_secs, second_suffix
|
||||||
|
);
|
||||||
|
info!("============================================================");
|
||||||
|
|
||||||
|
if let Some(pool) = me_pool {
|
||||||
|
pool.set_runtime_ready(true);
|
||||||
|
}
|
||||||
|
*api_me_pool.write().await = me_pool.clone();
|
||||||
|
}
|
||||||
320
src/maestro/helpers.rs
Normal file
320
src/maestro/helpers.rs
Normal file
@@ -0,0 +1,320 @@
|
|||||||
|
use std::time::Duration;
|
||||||
|
|
||||||
|
use tokio::sync::watch;
|
||||||
|
use tracing::{debug, error, info, warn};
|
||||||
|
|
||||||
|
use crate::cli;
|
||||||
|
use crate::config::ProxyConfig;
|
||||||
|
use crate::transport::middle_proxy::{
|
||||||
|
ProxyConfigData, fetch_proxy_config_with_raw, load_proxy_config_cache, save_proxy_config_cache,
|
||||||
|
};
|
||||||
|
|
||||||
|
pub(crate) fn parse_cli() -> (String, bool, Option<String>) {
|
||||||
|
let mut config_path = "config.toml".to_string();
|
||||||
|
let mut silent = false;
|
||||||
|
let mut log_level: Option<String> = None;
|
||||||
|
|
||||||
|
let args: Vec<String> = std::env::args().skip(1).collect();
|
||||||
|
|
||||||
|
// Check for --init first (handled before tokio)
|
||||||
|
if let Some(init_opts) = cli::parse_init_args(&args) {
|
||||||
|
if let Err(e) = cli::run_init(init_opts) {
|
||||||
|
eprintln!("[telemt] Init failed: {}", e);
|
||||||
|
std::process::exit(1);
|
||||||
|
}
|
||||||
|
std::process::exit(0);
|
||||||
|
}
|
||||||
|
|
||||||
|
let mut i = 0;
|
||||||
|
while i < args.len() {
|
||||||
|
match args[i].as_str() {
|
||||||
|
"--silent" | "-s" => {
|
||||||
|
silent = true;
|
||||||
|
}
|
||||||
|
"--log-level" => {
|
||||||
|
i += 1;
|
||||||
|
if i < args.len() {
|
||||||
|
log_level = Some(args[i].clone());
|
||||||
|
}
|
||||||
|
}
|
||||||
|
s if s.starts_with("--log-level=") => {
|
||||||
|
log_level = Some(s.trim_start_matches("--log-level=").to_string());
|
||||||
|
}
|
||||||
|
"--help" | "-h" => {
|
||||||
|
eprintln!("Usage: telemt [config.toml] [OPTIONS]");
|
||||||
|
eprintln!();
|
||||||
|
eprintln!("Options:");
|
||||||
|
eprintln!(" --silent, -s Suppress info logs");
|
||||||
|
eprintln!(" --log-level <LEVEL> debug|verbose|normal|silent");
|
||||||
|
eprintln!(" --help, -h Show this help");
|
||||||
|
eprintln!();
|
||||||
|
eprintln!("Setup (fire-and-forget):");
|
||||||
|
eprintln!(
|
||||||
|
" --init Generate config, install systemd service, start"
|
||||||
|
);
|
||||||
|
eprintln!(" --port <PORT> Listen port (default: 443)");
|
||||||
|
eprintln!(
|
||||||
|
" --domain <DOMAIN> TLS domain for masking (default: www.google.com)"
|
||||||
|
);
|
||||||
|
eprintln!(
|
||||||
|
" --secret <HEX> 32-char hex secret (auto-generated if omitted)"
|
||||||
|
);
|
||||||
|
eprintln!(" --user <NAME> Username (default: user)");
|
||||||
|
eprintln!(" --config-dir <DIR> Config directory (default: /etc/telemt)");
|
||||||
|
eprintln!(" --no-start Don't start the service after install");
|
||||||
|
std::process::exit(0);
|
||||||
|
}
|
||||||
|
"--version" | "-V" => {
|
||||||
|
println!("telemt {}", env!("CARGO_PKG_VERSION"));
|
||||||
|
std::process::exit(0);
|
||||||
|
}
|
||||||
|
s if !s.starts_with('-') => {
|
||||||
|
config_path = s.to_string();
|
||||||
|
}
|
||||||
|
other => {
|
||||||
|
eprintln!("Unknown option: {}", other);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
i += 1;
|
||||||
|
}
|
||||||
|
|
||||||
|
(config_path, silent, log_level)
|
||||||
|
}
|
||||||
|
|
||||||
|
pub(crate) fn print_proxy_links(host: &str, port: u16, config: &ProxyConfig) {
|
||||||
|
info!(target: "telemt::links", "--- Proxy Links ({}) ---", host);
|
||||||
|
for user_name in config.general.links.show.resolve_users(&config.access.users) {
|
||||||
|
if let Some(secret) = config.access.users.get(user_name) {
|
||||||
|
info!(target: "telemt::links", "User: {}", user_name);
|
||||||
|
if config.general.modes.classic {
|
||||||
|
info!(
|
||||||
|
target: "telemt::links",
|
||||||
|
" Classic: tg://proxy?server={}&port={}&secret={}",
|
||||||
|
host, port, secret
|
||||||
|
);
|
||||||
|
}
|
||||||
|
if config.general.modes.secure {
|
||||||
|
info!(
|
||||||
|
target: "telemt::links",
|
||||||
|
" DD: tg://proxy?server={}&port={}&secret=dd{}",
|
||||||
|
host, port, secret
|
||||||
|
);
|
||||||
|
}
|
||||||
|
if config.general.modes.tls {
|
||||||
|
let mut domains = Vec::with_capacity(1 + config.censorship.tls_domains.len());
|
||||||
|
domains.push(config.censorship.tls_domain.clone());
|
||||||
|
for d in &config.censorship.tls_domains {
|
||||||
|
if !domains.contains(d) {
|
||||||
|
domains.push(d.clone());
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
for domain in domains {
|
||||||
|
let domain_hex = hex::encode(&domain);
|
||||||
|
info!(
|
||||||
|
target: "telemt::links",
|
||||||
|
" EE-TLS: tg://proxy?server={}&port={}&secret=ee{}{}",
|
||||||
|
host, port, secret, domain_hex
|
||||||
|
);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
warn!(target: "telemt::links", "User '{}' in show_link not found", user_name);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
info!(target: "telemt::links", "------------------------");
|
||||||
|
}
|
||||||
|
|
||||||
|
pub(crate) async fn write_beobachten_snapshot(path: &str, payload: &str) -> std::io::Result<()> {
|
||||||
|
if let Some(parent) = std::path::Path::new(path).parent()
|
||||||
|
&& !parent.as_os_str().is_empty()
|
||||||
|
{
|
||||||
|
tokio::fs::create_dir_all(parent).await?;
|
||||||
|
}
|
||||||
|
tokio::fs::write(path, payload).await
|
||||||
|
}
|
||||||
|
|
||||||
|
pub(crate) fn unit_label(value: u64, singular: &'static str, plural: &'static str) -> &'static str {
|
||||||
|
if value == 1 { singular } else { plural }
|
||||||
|
}
|
||||||
|
|
||||||
|
pub(crate) fn format_uptime(total_secs: u64) -> String {
|
||||||
|
const SECS_PER_MINUTE: u64 = 60;
|
||||||
|
const SECS_PER_HOUR: u64 = 60 * SECS_PER_MINUTE;
|
||||||
|
const SECS_PER_DAY: u64 = 24 * SECS_PER_HOUR;
|
||||||
|
const SECS_PER_MONTH: u64 = 30 * SECS_PER_DAY;
|
||||||
|
const SECS_PER_YEAR: u64 = 12 * SECS_PER_MONTH;
|
||||||
|
|
||||||
|
let mut remaining = total_secs;
|
||||||
|
let years = remaining / SECS_PER_YEAR;
|
||||||
|
remaining %= SECS_PER_YEAR;
|
||||||
|
let months = remaining / SECS_PER_MONTH;
|
||||||
|
remaining %= SECS_PER_MONTH;
|
||||||
|
let days = remaining / SECS_PER_DAY;
|
||||||
|
remaining %= SECS_PER_DAY;
|
||||||
|
let hours = remaining / SECS_PER_HOUR;
|
||||||
|
remaining %= SECS_PER_HOUR;
|
||||||
|
let minutes = remaining / SECS_PER_MINUTE;
|
||||||
|
let seconds = remaining % SECS_PER_MINUTE;
|
||||||
|
|
||||||
|
let mut parts = Vec::new();
|
||||||
|
if total_secs > SECS_PER_YEAR {
|
||||||
|
parts.push(format!("{} {}", years, unit_label(years, "year", "years")));
|
||||||
|
}
|
||||||
|
if total_secs > SECS_PER_MONTH {
|
||||||
|
parts.push(format!(
|
||||||
|
"{} {}",
|
||||||
|
months,
|
||||||
|
unit_label(months, "month", "months")
|
||||||
|
));
|
||||||
|
}
|
||||||
|
if total_secs > SECS_PER_DAY {
|
||||||
|
parts.push(format!("{} {}", days, unit_label(days, "day", "days")));
|
||||||
|
}
|
||||||
|
if total_secs > SECS_PER_HOUR {
|
||||||
|
parts.push(format!("{} {}", hours, unit_label(hours, "hour", "hours")));
|
||||||
|
}
|
||||||
|
if total_secs > SECS_PER_MINUTE {
|
||||||
|
parts.push(format!(
|
||||||
|
"{} {}",
|
||||||
|
minutes,
|
||||||
|
unit_label(minutes, "minute", "minutes")
|
||||||
|
));
|
||||||
|
}
|
||||||
|
parts.push(format!(
|
||||||
|
"{} {}",
|
||||||
|
seconds,
|
||||||
|
unit_label(seconds, "second", "seconds")
|
||||||
|
));
|
||||||
|
|
||||||
|
format!("{} / {} seconds", parts.join(", "), total_secs)
|
||||||
|
}
|
||||||
|
|
||||||
|
pub(crate) async fn wait_until_admission_open(admission_rx: &mut watch::Receiver<bool>) -> bool {
|
||||||
|
loop {
|
||||||
|
if *admission_rx.borrow() {
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
if admission_rx.changed().await.is_err() {
|
||||||
|
return *admission_rx.borrow();
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
pub(crate) fn is_expected_handshake_eof(err: &crate::error::ProxyError) -> bool {
|
||||||
|
err.to_string().contains("expected 64 bytes, got 0")
|
||||||
|
}
|
||||||
|
|
||||||
|
pub(crate) async fn load_startup_proxy_config_snapshot(
|
||||||
|
url: &str,
|
||||||
|
cache_path: Option<&str>,
|
||||||
|
me2dc_fallback: bool,
|
||||||
|
label: &'static str,
|
||||||
|
) -> Option<ProxyConfigData> {
|
||||||
|
loop {
|
||||||
|
match fetch_proxy_config_with_raw(url).await {
|
||||||
|
Ok((cfg, raw)) => {
|
||||||
|
if !cfg.map.is_empty() {
|
||||||
|
if let Some(path) = cache_path
|
||||||
|
&& let Err(e) = save_proxy_config_cache(path, &raw).await
|
||||||
|
{
|
||||||
|
warn!(error = %e, path, snapshot = label, "Failed to store startup proxy-config cache");
|
||||||
|
}
|
||||||
|
return Some(cfg);
|
||||||
|
}
|
||||||
|
|
||||||
|
warn!(snapshot = label, url, "Startup proxy-config is empty; trying disk cache");
|
||||||
|
if let Some(path) = cache_path {
|
||||||
|
match load_proxy_config_cache(path).await {
|
||||||
|
Ok(cached) if !cached.map.is_empty() => {
|
||||||
|
info!(
|
||||||
|
snapshot = label,
|
||||||
|
path,
|
||||||
|
proxy_for_lines = cached.proxy_for_lines,
|
||||||
|
"Loaded startup proxy-config from disk cache"
|
||||||
|
);
|
||||||
|
return Some(cached);
|
||||||
|
}
|
||||||
|
Ok(_) => {
|
||||||
|
warn!(
|
||||||
|
snapshot = label,
|
||||||
|
path,
|
||||||
|
"Startup proxy-config cache is empty; ignoring cache file"
|
||||||
|
);
|
||||||
|
}
|
||||||
|
Err(cache_err) => {
|
||||||
|
debug!(
|
||||||
|
snapshot = label,
|
||||||
|
path,
|
||||||
|
error = %cache_err,
|
||||||
|
"Startup proxy-config cache unavailable"
|
||||||
|
);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if me2dc_fallback {
|
||||||
|
error!(
|
||||||
|
snapshot = label,
|
||||||
|
"Startup proxy-config unavailable and no saved config found; falling back to direct mode"
|
||||||
|
);
|
||||||
|
return None;
|
||||||
|
}
|
||||||
|
|
||||||
|
warn!(
|
||||||
|
snapshot = label,
|
||||||
|
retry_in_secs = 2,
|
||||||
|
"Startup proxy-config unavailable and no saved config found; retrying because me2dc_fallback=false"
|
||||||
|
);
|
||||||
|
tokio::time::sleep(Duration::from_secs(2)).await;
|
||||||
|
}
|
||||||
|
Err(fetch_err) => {
|
||||||
|
if let Some(path) = cache_path {
|
||||||
|
match load_proxy_config_cache(path).await {
|
||||||
|
Ok(cached) if !cached.map.is_empty() => {
|
||||||
|
info!(
|
||||||
|
snapshot = label,
|
||||||
|
path,
|
||||||
|
proxy_for_lines = cached.proxy_for_lines,
|
||||||
|
"Loaded startup proxy-config from disk cache"
|
||||||
|
);
|
||||||
|
return Some(cached);
|
||||||
|
}
|
||||||
|
Ok(_) => {
|
||||||
|
warn!(
|
||||||
|
snapshot = label,
|
||||||
|
path,
|
||||||
|
"Startup proxy-config cache is empty; ignoring cache file"
|
||||||
|
);
|
||||||
|
}
|
||||||
|
Err(cache_err) => {
|
||||||
|
debug!(
|
||||||
|
snapshot = label,
|
||||||
|
path,
|
||||||
|
error = %cache_err,
|
||||||
|
"Startup proxy-config cache unavailable"
|
||||||
|
);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if me2dc_fallback {
|
||||||
|
error!(
|
||||||
|
snapshot = label,
|
||||||
|
error = %fetch_err,
|
||||||
|
"Startup proxy-config unavailable and no cached data; falling back to direct mode"
|
||||||
|
);
|
||||||
|
return None;
|
||||||
|
}
|
||||||
|
|
||||||
|
warn!(
|
||||||
|
snapshot = label,
|
||||||
|
error = %fetch_err,
|
||||||
|
retry_in_secs = 2,
|
||||||
|
"Startup proxy-config unavailable; retrying because me2dc_fallback=false"
|
||||||
|
);
|
||||||
|
tokio::time::sleep(Duration::from_secs(2)).await;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
465
src/maestro/listeners.rs
Normal file
465
src/maestro/listeners.rs
Normal file
@@ -0,0 +1,465 @@
|
|||||||
|
use std::error::Error;
|
||||||
|
use std::net::{IpAddr, SocketAddr};
|
||||||
|
use std::sync::Arc;
|
||||||
|
use std::time::Duration;
|
||||||
|
|
||||||
|
use tokio::net::TcpListener;
|
||||||
|
#[cfg(unix)]
|
||||||
|
use tokio::net::UnixListener;
|
||||||
|
use tokio::sync::{Semaphore, watch};
|
||||||
|
use tracing::{debug, error, info, warn};
|
||||||
|
|
||||||
|
use crate::config::ProxyConfig;
|
||||||
|
use crate::crypto::SecureRandom;
|
||||||
|
use crate::ip_tracker::UserIpTracker;
|
||||||
|
use crate::proxy::route_mode::{ROUTE_SWITCH_ERROR_MSG, RouteRuntimeController};
|
||||||
|
use crate::proxy::ClientHandler;
|
||||||
|
use crate::startup::{COMPONENT_LISTENERS_BIND, StartupTracker};
|
||||||
|
use crate::stats::beobachten::BeobachtenStore;
|
||||||
|
use crate::stats::{ReplayChecker, Stats};
|
||||||
|
use crate::stream::BufferPool;
|
||||||
|
use crate::tls_front::TlsFrontCache;
|
||||||
|
use crate::transport::middle_proxy::MePool;
|
||||||
|
use crate::transport::{
|
||||||
|
ListenOptions, UpstreamManager, create_listener, find_listener_processes,
|
||||||
|
};
|
||||||
|
|
||||||
|
use super::helpers::{is_expected_handshake_eof, print_proxy_links, wait_until_admission_open};
|
||||||
|
|
||||||
|
pub(crate) struct BoundListeners {
|
||||||
|
pub(crate) listeners: Vec<(TcpListener, bool)>,
|
||||||
|
pub(crate) has_unix_listener: bool,
|
||||||
|
}
|
||||||
|
|
||||||
|
#[allow(clippy::too_many_arguments)]
|
||||||
|
pub(crate) async fn bind_listeners(
|
||||||
|
config: &Arc<ProxyConfig>,
|
||||||
|
decision_ipv4_dc: bool,
|
||||||
|
decision_ipv6_dc: bool,
|
||||||
|
detected_ip_v4: Option<IpAddr>,
|
||||||
|
detected_ip_v6: Option<IpAddr>,
|
||||||
|
startup_tracker: &Arc<StartupTracker>,
|
||||||
|
config_rx: watch::Receiver<Arc<ProxyConfig>>,
|
||||||
|
admission_rx: watch::Receiver<bool>,
|
||||||
|
stats: Arc<Stats>,
|
||||||
|
upstream_manager: Arc<UpstreamManager>,
|
||||||
|
replay_checker: Arc<ReplayChecker>,
|
||||||
|
buffer_pool: Arc<BufferPool>,
|
||||||
|
rng: Arc<SecureRandom>,
|
||||||
|
me_pool: Option<Arc<MePool>>,
|
||||||
|
route_runtime: Arc<RouteRuntimeController>,
|
||||||
|
tls_cache: Option<Arc<TlsFrontCache>>,
|
||||||
|
ip_tracker: Arc<UserIpTracker>,
|
||||||
|
beobachten: Arc<BeobachtenStore>,
|
||||||
|
max_connections: Arc<Semaphore>,
|
||||||
|
) -> Result<BoundListeners, Box<dyn Error>> {
|
||||||
|
startup_tracker
|
||||||
|
.start_component(
|
||||||
|
COMPONENT_LISTENERS_BIND,
|
||||||
|
Some("bind TCP/Unix listeners".to_string()),
|
||||||
|
)
|
||||||
|
.await;
|
||||||
|
let mut listeners = Vec::new();
|
||||||
|
|
||||||
|
for listener_conf in &config.server.listeners {
|
||||||
|
let addr = SocketAddr::new(listener_conf.ip, config.server.port);
|
||||||
|
if addr.is_ipv4() && !decision_ipv4_dc {
|
||||||
|
warn!(%addr, "Skipping IPv4 listener: IPv4 disabled by [network]");
|
||||||
|
continue;
|
||||||
|
}
|
||||||
|
if addr.is_ipv6() && !decision_ipv6_dc {
|
||||||
|
warn!(%addr, "Skipping IPv6 listener: IPv6 disabled by [network]");
|
||||||
|
continue;
|
||||||
|
}
|
||||||
|
let options = ListenOptions {
|
||||||
|
reuse_port: listener_conf.reuse_allow,
|
||||||
|
ipv6_only: listener_conf.ip.is_ipv6(),
|
||||||
|
..Default::default()
|
||||||
|
};
|
||||||
|
|
||||||
|
match create_listener(addr, &options) {
|
||||||
|
Ok(socket) => {
|
||||||
|
let listener = TcpListener::from_std(socket.into())?;
|
||||||
|
info!("Listening on {}", addr);
|
||||||
|
let listener_proxy_protocol =
|
||||||
|
listener_conf.proxy_protocol.unwrap_or(config.server.proxy_protocol);
|
||||||
|
|
||||||
|
let public_host = if let Some(ref announce) = listener_conf.announce {
|
||||||
|
announce.clone()
|
||||||
|
} else if listener_conf.ip.is_unspecified() {
|
||||||
|
if listener_conf.ip.is_ipv4() {
|
||||||
|
detected_ip_v4
|
||||||
|
.map(|ip| ip.to_string())
|
||||||
|
.unwrap_or_else(|| listener_conf.ip.to_string())
|
||||||
|
} else {
|
||||||
|
detected_ip_v6
|
||||||
|
.map(|ip| ip.to_string())
|
||||||
|
.unwrap_or_else(|| listener_conf.ip.to_string())
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
listener_conf.ip.to_string()
|
||||||
|
};
|
||||||
|
|
||||||
|
if config.general.links.public_host.is_none() && !config.general.links.show.is_empty() {
|
||||||
|
let link_port = config.general.links.public_port.unwrap_or(config.server.port);
|
||||||
|
print_proxy_links(&public_host, link_port, config);
|
||||||
|
}
|
||||||
|
|
||||||
|
listeners.push((listener, listener_proxy_protocol));
|
||||||
|
}
|
||||||
|
Err(e) => {
|
||||||
|
if e.kind() == std::io::ErrorKind::AddrInUse {
|
||||||
|
let owners = find_listener_processes(addr);
|
||||||
|
if owners.is_empty() {
|
||||||
|
error!(
|
||||||
|
%addr,
|
||||||
|
"Failed to bind: address already in use (owner process unresolved)"
|
||||||
|
);
|
||||||
|
} else {
|
||||||
|
for owner in owners {
|
||||||
|
error!(
|
||||||
|
%addr,
|
||||||
|
pid = owner.pid,
|
||||||
|
process = %owner.process,
|
||||||
|
"Failed to bind: address already in use"
|
||||||
|
);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if !listener_conf.reuse_allow {
|
||||||
|
error!(
|
||||||
|
%addr,
|
||||||
|
"reuse_allow=false; set [[server.listeners]].reuse_allow=true to allow multi-instance listening"
|
||||||
|
);
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
error!("Failed to bind to {}: {}", addr, e);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if !config.general.links.show.is_empty()
|
||||||
|
&& (config.general.links.public_host.is_some() || listeners.is_empty())
|
||||||
|
{
|
||||||
|
let (host, port) = if let Some(ref h) = config.general.links.public_host {
|
||||||
|
(
|
||||||
|
h.clone(),
|
||||||
|
config.general.links.public_port.unwrap_or(config.server.port),
|
||||||
|
)
|
||||||
|
} else {
|
||||||
|
let ip = detected_ip_v4
|
||||||
|
.or(detected_ip_v6)
|
||||||
|
.map(|ip| ip.to_string());
|
||||||
|
if ip.is_none() {
|
||||||
|
warn!(
|
||||||
|
"show_link is configured but public IP could not be detected. Set public_host in config."
|
||||||
|
);
|
||||||
|
}
|
||||||
|
(
|
||||||
|
ip.unwrap_or_else(|| "UNKNOWN".to_string()),
|
||||||
|
config.general.links.public_port.unwrap_or(config.server.port),
|
||||||
|
)
|
||||||
|
};
|
||||||
|
|
||||||
|
print_proxy_links(&host, port, config);
|
||||||
|
}
|
||||||
|
|
||||||
|
let mut has_unix_listener = false;
|
||||||
|
#[cfg(unix)]
|
||||||
|
if let Some(ref unix_path) = config.server.listen_unix_sock {
|
||||||
|
let _ = tokio::fs::remove_file(unix_path).await;
|
||||||
|
|
||||||
|
let unix_listener = UnixListener::bind(unix_path)?;
|
||||||
|
|
||||||
|
if let Some(ref perm_str) = config.server.listen_unix_sock_perm {
|
||||||
|
match u32::from_str_radix(perm_str.trim_start_matches('0'), 8) {
|
||||||
|
Ok(mode) => {
|
||||||
|
use std::os::unix::fs::PermissionsExt;
|
||||||
|
let perms = std::fs::Permissions::from_mode(mode);
|
||||||
|
if let Err(e) = std::fs::set_permissions(unix_path, perms) {
|
||||||
|
error!("Failed to set unix socket permissions to {}: {}", perm_str, e);
|
||||||
|
} else {
|
||||||
|
info!("Listening on unix:{} (mode {})", unix_path, perm_str);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
Err(e) => {
|
||||||
|
warn!("Invalid listen_unix_sock_perm '{}': {}. Ignoring.", perm_str, e);
|
||||||
|
info!("Listening on unix:{}", unix_path);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
info!("Listening on unix:{}", unix_path);
|
||||||
|
}
|
||||||
|
|
||||||
|
has_unix_listener = true;
|
||||||
|
|
||||||
|
let mut config_rx_unix: watch::Receiver<Arc<ProxyConfig>> = config_rx.clone();
|
||||||
|
let mut admission_rx_unix = admission_rx.clone();
|
||||||
|
let stats = stats.clone();
|
||||||
|
let upstream_manager = upstream_manager.clone();
|
||||||
|
let replay_checker = replay_checker.clone();
|
||||||
|
let buffer_pool = buffer_pool.clone();
|
||||||
|
let rng = rng.clone();
|
||||||
|
let me_pool = me_pool.clone();
|
||||||
|
let route_runtime = route_runtime.clone();
|
||||||
|
let tls_cache = tls_cache.clone();
|
||||||
|
let ip_tracker = ip_tracker.clone();
|
||||||
|
let beobachten = beobachten.clone();
|
||||||
|
let max_connections_unix = max_connections.clone();
|
||||||
|
|
||||||
|
tokio::spawn(async move {
|
||||||
|
let unix_conn_counter = Arc::new(std::sync::atomic::AtomicU64::new(1));
|
||||||
|
|
||||||
|
loop {
|
||||||
|
if !wait_until_admission_open(&mut admission_rx_unix).await {
|
||||||
|
warn!("Conditional-admission gate channel closed for unix listener");
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
match unix_listener.accept().await {
|
||||||
|
Ok((stream, _)) => {
|
||||||
|
let permit = match max_connections_unix.clone().acquire_owned().await {
|
||||||
|
Ok(permit) => permit,
|
||||||
|
Err(_) => {
|
||||||
|
error!("Connection limiter is closed");
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
};
|
||||||
|
let conn_id =
|
||||||
|
unix_conn_counter.fetch_add(1, std::sync::atomic::Ordering::Relaxed);
|
||||||
|
let fake_peer =
|
||||||
|
SocketAddr::from(([127, 0, 0, 1], (conn_id % 65535) as u16));
|
||||||
|
|
||||||
|
let config = config_rx_unix.borrow_and_update().clone();
|
||||||
|
let stats = stats.clone();
|
||||||
|
let upstream_manager = upstream_manager.clone();
|
||||||
|
let replay_checker = replay_checker.clone();
|
||||||
|
let buffer_pool = buffer_pool.clone();
|
||||||
|
let rng = rng.clone();
|
||||||
|
let me_pool = me_pool.clone();
|
||||||
|
let route_runtime = route_runtime.clone();
|
||||||
|
let tls_cache = tls_cache.clone();
|
||||||
|
let ip_tracker = ip_tracker.clone();
|
||||||
|
let beobachten = beobachten.clone();
|
||||||
|
let proxy_protocol_enabled = config.server.proxy_protocol;
|
||||||
|
|
||||||
|
tokio::spawn(async move {
|
||||||
|
let _permit = permit;
|
||||||
|
if let Err(e) = crate::proxy::client::handle_client_stream(
|
||||||
|
stream,
|
||||||
|
fake_peer,
|
||||||
|
config,
|
||||||
|
stats,
|
||||||
|
upstream_manager,
|
||||||
|
replay_checker,
|
||||||
|
buffer_pool,
|
||||||
|
rng,
|
||||||
|
me_pool,
|
||||||
|
route_runtime,
|
||||||
|
tls_cache,
|
||||||
|
ip_tracker,
|
||||||
|
beobachten,
|
||||||
|
proxy_protocol_enabled,
|
||||||
|
)
|
||||||
|
.await
|
||||||
|
{
|
||||||
|
debug!(error = %e, "Unix socket connection error");
|
||||||
|
}
|
||||||
|
});
|
||||||
|
}
|
||||||
|
Err(e) => {
|
||||||
|
error!("Unix socket accept error: {}", e);
|
||||||
|
tokio::time::sleep(Duration::from_millis(100)).await;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
});
|
||||||
|
}
|
||||||
|
|
||||||
|
startup_tracker
|
||||||
|
.complete_component(
|
||||||
|
COMPONENT_LISTENERS_BIND,
|
||||||
|
Some(format!(
|
||||||
|
"listeners configured tcp={} unix={}",
|
||||||
|
listeners.len(),
|
||||||
|
has_unix_listener
|
||||||
|
)),
|
||||||
|
)
|
||||||
|
.await;
|
||||||
|
|
||||||
|
Ok(BoundListeners {
|
||||||
|
listeners,
|
||||||
|
has_unix_listener,
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Spawns one detached accept loop per bound TCP listener and returns immediately.
///
/// For every `(listener, proxy_protocol_flag)` pair this function launches a
/// `tokio::spawn`ed task that repeatedly:
///   1. waits for the conditional-admission gate (`admission_rx`) to open,
///   2. accepts an inbound TCP connection,
///   3. acquires a permit from the shared connection-limit semaphore,
///   4. snapshots the current config and hands the stream to a fresh
///      per-connection `ClientHandler` task.
///
/// Each loop runs until the admission watch channel closes or the semaphore
/// is closed; errors from individual connections are logged, never propagated.
#[allow(clippy::too_many_arguments)]
pub(crate) fn spawn_tcp_accept_loops(
    // Each listener carries its own PROXY-protocol flag (second tuple field).
    listeners: Vec<(TcpListener, bool)>,
    // Watch channel that delivers hot-reloaded config snapshots.
    config_rx: watch::Receiver<Arc<ProxyConfig>>,
    // Conditional-admission gate: accept only while the gate reports open.
    admission_rx: watch::Receiver<bool>,
    stats: Arc<Stats>,
    upstream_manager: Arc<UpstreamManager>,
    replay_checker: Arc<ReplayChecker>,
    buffer_pool: Arc<BufferPool>,
    rng: Arc<SecureRandom>,
    me_pool: Option<Arc<MePool>>,
    route_runtime: Arc<RouteRuntimeController>,
    tls_cache: Option<Arc<TlsFrontCache>>,
    ip_tracker: Arc<UserIpTracker>,
    beobachten: Arc<BeobachtenStore>,
    // Global cap on concurrently handled connections (shared across listeners).
    max_connections: Arc<Semaphore>,
) {
    for (listener, listener_proxy_protocol) in listeners {
        // Clone every shared handle once per listener so each accept-loop
        // task owns its own set of Arcs/receivers.
        let mut config_rx: watch::Receiver<Arc<ProxyConfig>> = config_rx.clone();
        let mut admission_rx_tcp = admission_rx.clone();
        let stats = stats.clone();
        let upstream_manager = upstream_manager.clone();
        let replay_checker = replay_checker.clone();
        let buffer_pool = buffer_pool.clone();
        let rng = rng.clone();
        let me_pool = me_pool.clone();
        let route_runtime = route_runtime.clone();
        let tls_cache = tls_cache.clone();
        let ip_tracker = ip_tracker.clone();
        let beobachten = beobachten.clone();
        let max_connections_tcp = max_connections.clone();

        tokio::spawn(async move {
            loop {
                // Block accepts while the admission gate is closed; a closed
                // channel means shutdown, so the loop ends.
                if !wait_until_admission_open(&mut admission_rx_tcp).await {
                    warn!("Conditional-admission gate channel closed for tcp listener");
                    break;
                }
                match listener.accept().await {
                    Ok((stream, peer_addr)) => {
                        // Acquire a connection permit BEFORE spawning the
                        // handler; the permit is released when the handler
                        // task drops `_permit`.
                        let permit = match max_connections_tcp.clone().acquire_owned().await {
                            Ok(permit) => permit,
                            Err(_) => {
                                // Semaphore closed => orderly teardown.
                                error!("Connection limiter is closed");
                                break;
                            }
                        };
                        // Snapshot the freshest config for this connection.
                        let config = config_rx.borrow_and_update().clone();
                        let stats = stats.clone();
                        let upstream_manager = upstream_manager.clone();
                        let replay_checker = replay_checker.clone();
                        let buffer_pool = buffer_pool.clone();
                        let rng = rng.clone();
                        let me_pool = me_pool.clone();
                        let route_runtime = route_runtime.clone();
                        let tls_cache = tls_cache.clone();
                        let ip_tracker = ip_tracker.clone();
                        let beobachten = beobachten.clone();
                        let proxy_protocol_enabled = listener_proxy_protocol;
                        // Shared slot the handler fills with the real client
                        // address (e.g. learned via PROXY protocol); read back
                        // here purely for richer error logging.
                        let real_peer_report = Arc::new(std::sync::Mutex::new(None));
                        let real_peer_report_for_handler = real_peer_report.clone();

                        tokio::spawn(async move {
                            // Hold the semaphore permit for the connection's
                            // whole lifetime.
                            let _permit = permit;
                            if let Err(e) = ClientHandler::new(
                                stream,
                                peer_addr,
                                config,
                                stats,
                                upstream_manager,
                                replay_checker,
                                buffer_pool,
                                rng,
                                me_pool,
                                route_runtime,
                                tls_cache,
                                ip_tracker,
                                beobachten,
                                proxy_protocol_enabled,
                                real_peer_report_for_handler,
                            )
                            .run()
                            .await
                            {
                                // A poisoned mutex just degrades logging — we
                                // fall back to `None` rather than panic.
                                let real_peer = match real_peer_report.lock() {
                                    Ok(guard) => *guard,
                                    Err(_) => None,
                                };
                                // "Client hung up" shows as common disconnect
                                // io::ErrorKinds, wrapped either directly in
                                // ProxyError::Io or nested in
                                // ProxyError::Stream(StreamError::Io) — hence
                                // the two parallel matches.
                                let peer_closed = matches!(
                                    &e,
                                    crate::error::ProxyError::Io(ioe)
                                        if matches!(
                                            ioe.kind(),
                                            std::io::ErrorKind::ConnectionReset
                                                | std::io::ErrorKind::ConnectionAborted
                                                | std::io::ErrorKind::BrokenPipe
                                                | std::io::ErrorKind::NotConnected
                                        )
                                ) || matches!(
                                    &e,
                                    crate::error::ProxyError::Stream(
                                        crate::error::StreamError::Io(ioe)
                                    )
                                        if matches!(
                                            ioe.kind(),
                                            std::io::ErrorKind::ConnectionReset
                                                | std::io::ErrorKind::ConnectionAborted
                                                | std::io::ErrorKind::BrokenPipe
                                                | std::io::ErrorKind::NotConnected
                                        )
                                );

                                // Middle-End dropped the session: recognized
                                // by its exact error-message string.
                                let me_closed = matches!(
                                    &e,
                                    crate::error::ProxyError::Proxy(msg) if msg == "ME connection lost"
                                );
                                // Deliberate route cutover, also string-keyed.
                                let route_switched = matches!(
                                    &e,
                                    crate::error::ProxyError::Proxy(msg) if msg == ROUTE_SWITCH_ERROR_MSG
                                );

                                // Log-level triage: client disconnects are
                                // debug, ME drops warn, controlled cutover and
                                // handshake EOFs info, everything else warn.
                                match (peer_closed, me_closed) {
                                    (true, _) => {
                                        if let Some(real_peer) = real_peer {
                                            debug!(peer = %peer_addr, real_peer = %real_peer, error = %e, "Connection closed by client");
                                        } else {
                                            debug!(peer = %peer_addr, error = %e, "Connection closed by client");
                                        }
                                    }
                                    (_, true) => {
                                        if let Some(real_peer) = real_peer {
                                            warn!(peer = %peer_addr, real_peer = %real_peer, error = %e, "Connection closed: Middle-End dropped session");
                                        } else {
                                            warn!(peer = %peer_addr, error = %e, "Connection closed: Middle-End dropped session");
                                        }
                                    }
                                    _ if route_switched => {
                                        if let Some(real_peer) = real_peer {
                                            info!(peer = %peer_addr, real_peer = %real_peer, error = %e, "Connection closed by controlled route cutover");
                                        } else {
                                            info!(peer = %peer_addr, error = %e, "Connection closed by controlled route cutover");
                                        }
                                    }
                                    _ if is_expected_handshake_eof(&e) => {
                                        if let Some(real_peer) = real_peer {
                                            info!(peer = %peer_addr, real_peer = %real_peer, error = %e, "Connection closed during initial handshake");
                                        } else {
                                            info!(peer = %peer_addr, error = %e, "Connection closed during initial handshake");
                                        }
                                    }
                                    _ => {
                                        if let Some(real_peer) = real_peer {
                                            warn!(peer = %peer_addr, real_peer = %real_peer, error = %e, "Connection closed with error");
                                        } else {
                                            warn!(peer = %peer_addr, error = %e, "Connection closed with error");
                                        }
                                    }
                                }
                            }
                        });
                    }
                    Err(e) => {
                        // Brief back-off so a persistent accept failure
                        // (e.g. fd exhaustion) does not spin the loop.
                        error!("Accept error: {}", e);
                        tokio::time::sleep(Duration::from_millis(100)).await;
                    }
                }
            }
        });
    }
}
|
||||||
515
src/maestro/me_startup.rs
Normal file
515
src/maestro/me_startup.rs
Normal file
@@ -0,0 +1,515 @@
|
|||||||
|
use std::sync::Arc;
|
||||||
|
use std::time::Duration;
|
||||||
|
|
||||||
|
use tokio::sync::RwLock;
|
||||||
|
use tracing::{error, info, warn};
|
||||||
|
|
||||||
|
use crate::config::ProxyConfig;
|
||||||
|
use crate::crypto::SecureRandom;
|
||||||
|
use crate::network::probe::{NetworkDecision, NetworkProbe};
|
||||||
|
use crate::startup::{
|
||||||
|
COMPONENT_ME_POOL_CONSTRUCT, COMPONENT_ME_POOL_INIT_STAGE1, COMPONENT_ME_PROXY_CONFIG_V4,
|
||||||
|
COMPONENT_ME_PROXY_CONFIG_V6, COMPONENT_ME_SECRET_FETCH, StartupMeStatus, StartupTracker,
|
||||||
|
};
|
||||||
|
use crate::stats::Stats;
|
||||||
|
use crate::transport::middle_proxy::MePool;
|
||||||
|
use crate::transport::UpstreamManager;
|
||||||
|
|
||||||
|
use super::helpers::load_startup_proxy_config_snapshot;
|
||||||
|
|
||||||
|
/// Initializes the Middle-End (ME) connection pool for middle-proxy mode.
///
/// Returns `None` when middle-proxy mode is disabled, when mandatory ME
/// prerequisites (proxy-secret, proxy-configs) are unavailable, or when the
/// init retry budget is exhausted; returns `Some(pool)` otherwise. Every step
/// is mirrored into `startup_tracker` components so the startup API can report
/// progress. With `me2dc_fallback` enabled, pool warm-up is moved to a
/// dedicated background thread so startup can proceed with direct-mode
/// fallback; otherwise initialization blocks (retrying) in the foreground.
pub(crate) async fn initialize_me_pool(
    use_middle_proxy: bool,
    config: &ProxyConfig,
    // Network-capability decision produced by the startup probe.
    decision: &NetworkDecision,
    probe: &NetworkProbe,
    startup_tracker: &Arc<StartupTracker>,
    upstream_manager: Arc<UpstreamManager>,
    rng: Arc<SecureRandom>,
    stats: Arc<Stats>,
    // Shared slot the API layer reads; populated as soon as the pool object
    // exists (before writers finish warming up).
    api_me_pool: Arc<RwLock<Option<Arc<MePool>>>>,
) -> Option<Arc<MePool>> {
    if !use_middle_proxy {
        return None;
    }

    info!("=== Middle Proxy Mode ===");
    // NAT probing requires STUN to be globally enabled.
    let me_nat_probe = config.general.middle_proxy_nat_probe && config.network.stun_use;
    if config.general.middle_proxy_nat_probe && !config.network.stun_use {
        info!("Middle-proxy STUN probing disabled by network.stun_use=false");
    }

    let me2dc_fallback = config.general.me2dc_fallback;
    let me_init_retry_attempts = config.general.me_init_retry_attempts;
    // Below this attempt count retries log at info (warm-up); at/after it,
    // they escalate to warn.
    let me_init_warn_after_attempts: u32 = 3;

    // Global ad_tag (pool default). Used when user has no per-user tag in access.user_ad_tags.
    // expect() is safe per the stated invariant: ad_tag was validated earlier.
    let proxy_tag = config
        .general
        .ad_tag
        .as_ref()
        .map(|tag| hex::decode(tag).expect("general.ad_tag must be validated before startup"));

    // =============================================================
    // CRITICAL: Download Telegram proxy-secret (NOT user secret!)
    //
    // C MTProxy uses TWO separate secrets:
    //   -S flag = 16-byte user secret for client obfuscation
    //   --aes-pwd = 32-512 byte binary file for ME RPC auth
    //
    // proxy-secret is from: https://core.telegram.org/getProxySecret
    // =============================================================
    let proxy_secret_path = config.general.proxy_secret_path.as_deref();
    let pool_size = config.general.middle_proxy_pool_size.max(1);
    // Retry forever when fallback is disabled; bail out to direct mode
    // (break None) when me2dc_fallback is on.
    let proxy_secret = loop {
        match crate::transport::middle_proxy::fetch_proxy_secret(
            proxy_secret_path,
            config.general.proxy_secret_len_max,
        )
        .await
        {
            Ok(proxy_secret) => break Some(proxy_secret),
            Err(e) => {
                startup_tracker.set_me_last_error(Some(e.to_string())).await;
                if me2dc_fallback {
                    error!(
                        error = %e,
                        "ME startup failed: proxy-secret is unavailable and no saved secret found; falling back to direct mode"
                    );
                    break None;
                }

                warn!(
                    error = %e,
                    retry_in_secs = 2,
                    "ME startup failed: proxy-secret is unavailable and no saved secret found; retrying because me2dc_fallback=false"
                );
                tokio::time::sleep(Duration::from_secs(2)).await;
            }
        }
    };
    match proxy_secret {
        Some(proxy_secret) => {
            startup_tracker
                .complete_component(
                    COMPONENT_ME_SECRET_FETCH,
                    Some("proxy-secret loaded".to_string()),
                )
                .await;
            // Log a short signature (first 4 LE bytes) of the secret for
            // operator cross-checking, never the secret itself.
            info!(
                secret_len = proxy_secret.len(),
                key_sig = format_args!(
                    "0x{:08x}",
                    if proxy_secret.len() >= 4 {
                        u32::from_le_bytes([
                            proxy_secret[0],
                            proxy_secret[1],
                            proxy_secret[2],
                            proxy_secret[3],
                        ])
                    } else {
                        0
                    }
                ),
                "Proxy-secret loaded"
            );

            // ---- proxy-config v4 (DC address map for IPv4) ----
            startup_tracker
                .start_component(
                    COMPONENT_ME_PROXY_CONFIG_V4,
                    Some("load startup proxy-config v4".to_string()),
                )
                .await;
            startup_tracker
                .set_me_status(StartupMeStatus::Initializing, COMPONENT_ME_PROXY_CONFIG_V4)
                .await;
            let cfg_v4 = load_startup_proxy_config_snapshot(
                "https://core.telegram.org/getProxyConfig",
                config.general.proxy_config_v4_cache_path.as_deref(),
                me2dc_fallback,
                "getProxyConfig",
            )
            .await;
            if cfg_v4.is_some() {
                startup_tracker
                    .complete_component(
                        COMPONENT_ME_PROXY_CONFIG_V4,
                        Some("proxy-config v4 loaded".to_string()),
                    )
                    .await;
            } else {
                startup_tracker
                    .fail_component(
                        COMPONENT_ME_PROXY_CONFIG_V4,
                        Some("proxy-config v4 unavailable".to_string()),
                    )
                    .await;
            }
            // ---- proxy-config v6 (DC address map for IPv6) ----
            startup_tracker
                .start_component(
                    COMPONENT_ME_PROXY_CONFIG_V6,
                    Some("load startup proxy-config v6".to_string()),
                )
                .await;
            startup_tracker
                .set_me_status(StartupMeStatus::Initializing, COMPONENT_ME_PROXY_CONFIG_V6)
                .await;
            let cfg_v6 = load_startup_proxy_config_snapshot(
                "https://core.telegram.org/getProxyConfigV6",
                config.general.proxy_config_v6_cache_path.as_deref(),
                me2dc_fallback,
                "getProxyConfigV6",
            )
            .await;
            if cfg_v6.is_some() {
                startup_tracker
                    .complete_component(
                        COMPONENT_ME_PROXY_CONFIG_V6,
                        Some("proxy-config v6 loaded".to_string()),
                    )
                    .await;
            } else {
                startup_tracker
                    .fail_component(
                        COMPONENT_ME_PROXY_CONFIG_V6,
                        Some("proxy-config v6 unavailable".to_string()),
                    )
                    .await;
            }

            // Both maps are required to construct the pool.
            if let (Some(cfg_v4), Some(cfg_v6)) = (cfg_v4, cfg_v6) {
                startup_tracker
                    .start_component(
                        COMPONENT_ME_POOL_CONSTRUCT,
                        Some("construct ME pool".to_string()),
                    )
                    .await;
                startup_tracker
                    .set_me_status(StartupMeStatus::Initializing, COMPONENT_ME_POOL_CONSTRUCT)
                    .await;
                // Large positional fan-in of pool tuning knobs, mostly taken
                // verbatim from [general]/[timeouts]/[network] config.
                // NOTE(review): argument order must exactly match
                // MePool::new's signature — verify there when editing.
                let pool = MePool::new(
                    proxy_tag.clone(),
                    proxy_secret,
                    config.general.middle_proxy_nat_ip,
                    me_nat_probe,
                    None,
                    config.network.stun_servers.clone(),
                    config.general.stun_nat_probe_concurrency,
                    probe.detected_ipv6,
                    config.timeouts.me_one_retry,
                    config.timeouts.me_one_timeout_ms,
                    cfg_v4.map.clone(),
                    cfg_v6.map.clone(),
                    cfg_v4.default_dc.or(cfg_v6.default_dc),
                    decision.clone(),
                    Some(upstream_manager.clone()),
                    rng.clone(),
                    stats.clone(),
                    config.general.me_keepalive_enabled,
                    config.general.me_keepalive_interval_secs,
                    config.general.me_keepalive_jitter_secs,
                    config.general.me_keepalive_payload_random,
                    config.general.rpc_proxy_req_every,
                    config.general.me_warmup_stagger_enabled,
                    config.general.me_warmup_step_delay_ms,
                    config.general.me_warmup_step_jitter_ms,
                    config.general.me_reconnect_max_concurrent_per_dc,
                    config.general.me_reconnect_backoff_base_ms,
                    config.general.me_reconnect_backoff_cap_ms,
                    config.general.me_reconnect_fast_retry_count,
                    config.general.me_single_endpoint_shadow_writers,
                    config.general.me_single_endpoint_outage_mode_enabled,
                    config.general.me_single_endpoint_outage_disable_quarantine,
                    config.general.me_single_endpoint_outage_backoff_min_ms,
                    config.general.me_single_endpoint_outage_backoff_max_ms,
                    config.general.me_single_endpoint_shadow_rotate_every_secs,
                    config.general.me_floor_mode,
                    config.general.me_adaptive_floor_idle_secs,
                    config.general.me_adaptive_floor_min_writers_single_endpoint,
                    config.general.me_adaptive_floor_min_writers_multi_endpoint,
                    config.general.me_adaptive_floor_recover_grace_secs,
                    config.general.me_adaptive_floor_writers_per_core_total,
                    config.general.me_adaptive_floor_cpu_cores_override,
                    config.general.me_adaptive_floor_max_extra_writers_single_per_core,
                    config.general.me_adaptive_floor_max_extra_writers_multi_per_core,
                    config.general.me_adaptive_floor_max_active_writers_per_core,
                    config.general.me_adaptive_floor_max_warm_writers_per_core,
                    config.general.me_adaptive_floor_max_active_writers_global,
                    config.general.me_adaptive_floor_max_warm_writers_global,
                    config.general.hardswap,
                    config.general.me_pool_drain_ttl_secs,
                    config.general.effective_me_pool_force_close_secs(),
                    config.general.me_pool_min_fresh_ratio,
                    config.general.me_hardswap_warmup_delay_min_ms,
                    config.general.me_hardswap_warmup_delay_max_ms,
                    config.general.me_hardswap_warmup_extra_passes,
                    config.general.me_hardswap_warmup_pass_backoff_base_ms,
                    config.general.me_bind_stale_mode,
                    config.general.me_bind_stale_ttl_secs,
                    config.general.me_secret_atomic_snapshot,
                    config.general.me_deterministic_writer_sort,
                    config.general.me_writer_pick_mode,
                    config.general.me_writer_pick_sample_size,
                    config.general.me_socks_kdf_policy,
                    config.general.me_writer_cmd_channel_capacity,
                    config.general.me_route_channel_capacity,
                    config.general.me_route_backpressure_base_timeout_ms,
                    config.general.me_route_backpressure_high_timeout_ms,
                    config.general.me_route_backpressure_high_watermark_pct,
                    config.general.me_reader_route_data_wait_ms,
                    config.general.me_health_interval_ms_unhealthy,
                    config.general.me_health_interval_ms_healthy,
                    config.general.me_warn_rate_limit_ms,
                    config.general.me_route_no_writer_mode,
                    config.general.me_route_no_writer_wait_ms,
                    config.general.me_route_inline_recovery_attempts,
                    config.general.me_route_inline_recovery_wait_ms,
                );
                startup_tracker
                    .complete_component(
                        COMPONENT_ME_POOL_CONSTRUCT,
                        Some("ME pool object created".to_string()),
                    )
                    .await;
                // Expose the (not-yet-warmed) pool to the API layer right away.
                *api_me_pool.write().await = Some(pool.clone());
                startup_tracker
                    .start_component(
                        COMPONENT_ME_POOL_INIT_STAGE1,
                        Some("initialize ME pool writers".to_string()),
                    )
                    .await;
                startup_tracker
                    .set_me_status(StartupMeStatus::Initializing, COMPONENT_ME_POOL_INIT_STAGE1)
                    .await;

                if me2dc_fallback {
                    // Background warm-up path: startup continues with direct
                    // fallback while a dedicated thread retries pool.init().
                    let pool_bg = pool.clone();
                    let rng_bg = rng.clone();
                    let startup_tracker_bg = startup_tracker.clone();
                    let retry_limit = if me_init_retry_attempts == 0 {
                        String::from("unlimited")
                    } else {
                        me_init_retry_attempts.to_string()
                    };
                    // A separate OS thread with its own current-thread runtime
                    // keeps the retry loop off the main runtime.
                    std::thread::spawn(move || {
                        let runtime = match tokio::runtime::Builder::new_current_thread()
                            .enable_all()
                            .build()
                        {
                            Ok(runtime) => runtime,
                            Err(error) => {
                                error!(error = %error, "Failed to build background runtime for ME initialization");
                                return;
                            }
                        };
                        runtime.block_on(async move {
                            let mut init_attempt: u32 = 0;
                            loop {
                                init_attempt = init_attempt.saturating_add(1);
                                startup_tracker_bg.set_me_init_attempt(init_attempt).await;
                                match pool_bg.init(pool_size, &rng_bg).await {
                                    Ok(()) => {
                                        startup_tracker_bg.set_me_last_error(None).await;
                                        startup_tracker_bg
                                            .complete_component(
                                                COMPONENT_ME_POOL_INIT_STAGE1,
                                                Some("ME pool initialized".to_string()),
                                            )
                                            .await;
                                        startup_tracker_bg
                                            .set_me_status(StartupMeStatus::Ready, "ready")
                                            .await;
                                        info!(
                                            attempt = init_attempt,
                                            "Middle-End pool initialized successfully"
                                        );

                                        // Once initialized, keep the pool
                                        // topped up via the health monitor.
                                        let pool_health = pool_bg.clone();
                                        let rng_health = rng_bg.clone();
                                        let min_conns = pool_size;
                                        tokio::spawn(async move {
                                            crate::transport::middle_proxy::me_health_monitor(
                                                pool_health,
                                                rng_health,
                                                min_conns,
                                            )
                                            .await;
                                        });
                                        break;
                                    }
                                    Err(e) => {
                                        startup_tracker_bg.set_me_last_error(Some(e.to_string())).await;
                                        // Escalate to warn only after the
                                        // warm-up grace attempts.
                                        if init_attempt >= me_init_warn_after_attempts {
                                            warn!(
                                                error = %e,
                                                attempt = init_attempt,
                                                retry_limit = %retry_limit,
                                                retry_in_secs = 2,
                                                "ME pool is not ready yet; retrying background initialization"
                                            );
                                        } else {
                                            info!(
                                                error = %e,
                                                attempt = init_attempt,
                                                retry_limit = %retry_limit,
                                                retry_in_secs = 2,
                                                "ME pool startup warmup: retrying background initialization"
                                            );
                                        }
                                        // Drop any cached STUN result so the
                                        // next attempt re-probes NAT.
                                        pool_bg.reset_stun_state();
                                        tokio::time::sleep(Duration::from_secs(2)).await;
                                    }
                                }
                            }
                        });
                    });
                    startup_tracker
                        .set_me_status(StartupMeStatus::Initializing, "background_init")
                        .await;
                    info!(
                        startup_grace_secs = 80,
                        "ME pool initialization continues in background; startup continues with conditional Direct fallback"
                    );
                    Some(pool)
                } else {
                    // Foreground path: block startup until init succeeds or
                    // the (optional) retry budget is exhausted.
                    let mut init_attempt: u32 = 0;
                    loop {
                        init_attempt = init_attempt.saturating_add(1);
                        startup_tracker.set_me_init_attempt(init_attempt).await;
                        match pool.init(pool_size, &rng).await {
                            Ok(()) => {
                                startup_tracker.set_me_last_error(None).await;
                                startup_tracker
                                    .complete_component(
                                        COMPONENT_ME_POOL_INIT_STAGE1,
                                        Some("ME pool initialized".to_string()),
                                    )
                                    .await;
                                startup_tracker
                                    .set_me_status(StartupMeStatus::Ready, "ready")
                                    .await;
                                info!(
                                    attempt = init_attempt,
                                    "Middle-End pool initialized successfully"
                                );

                                // Spawn the ongoing health monitor for the pool.
                                let pool_clone = pool.clone();
                                let rng_clone = rng.clone();
                                let min_conns = pool_size;
                                tokio::spawn(async move {
                                    crate::transport::middle_proxy::me_health_monitor(
                                        pool_clone, rng_clone, min_conns,
                                    )
                                    .await;
                                });

                                break Some(pool);
                            }
                            Err(e) => {
                                startup_tracker.set_me_last_error(Some(e.to_string())).await;
                                // me_init_retry_attempts == 0 means "retry
                                // forever"; otherwise enforce the budget.
                                let retries_limited = me_init_retry_attempts > 0;
                                if retries_limited && init_attempt >= me_init_retry_attempts {
                                    startup_tracker
                                        .fail_component(
                                            COMPONENT_ME_POOL_INIT_STAGE1,
                                            Some("ME init retry budget exhausted".to_string()),
                                        )
                                        .await;
                                    startup_tracker
                                        .set_me_status(StartupMeStatus::Failed, "failed")
                                        .await;
                                    error!(
                                        error = %e,
                                        attempt = init_attempt,
                                        retry_limit = me_init_retry_attempts,
                                        "ME pool init retries exhausted; startup cannot continue in middle-proxy mode"
                                    );
                                    break None;
                                }

                                let retry_limit = if me_init_retry_attempts == 0 {
                                    String::from("unlimited")
                                } else {
                                    me_init_retry_attempts.to_string()
                                };
                                if init_attempt >= me_init_warn_after_attempts {
                                    warn!(
                                        error = %e,
                                        attempt = init_attempt,
                                        retry_limit = retry_limit,
                                        me2dc_fallback = me2dc_fallback,
                                        retry_in_secs = 2,
                                        "ME pool is not ready yet; retrying startup initialization"
                                    );
                                } else {
                                    info!(
                                        error = %e,
                                        attempt = init_attempt,
                                        retry_limit = retry_limit,
                                        me2dc_fallback = me2dc_fallback,
                                        retry_in_secs = 2,
                                        "ME pool startup warmup: retrying initialization"
                                    );
                                }
                                // Re-probe NAT on the next attempt.
                                pool.reset_stun_state();
                                tokio::time::sleep(Duration::from_secs(2)).await;
                            }
                        }
                    }
                }
            } else {
                // One or both proxy-configs missing: cannot build the pool.
                startup_tracker
                    .skip_component(
                        COMPONENT_ME_POOL_CONSTRUCT,
                        Some("ME configs are incomplete".to_string()),
                    )
                    .await;
                startup_tracker
                    .fail_component(
                        COMPONENT_ME_POOL_INIT_STAGE1,
                        Some("ME configs are incomplete".to_string()),
                    )
                    .await;
                startup_tracker
                    .set_me_status(StartupMeStatus::Failed, "failed")
                    .await;
                None
            }
        }
        None => {
            // Proxy-secret unavailable (fallback path): mark the fetch as
            // failed and every downstream ME component as skipped/failed.
            startup_tracker
                .fail_component(
                    COMPONENT_ME_SECRET_FETCH,
                    Some("proxy-secret unavailable".to_string()),
                )
                .await;
            startup_tracker
                .skip_component(
                    COMPONENT_ME_PROXY_CONFIG_V4,
                    Some("proxy-secret unavailable".to_string()),
                )
                .await;
            startup_tracker
                .skip_component(
                    COMPONENT_ME_PROXY_CONFIG_V6,
                    Some("proxy-secret unavailable".to_string()),
                )
                .await;
            startup_tracker
                .skip_component(
                    COMPONENT_ME_POOL_CONSTRUCT,
                    Some("proxy-secret unavailable".to_string()),
                )
                .await;
            startup_tracker
                .fail_component(
                    COMPONENT_ME_POOL_INIT_STAGE1,
                    Some("proxy-secret unavailable".to_string()),
                )
                .await;
            startup_tracker
                .set_me_status(StartupMeStatus::Failed, "failed")
                .await;
            None
        }
    }
}
|
||||||
553
src/maestro/mod.rs
Normal file
553
src/maestro/mod.rs
Normal file
@@ -0,0 +1,553 @@
|
|||||||
|
//! telemt — Telegram MTProto Proxy
|
||||||
|
|
||||||
|
#![allow(unused_assignments)]
|
||||||
|
|
||||||
|
// Runtime orchestration modules.
|
||||||
|
// - helpers: CLI and shared startup/runtime helper routines.
|
||||||
|
// - tls_bootstrap: TLS front cache bootstrap and refresh tasks.
|
||||||
|
// - me_startup: Middle-End secret/config fetch and pool initialization.
|
||||||
|
// - connectivity: startup ME/DC connectivity diagnostics.
|
||||||
|
// - runtime_tasks: hot-reload and background task orchestration.
|
||||||
|
// - admission: conditional-cast gate and route mode switching.
|
||||||
|
// - listeners: TCP/Unix listener bind and accept-loop orchestration.
|
||||||
|
// - shutdown: graceful shutdown sequence and uptime logging.
|
||||||
|
mod helpers;
|
||||||
|
mod admission;
|
||||||
|
mod connectivity;
|
||||||
|
mod listeners;
|
||||||
|
mod me_startup;
|
||||||
|
mod runtime_tasks;
|
||||||
|
mod shutdown;
|
||||||
|
mod tls_bootstrap;
|
||||||
|
|
||||||
|
use std::net::{IpAddr, SocketAddr};
|
||||||
|
use std::sync::Arc;
|
||||||
|
use std::time::{Duration, Instant, SystemTime, UNIX_EPOCH};
|
||||||
|
use tokio::sync::{RwLock, Semaphore, watch};
|
||||||
|
use tracing::{error, info, warn};
|
||||||
|
use tracing_subscriber::{EnvFilter, fmt, prelude::*, reload};
|
||||||
|
|
||||||
|
use crate::api;
|
||||||
|
use crate::config::{LogLevel, ProxyConfig};
|
||||||
|
use crate::crypto::SecureRandom;
|
||||||
|
use crate::ip_tracker::UserIpTracker;
|
||||||
|
use crate::network::probe::{decide_network_capabilities, log_probe_result, run_probe};
|
||||||
|
use crate::proxy::route_mode::{RelayRouteMode, RouteRuntimeController};
|
||||||
|
use crate::stats::beobachten::BeobachtenStore;
|
||||||
|
use crate::stats::telemetry::TelemetryPolicy;
|
||||||
|
use crate::stats::{ReplayChecker, Stats};
|
||||||
|
use crate::startup::{
|
||||||
|
COMPONENT_API_BOOTSTRAP, COMPONENT_CONFIG_LOAD,
|
||||||
|
COMPONENT_ME_POOL_CONSTRUCT, COMPONENT_ME_POOL_INIT_STAGE1,
|
||||||
|
COMPONENT_ME_PROXY_CONFIG_V4, COMPONENT_ME_PROXY_CONFIG_V6, COMPONENT_ME_SECRET_FETCH,
|
||||||
|
COMPONENT_NETWORK_PROBE, COMPONENT_TRACING_INIT, StartupMeStatus, StartupTracker,
|
||||||
|
};
|
||||||
|
use crate::stream::BufferPool;
|
||||||
|
use crate::transport::middle_proxy::MePool;
|
||||||
|
use crate::transport::UpstreamManager;
|
||||||
|
use helpers::parse_cli;
|
||||||
|
|
||||||
|
/// Runs the full telemt runtime startup pipeline and blocks until shutdown.
|
||||||
|
pub async fn run() -> std::result::Result<(), Box<dyn std::error::Error>> {
|
||||||
|
let process_started_at = Instant::now();
|
||||||
|
let process_started_at_epoch_secs = SystemTime::now()
|
||||||
|
.duration_since(UNIX_EPOCH)
|
||||||
|
.unwrap_or_default()
|
||||||
|
.as_secs();
|
||||||
|
let startup_tracker = Arc::new(StartupTracker::new(process_started_at_epoch_secs));
|
||||||
|
startup_tracker
|
||||||
|
.start_component(COMPONENT_CONFIG_LOAD, Some("load and validate config".to_string()))
|
||||||
|
.await;
|
||||||
|
let (config_path, cli_silent, cli_log_level) = parse_cli();
|
||||||
|
|
||||||
|
let mut config = match ProxyConfig::load(&config_path) {
|
||||||
|
Ok(c) => c,
|
||||||
|
Err(e) => {
|
||||||
|
if std::path::Path::new(&config_path).exists() {
|
||||||
|
eprintln!("[telemt] Error: {}", e);
|
||||||
|
std::process::exit(1);
|
||||||
|
} else {
|
||||||
|
let default = ProxyConfig::default();
|
||||||
|
std::fs::write(&config_path, toml::to_string_pretty(&default).unwrap()).unwrap();
|
||||||
|
eprintln!("[telemt] Created default config at {}", config_path);
|
||||||
|
default
|
||||||
|
}
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
if let Err(e) = config.validate() {
|
||||||
|
eprintln!("[telemt] Invalid config: {}", e);
|
||||||
|
std::process::exit(1);
|
||||||
|
}
|
||||||
|
|
||||||
|
if let Err(e) = crate::network::dns_overrides::install_entries(&config.network.dns_overrides) {
|
||||||
|
eprintln!("[telemt] Invalid network.dns_overrides: {}", e);
|
||||||
|
std::process::exit(1);
|
||||||
|
}
|
||||||
|
startup_tracker
|
||||||
|
.complete_component(COMPONENT_CONFIG_LOAD, Some("config is ready".to_string()))
|
||||||
|
.await;
|
||||||
|
|
||||||
|
let has_rust_log = std::env::var("RUST_LOG").is_ok();
|
||||||
|
let effective_log_level = if cli_silent {
|
||||||
|
LogLevel::Silent
|
||||||
|
} else if let Some(ref s) = cli_log_level {
|
||||||
|
LogLevel::from_str_loose(s)
|
||||||
|
} else {
|
||||||
|
config.general.log_level.clone()
|
||||||
|
};
|
||||||
|
|
||||||
|
let (filter_layer, filter_handle) = reload::Layer::new(EnvFilter::new("info"));
|
||||||
|
startup_tracker
|
||||||
|
.start_component(COMPONENT_TRACING_INIT, Some("initialize tracing subscriber".to_string()))
|
||||||
|
.await;
|
||||||
|
|
||||||
|
// Configure color output based on config
|
||||||
|
let fmt_layer = if config.general.disable_colors {
|
||||||
|
fmt::Layer::default().with_ansi(false)
|
||||||
|
} else {
|
||||||
|
fmt::Layer::default().with_ansi(true)
|
||||||
|
};
|
||||||
|
|
||||||
|
tracing_subscriber::registry()
|
||||||
|
.with(filter_layer)
|
||||||
|
.with(fmt_layer)
|
||||||
|
.init();
|
||||||
|
startup_tracker
|
||||||
|
.complete_component(COMPONENT_TRACING_INIT, Some("tracing initialized".to_string()))
|
||||||
|
.await;
|
||||||
|
|
||||||
|
info!("Telemt MTProxy v{}", env!("CARGO_PKG_VERSION"));
|
||||||
|
info!("Log level: {}", effective_log_level);
|
||||||
|
if config.general.disable_colors {
|
||||||
|
info!("Colors: disabled");
|
||||||
|
}
|
||||||
|
info!(
|
||||||
|
"Modes: classic={} secure={} tls={}",
|
||||||
|
config.general.modes.classic, config.general.modes.secure, config.general.modes.tls
|
||||||
|
);
|
||||||
|
if config.general.modes.classic {
|
||||||
|
warn!("Classic mode is vulnerable to DPI detection; enable only for legacy clients");
|
||||||
|
}
|
||||||
|
info!("TLS domain: {}", config.censorship.tls_domain);
|
||||||
|
if let Some(ref sock) = config.censorship.mask_unix_sock {
|
||||||
|
info!("Mask: {} -> unix:{}", config.censorship.mask, sock);
|
||||||
|
if !std::path::Path::new(sock).exists() {
|
||||||
|
warn!(
|
||||||
|
"Unix socket '{}' does not exist yet. Masking will fail until it appears.",
|
||||||
|
sock
|
||||||
|
);
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
info!(
|
||||||
|
"Mask: {} -> {}:{}",
|
||||||
|
config.censorship.mask,
|
||||||
|
config
|
||||||
|
.censorship
|
||||||
|
.mask_host
|
||||||
|
.as_deref()
|
||||||
|
.unwrap_or(&config.censorship.tls_domain),
|
||||||
|
config.censorship.mask_port
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|
||||||
|
if config.censorship.tls_domain == "www.google.com" {
|
||||||
|
warn!("Using default tls_domain. Consider setting a custom domain.");
|
||||||
|
}
|
||||||
|
|
||||||
|
let stats = Arc::new(Stats::new());
|
||||||
|
stats.apply_telemetry_policy(TelemetryPolicy::from_config(&config.general.telemetry));
|
||||||
|
|
||||||
|
let upstream_manager = Arc::new(UpstreamManager::new(
|
||||||
|
config.upstreams.clone(),
|
||||||
|
config.general.upstream_connect_retry_attempts,
|
||||||
|
config.general.upstream_connect_retry_backoff_ms,
|
||||||
|
config.general.upstream_connect_budget_ms,
|
||||||
|
config.general.upstream_unhealthy_fail_threshold,
|
||||||
|
config.general.upstream_connect_failfast_hard_errors,
|
||||||
|
stats.clone(),
|
||||||
|
));
|
||||||
|
let ip_tracker = Arc::new(UserIpTracker::new());
|
||||||
|
ip_tracker.load_limits(&config.access.user_max_unique_ips).await;
|
||||||
|
ip_tracker
|
||||||
|
.set_limit_policy(
|
||||||
|
config.access.user_max_unique_ips_mode,
|
||||||
|
config.access.user_max_unique_ips_window_secs,
|
||||||
|
)
|
||||||
|
.await;
|
||||||
|
if !config.access.user_max_unique_ips.is_empty() {
|
||||||
|
info!(
|
||||||
|
"IP limits configured for {} users",
|
||||||
|
config.access.user_max_unique_ips.len()
|
||||||
|
);
|
||||||
|
}
|
||||||
|
if !config.network.dns_overrides.is_empty() {
|
||||||
|
info!(
|
||||||
|
"Runtime DNS overrides configured: {} entries",
|
||||||
|
config.network.dns_overrides.len()
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|
||||||
|
let (api_config_tx, api_config_rx) = watch::channel(Arc::new(config.clone()));
|
||||||
|
let (detected_ips_tx, detected_ips_rx) = watch::channel((None::<IpAddr>, None::<IpAddr>));
|
||||||
|
let initial_admission_open = !config.general.use_middle_proxy;
|
||||||
|
let (admission_tx, admission_rx) = watch::channel(initial_admission_open);
|
||||||
|
let initial_route_mode = if config.general.use_middle_proxy {
|
||||||
|
RelayRouteMode::Middle
|
||||||
|
} else {
|
||||||
|
RelayRouteMode::Direct
|
||||||
|
};
|
||||||
|
let route_runtime = Arc::new(RouteRuntimeController::new(initial_route_mode));
|
||||||
|
let api_me_pool = Arc::new(RwLock::new(None::<Arc<MePool>>));
|
||||||
|
startup_tracker
|
||||||
|
.start_component(COMPONENT_API_BOOTSTRAP, Some("spawn API listener task".to_string()))
|
||||||
|
.await;
|
||||||
|
|
||||||
|
if config.server.api.enabled {
|
||||||
|
let listen = match config.server.api.listen.parse::<SocketAddr>() {
|
||||||
|
Ok(listen) => listen,
|
||||||
|
Err(error) => {
|
||||||
|
warn!(
|
||||||
|
error = %error,
|
||||||
|
listen = %config.server.api.listen,
|
||||||
|
"Invalid server.api.listen; API is disabled"
|
||||||
|
);
|
||||||
|
SocketAddr::from(([127, 0, 0, 1], 0))
|
||||||
|
}
|
||||||
|
};
|
||||||
|
if listen.port() != 0 {
|
||||||
|
let stats_api = stats.clone();
|
||||||
|
let ip_tracker_api = ip_tracker.clone();
|
||||||
|
let me_pool_api = api_me_pool.clone();
|
||||||
|
let upstream_manager_api = upstream_manager.clone();
|
||||||
|
let route_runtime_api = route_runtime.clone();
|
||||||
|
let config_rx_api = api_config_rx.clone();
|
||||||
|
let admission_rx_api = admission_rx.clone();
|
||||||
|
let config_path_api = std::path::PathBuf::from(&config_path);
|
||||||
|
let startup_tracker_api = startup_tracker.clone();
|
||||||
|
let detected_ips_rx_api = detected_ips_rx.clone();
|
||||||
|
tokio::spawn(async move {
|
||||||
|
api::serve(
|
||||||
|
listen,
|
||||||
|
stats_api,
|
||||||
|
ip_tracker_api,
|
||||||
|
me_pool_api,
|
||||||
|
route_runtime_api,
|
||||||
|
upstream_manager_api,
|
||||||
|
config_rx_api,
|
||||||
|
admission_rx_api,
|
||||||
|
config_path_api,
|
||||||
|
detected_ips_rx_api,
|
||||||
|
process_started_at_epoch_secs,
|
||||||
|
startup_tracker_api,
|
||||||
|
)
|
||||||
|
.await;
|
||||||
|
});
|
||||||
|
startup_tracker
|
||||||
|
.complete_component(
|
||||||
|
COMPONENT_API_BOOTSTRAP,
|
||||||
|
Some(format!("api task spawned on {}", listen)),
|
||||||
|
)
|
||||||
|
.await;
|
||||||
|
} else {
|
||||||
|
startup_tracker
|
||||||
|
.skip_component(
|
||||||
|
COMPONENT_API_BOOTSTRAP,
|
||||||
|
Some("server.api.listen has zero port".to_string()),
|
||||||
|
)
|
||||||
|
.await;
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
startup_tracker
|
||||||
|
.skip_component(
|
||||||
|
COMPONENT_API_BOOTSTRAP,
|
||||||
|
Some("server.api.enabled is false".to_string()),
|
||||||
|
)
|
||||||
|
.await;
|
||||||
|
}
|
||||||
|
|
||||||
|
let mut tls_domains = Vec::with_capacity(1 + config.censorship.tls_domains.len());
|
||||||
|
tls_domains.push(config.censorship.tls_domain.clone());
|
||||||
|
for d in &config.censorship.tls_domains {
|
||||||
|
if !tls_domains.contains(d) {
|
||||||
|
tls_domains.push(d.clone());
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
let tls_cache = tls_bootstrap::bootstrap_tls_front(
|
||||||
|
&config,
|
||||||
|
&tls_domains,
|
||||||
|
upstream_manager.clone(),
|
||||||
|
&startup_tracker,
|
||||||
|
)
|
||||||
|
.await;
|
||||||
|
|
||||||
|
startup_tracker
|
||||||
|
.start_component(COMPONENT_NETWORK_PROBE, Some("probe network capabilities".to_string()))
|
||||||
|
.await;
|
||||||
|
let probe = run_probe(
|
||||||
|
&config.network,
|
||||||
|
config.general.middle_proxy_nat_probe,
|
||||||
|
config.general.stun_nat_probe_concurrency,
|
||||||
|
)
|
||||||
|
.await?;
|
||||||
|
detected_ips_tx.send_replace((
|
||||||
|
probe.detected_ipv4.map(IpAddr::V4),
|
||||||
|
probe.detected_ipv6.map(IpAddr::V6),
|
||||||
|
));
|
||||||
|
let decision = decide_network_capabilities(&config.network, &probe);
|
||||||
|
log_probe_result(&probe, &decision);
|
||||||
|
startup_tracker
|
||||||
|
.complete_component(
|
||||||
|
COMPONENT_NETWORK_PROBE,
|
||||||
|
Some("network capabilities determined".to_string()),
|
||||||
|
)
|
||||||
|
.await;
|
||||||
|
|
||||||
|
let prefer_ipv6 = decision.prefer_ipv6();
|
||||||
|
let mut use_middle_proxy = config.general.use_middle_proxy;
|
||||||
|
let beobachten = Arc::new(BeobachtenStore::new());
|
||||||
|
let rng = Arc::new(SecureRandom::new());
|
||||||
|
|
||||||
|
// Connection concurrency limit
|
||||||
|
let max_connections = Arc::new(Semaphore::new(10_000));
|
||||||
|
|
||||||
|
let me2dc_fallback = config.general.me2dc_fallback;
|
||||||
|
let me_init_retry_attempts = config.general.me_init_retry_attempts;
|
||||||
|
if use_middle_proxy && !decision.ipv4_me && !decision.ipv6_me {
|
||||||
|
if me2dc_fallback {
|
||||||
|
warn!("No usable IP family for Middle Proxy detected; falling back to direct DC");
|
||||||
|
use_middle_proxy = false;
|
||||||
|
} else {
|
||||||
|
warn!(
|
||||||
|
"No usable IP family for Middle Proxy detected; me2dc_fallback=false, ME init retries stay active"
|
||||||
|
);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if use_middle_proxy {
|
||||||
|
startup_tracker
|
||||||
|
.set_me_status(StartupMeStatus::Initializing, COMPONENT_ME_SECRET_FETCH)
|
||||||
|
.await;
|
||||||
|
startup_tracker
|
||||||
|
.start_component(
|
||||||
|
COMPONENT_ME_SECRET_FETCH,
|
||||||
|
Some("fetch proxy-secret from source/cache".to_string()),
|
||||||
|
)
|
||||||
|
.await;
|
||||||
|
startup_tracker
|
||||||
|
.set_me_retry_limit(if !me2dc_fallback || me_init_retry_attempts == 0 {
|
||||||
|
"unlimited".to_string()
|
||||||
|
} else {
|
||||||
|
me_init_retry_attempts.to_string()
|
||||||
|
})
|
||||||
|
.await;
|
||||||
|
} else {
|
||||||
|
startup_tracker
|
||||||
|
.set_me_status(StartupMeStatus::Skipped, "skipped")
|
||||||
|
.await;
|
||||||
|
startup_tracker
|
||||||
|
.skip_component(
|
||||||
|
COMPONENT_ME_SECRET_FETCH,
|
||||||
|
Some("middle proxy mode disabled".to_string()),
|
||||||
|
)
|
||||||
|
.await;
|
||||||
|
startup_tracker
|
||||||
|
.skip_component(
|
||||||
|
COMPONENT_ME_PROXY_CONFIG_V4,
|
||||||
|
Some("middle proxy mode disabled".to_string()),
|
||||||
|
)
|
||||||
|
.await;
|
||||||
|
startup_tracker
|
||||||
|
.skip_component(
|
||||||
|
COMPONENT_ME_PROXY_CONFIG_V6,
|
||||||
|
Some("middle proxy mode disabled".to_string()),
|
||||||
|
)
|
||||||
|
.await;
|
||||||
|
startup_tracker
|
||||||
|
.skip_component(
|
||||||
|
COMPONENT_ME_POOL_CONSTRUCT,
|
||||||
|
Some("middle proxy mode disabled".to_string()),
|
||||||
|
)
|
||||||
|
.await;
|
||||||
|
startup_tracker
|
||||||
|
.skip_component(
|
||||||
|
COMPONENT_ME_POOL_INIT_STAGE1,
|
||||||
|
Some("middle proxy mode disabled".to_string()),
|
||||||
|
)
|
||||||
|
.await;
|
||||||
|
}
|
||||||
|
|
||||||
|
let me_pool: Option<Arc<MePool>> = me_startup::initialize_me_pool(
|
||||||
|
use_middle_proxy,
|
||||||
|
&config,
|
||||||
|
&decision,
|
||||||
|
&probe,
|
||||||
|
&startup_tracker,
|
||||||
|
upstream_manager.clone(),
|
||||||
|
rng.clone(),
|
||||||
|
stats.clone(),
|
||||||
|
api_me_pool.clone(),
|
||||||
|
)
|
||||||
|
.await;
|
||||||
|
|
||||||
|
// If ME failed to initialize, force direct-only mode.
|
||||||
|
if me_pool.is_some() {
|
||||||
|
startup_tracker
|
||||||
|
.set_transport_mode("middle_proxy")
|
||||||
|
.await;
|
||||||
|
startup_tracker
|
||||||
|
.set_degraded(false)
|
||||||
|
.await;
|
||||||
|
info!("Transport: Middle-End Proxy - all DC-over-RPC");
|
||||||
|
} else {
|
||||||
|
let _ = use_middle_proxy;
|
||||||
|
use_middle_proxy = false;
|
||||||
|
// Make runtime config reflect direct-only mode for handlers.
|
||||||
|
config.general.use_middle_proxy = false;
|
||||||
|
startup_tracker
|
||||||
|
.set_transport_mode("direct")
|
||||||
|
.await;
|
||||||
|
startup_tracker
|
||||||
|
.set_degraded(true)
|
||||||
|
.await;
|
||||||
|
if me2dc_fallback {
|
||||||
|
startup_tracker
|
||||||
|
.set_me_status(StartupMeStatus::Failed, "fallback_to_direct")
|
||||||
|
.await;
|
||||||
|
} else {
|
||||||
|
startup_tracker
|
||||||
|
.set_me_status(StartupMeStatus::Skipped, "skipped")
|
||||||
|
.await;
|
||||||
|
}
|
||||||
|
info!("Transport: Direct DC - TCP - standard DC-over-TCP");
|
||||||
|
}
|
||||||
|
|
||||||
|
// Freeze config after possible fallback decision
|
||||||
|
let config = Arc::new(config);
|
||||||
|
|
||||||
|
let replay_checker = Arc::new(ReplayChecker::new(
|
||||||
|
config.access.replay_check_len,
|
||||||
|
Duration::from_secs(config.access.replay_window_secs),
|
||||||
|
));
|
||||||
|
|
||||||
|
let buffer_pool = Arc::new(BufferPool::with_config(16 * 1024, 4096));
|
||||||
|
|
||||||
|
connectivity::run_startup_connectivity(
|
||||||
|
&config,
|
||||||
|
&me_pool,
|
||||||
|
rng.clone(),
|
||||||
|
&startup_tracker,
|
||||||
|
upstream_manager.clone(),
|
||||||
|
prefer_ipv6,
|
||||||
|
&decision,
|
||||||
|
process_started_at,
|
||||||
|
api_me_pool.clone(),
|
||||||
|
)
|
||||||
|
.await;
|
||||||
|
|
||||||
|
let runtime_watches = runtime_tasks::spawn_runtime_tasks(
|
||||||
|
&config,
|
||||||
|
&config_path,
|
||||||
|
&probe,
|
||||||
|
prefer_ipv6,
|
||||||
|
decision.ipv4_dc,
|
||||||
|
decision.ipv6_dc,
|
||||||
|
&startup_tracker,
|
||||||
|
stats.clone(),
|
||||||
|
upstream_manager.clone(),
|
||||||
|
replay_checker.clone(),
|
||||||
|
me_pool.clone(),
|
||||||
|
rng.clone(),
|
||||||
|
ip_tracker.clone(),
|
||||||
|
beobachten.clone(),
|
||||||
|
api_config_tx.clone(),
|
||||||
|
me_pool.clone(),
|
||||||
|
)
|
||||||
|
.await;
|
||||||
|
let config_rx = runtime_watches.config_rx;
|
||||||
|
let log_level_rx = runtime_watches.log_level_rx;
|
||||||
|
let detected_ip_v4 = runtime_watches.detected_ip_v4;
|
||||||
|
let detected_ip_v6 = runtime_watches.detected_ip_v6;
|
||||||
|
|
||||||
|
admission::configure_admission_gate(
|
||||||
|
&config,
|
||||||
|
me_pool.clone(),
|
||||||
|
route_runtime.clone(),
|
||||||
|
&admission_tx,
|
||||||
|
config_rx.clone(),
|
||||||
|
)
|
||||||
|
.await;
|
||||||
|
let _admission_tx_hold = admission_tx;
|
||||||
|
|
||||||
|
let bound = listeners::bind_listeners(
|
||||||
|
&config,
|
||||||
|
decision.ipv4_dc,
|
||||||
|
decision.ipv6_dc,
|
||||||
|
detected_ip_v4,
|
||||||
|
detected_ip_v6,
|
||||||
|
&startup_tracker,
|
||||||
|
config_rx.clone(),
|
||||||
|
admission_rx.clone(),
|
||||||
|
stats.clone(),
|
||||||
|
upstream_manager.clone(),
|
||||||
|
replay_checker.clone(),
|
||||||
|
buffer_pool.clone(),
|
||||||
|
rng.clone(),
|
||||||
|
me_pool.clone(),
|
||||||
|
route_runtime.clone(),
|
||||||
|
tls_cache.clone(),
|
||||||
|
ip_tracker.clone(),
|
||||||
|
beobachten.clone(),
|
||||||
|
max_connections.clone(),
|
||||||
|
)
|
||||||
|
.await?;
|
||||||
|
let listeners = bound.listeners;
|
||||||
|
let has_unix_listener = bound.has_unix_listener;
|
||||||
|
|
||||||
|
if listeners.is_empty() && !has_unix_listener {
|
||||||
|
error!("No listeners. Exiting.");
|
||||||
|
std::process::exit(1);
|
||||||
|
}
|
||||||
|
|
||||||
|
runtime_tasks::apply_runtime_log_filter(
|
||||||
|
has_rust_log,
|
||||||
|
&effective_log_level,
|
||||||
|
filter_handle,
|
||||||
|
log_level_rx,
|
||||||
|
)
|
||||||
|
.await;
|
||||||
|
|
||||||
|
runtime_tasks::spawn_metrics_if_configured(
|
||||||
|
&config,
|
||||||
|
&startup_tracker,
|
||||||
|
stats.clone(),
|
||||||
|
beobachten.clone(),
|
||||||
|
ip_tracker.clone(),
|
||||||
|
config_rx.clone(),
|
||||||
|
)
|
||||||
|
.await;
|
||||||
|
|
||||||
|
runtime_tasks::mark_runtime_ready(&startup_tracker).await;
|
||||||
|
|
||||||
|
listeners::spawn_tcp_accept_loops(
|
||||||
|
listeners,
|
||||||
|
config_rx.clone(),
|
||||||
|
admission_rx.clone(),
|
||||||
|
stats.clone(),
|
||||||
|
upstream_manager.clone(),
|
||||||
|
replay_checker.clone(),
|
||||||
|
buffer_pool.clone(),
|
||||||
|
rng.clone(),
|
||||||
|
me_pool.clone(),
|
||||||
|
route_runtime.clone(),
|
||||||
|
tls_cache.clone(),
|
||||||
|
ip_tracker.clone(),
|
||||||
|
beobachten.clone(),
|
||||||
|
max_connections.clone(),
|
||||||
|
);
|
||||||
|
|
||||||
|
shutdown::wait_for_shutdown(process_started_at, me_pool).await;
|
||||||
|
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
317
src/maestro/runtime_tasks.rs
Normal file
317
src/maestro/runtime_tasks.rs
Normal file
@@ -0,0 +1,317 @@
|
|||||||
|
use std::net::IpAddr;
|
||||||
|
use std::path::PathBuf;
|
||||||
|
use std::sync::Arc;
|
||||||
|
|
||||||
|
use tokio::sync::{mpsc, watch};
|
||||||
|
use tracing::{debug, warn};
|
||||||
|
use tracing_subscriber::reload;
|
||||||
|
use tracing_subscriber::EnvFilter;
|
||||||
|
|
||||||
|
use crate::config::{LogLevel, ProxyConfig};
|
||||||
|
use crate::config::hot_reload::spawn_config_watcher;
|
||||||
|
use crate::crypto::SecureRandom;
|
||||||
|
use crate::ip_tracker::UserIpTracker;
|
||||||
|
use crate::metrics;
|
||||||
|
use crate::network::probe::NetworkProbe;
|
||||||
|
use crate::startup::{COMPONENT_CONFIG_WATCHER_START, COMPONENT_METRICS_START, COMPONENT_RUNTIME_READY, StartupTracker};
|
||||||
|
use crate::stats::beobachten::BeobachtenStore;
|
||||||
|
use crate::stats::telemetry::TelemetryPolicy;
|
||||||
|
use crate::stats::{ReplayChecker, Stats};
|
||||||
|
use crate::transport::middle_proxy::{MePool, MeReinitTrigger};
|
||||||
|
use crate::transport::UpstreamManager;
|
||||||
|
|
||||||
|
use super::helpers::write_beobachten_snapshot;
|
||||||
|
|
||||||
|
pub(crate) struct RuntimeWatches {
|
||||||
|
pub(crate) config_rx: watch::Receiver<Arc<ProxyConfig>>,
|
||||||
|
pub(crate) log_level_rx: watch::Receiver<LogLevel>,
|
||||||
|
pub(crate) detected_ip_v4: Option<IpAddr>,
|
||||||
|
pub(crate) detected_ip_v6: Option<IpAddr>,
|
||||||
|
}
|
||||||
|
|
||||||
|
#[allow(clippy::too_many_arguments)]
|
||||||
|
pub(crate) async fn spawn_runtime_tasks(
|
||||||
|
config: &Arc<ProxyConfig>,
|
||||||
|
config_path: &str,
|
||||||
|
probe: &NetworkProbe,
|
||||||
|
prefer_ipv6: bool,
|
||||||
|
decision_ipv4_dc: bool,
|
||||||
|
decision_ipv6_dc: bool,
|
||||||
|
startup_tracker: &Arc<StartupTracker>,
|
||||||
|
stats: Arc<Stats>,
|
||||||
|
upstream_manager: Arc<UpstreamManager>,
|
||||||
|
replay_checker: Arc<ReplayChecker>,
|
||||||
|
me_pool: Option<Arc<MePool>>,
|
||||||
|
rng: Arc<SecureRandom>,
|
||||||
|
ip_tracker: Arc<UserIpTracker>,
|
||||||
|
beobachten: Arc<BeobachtenStore>,
|
||||||
|
api_config_tx: watch::Sender<Arc<ProxyConfig>>,
|
||||||
|
me_pool_for_policy: Option<Arc<MePool>>,
|
||||||
|
) -> RuntimeWatches {
|
||||||
|
let um_clone = upstream_manager.clone();
|
||||||
|
let dc_overrides_for_health = config.dc_overrides.clone();
|
||||||
|
tokio::spawn(async move {
|
||||||
|
um_clone
|
||||||
|
.run_health_checks(
|
||||||
|
prefer_ipv6,
|
||||||
|
decision_ipv4_dc,
|
||||||
|
decision_ipv6_dc,
|
||||||
|
dc_overrides_for_health,
|
||||||
|
)
|
||||||
|
.await;
|
||||||
|
});
|
||||||
|
|
||||||
|
let rc_clone = replay_checker.clone();
|
||||||
|
tokio::spawn(async move {
|
||||||
|
rc_clone.run_periodic_cleanup().await;
|
||||||
|
});
|
||||||
|
|
||||||
|
let detected_ip_v4: Option<IpAddr> = probe.detected_ipv4.map(IpAddr::V4);
|
||||||
|
let detected_ip_v6: Option<IpAddr> = probe.detected_ipv6.map(IpAddr::V6);
|
||||||
|
debug!(
|
||||||
|
"Detected IPs: v4={:?} v6={:?}",
|
||||||
|
detected_ip_v4, detected_ip_v6
|
||||||
|
);
|
||||||
|
|
||||||
|
startup_tracker
|
||||||
|
.start_component(
|
||||||
|
COMPONENT_CONFIG_WATCHER_START,
|
||||||
|
Some("spawn config hot-reload watcher".to_string()),
|
||||||
|
)
|
||||||
|
.await;
|
||||||
|
let (config_rx, log_level_rx): (
|
||||||
|
watch::Receiver<Arc<ProxyConfig>>,
|
||||||
|
watch::Receiver<LogLevel>,
|
||||||
|
) = spawn_config_watcher(
|
||||||
|
PathBuf::from(config_path),
|
||||||
|
config.clone(),
|
||||||
|
detected_ip_v4,
|
||||||
|
detected_ip_v6,
|
||||||
|
);
|
||||||
|
startup_tracker
|
||||||
|
.complete_component(
|
||||||
|
COMPONENT_CONFIG_WATCHER_START,
|
||||||
|
Some("config hot-reload watcher started".to_string()),
|
||||||
|
)
|
||||||
|
.await;
|
||||||
|
let mut config_rx_api_bridge = config_rx.clone();
|
||||||
|
let api_config_tx_bridge = api_config_tx.clone();
|
||||||
|
tokio::spawn(async move {
|
||||||
|
loop {
|
||||||
|
if config_rx_api_bridge.changed().await.is_err() {
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
let cfg = config_rx_api_bridge.borrow_and_update().clone();
|
||||||
|
api_config_tx_bridge.send_replace(cfg);
|
||||||
|
}
|
||||||
|
});
|
||||||
|
|
||||||
|
let stats_policy = stats.clone();
|
||||||
|
let mut config_rx_policy = config_rx.clone();
|
||||||
|
tokio::spawn(async move {
|
||||||
|
loop {
|
||||||
|
if config_rx_policy.changed().await.is_err() {
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
let cfg = config_rx_policy.borrow_and_update().clone();
|
||||||
|
stats_policy.apply_telemetry_policy(TelemetryPolicy::from_config(&cfg.general.telemetry));
|
||||||
|
if let Some(pool) = &me_pool_for_policy {
|
||||||
|
pool.update_runtime_transport_policy(
|
||||||
|
cfg.general.me_socks_kdf_policy,
|
||||||
|
cfg.general.me_route_backpressure_base_timeout_ms,
|
||||||
|
cfg.general.me_route_backpressure_high_timeout_ms,
|
||||||
|
cfg.general.me_route_backpressure_high_watermark_pct,
|
||||||
|
cfg.general.me_reader_route_data_wait_ms,
|
||||||
|
);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
});
|
||||||
|
|
||||||
|
let ip_tracker_policy = ip_tracker.clone();
|
||||||
|
let mut config_rx_ip_limits = config_rx.clone();
|
||||||
|
tokio::spawn(async move {
|
||||||
|
let mut prev_limits = config_rx_ip_limits.borrow().access.user_max_unique_ips.clone();
|
||||||
|
let mut prev_mode = config_rx_ip_limits.borrow().access.user_max_unique_ips_mode;
|
||||||
|
let mut prev_window = config_rx_ip_limits
|
||||||
|
.borrow()
|
||||||
|
.access
|
||||||
|
.user_max_unique_ips_window_secs;
|
||||||
|
|
||||||
|
loop {
|
||||||
|
if config_rx_ip_limits.changed().await.is_err() {
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
let cfg = config_rx_ip_limits.borrow_and_update().clone();
|
||||||
|
|
||||||
|
if prev_limits != cfg.access.user_max_unique_ips {
|
||||||
|
ip_tracker_policy.load_limits(&cfg.access.user_max_unique_ips).await;
|
||||||
|
prev_limits = cfg.access.user_max_unique_ips.clone();
|
||||||
|
}
|
||||||
|
|
||||||
|
if prev_mode != cfg.access.user_max_unique_ips_mode
|
||||||
|
|| prev_window != cfg.access.user_max_unique_ips_window_secs
|
||||||
|
{
|
||||||
|
ip_tracker_policy
|
||||||
|
.set_limit_policy(
|
||||||
|
cfg.access.user_max_unique_ips_mode,
|
||||||
|
cfg.access.user_max_unique_ips_window_secs,
|
||||||
|
)
|
||||||
|
.await;
|
||||||
|
prev_mode = cfg.access.user_max_unique_ips_mode;
|
||||||
|
prev_window = cfg.access.user_max_unique_ips_window_secs;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
});
|
||||||
|
|
||||||
|
let beobachten_writer = beobachten.clone();
|
||||||
|
let config_rx_beobachten = config_rx.clone();
|
||||||
|
tokio::spawn(async move {
|
||||||
|
loop {
|
||||||
|
let cfg = config_rx_beobachten.borrow().clone();
|
||||||
|
let sleep_secs = cfg.general.beobachten_flush_secs.max(1);
|
||||||
|
|
||||||
|
if cfg.general.beobachten {
|
||||||
|
let ttl = std::time::Duration::from_secs(cfg.general.beobachten_minutes.saturating_mul(60));
|
||||||
|
let path = cfg.general.beobachten_file.clone();
|
||||||
|
let snapshot = beobachten_writer.snapshot_text(ttl);
|
||||||
|
if let Err(e) = write_beobachten_snapshot(&path, &snapshot).await {
|
||||||
|
warn!(error = %e, path = %path, "Failed to flush beobachten snapshot");
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
tokio::time::sleep(std::time::Duration::from_secs(sleep_secs)).await;
|
||||||
|
}
|
||||||
|
});
|
||||||
|
|
||||||
|
if let Some(pool) = me_pool {
|
||||||
|
let reinit_trigger_capacity = config.general.me_reinit_trigger_channel.max(1);
|
||||||
|
let (reinit_tx, reinit_rx) = mpsc::channel::<MeReinitTrigger>(reinit_trigger_capacity);
|
||||||
|
|
||||||
|
let pool_clone_sched = pool.clone();
|
||||||
|
let rng_clone_sched = rng.clone();
|
||||||
|
let config_rx_clone_sched = config_rx.clone();
|
||||||
|
tokio::spawn(async move {
|
||||||
|
crate::transport::middle_proxy::me_reinit_scheduler(
|
||||||
|
pool_clone_sched,
|
||||||
|
rng_clone_sched,
|
||||||
|
config_rx_clone_sched,
|
||||||
|
reinit_rx,
|
||||||
|
)
|
||||||
|
.await;
|
||||||
|
});
|
||||||
|
|
||||||
|
let pool_clone = pool.clone();
|
||||||
|
let config_rx_clone = config_rx.clone();
|
||||||
|
let reinit_tx_updater = reinit_tx.clone();
|
||||||
|
tokio::spawn(async move {
|
||||||
|
crate::transport::middle_proxy::me_config_updater(
|
||||||
|
pool_clone,
|
||||||
|
config_rx_clone,
|
||||||
|
reinit_tx_updater,
|
||||||
|
)
|
||||||
|
.await;
|
||||||
|
});
|
||||||
|
|
||||||
|
let config_rx_clone_rot = config_rx.clone();
|
||||||
|
let reinit_tx_rotation = reinit_tx.clone();
|
||||||
|
tokio::spawn(async move {
|
||||||
|
crate::transport::middle_proxy::me_rotation_task(config_rx_clone_rot, reinit_tx_rotation)
|
||||||
|
.await;
|
||||||
|
});
|
||||||
|
}
|
||||||
|
|
||||||
|
RuntimeWatches {
|
||||||
|
config_rx,
|
||||||
|
log_level_rx,
|
||||||
|
detected_ip_v4,
|
||||||
|
detected_ip_v6,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
pub(crate) async fn apply_runtime_log_filter(
|
||||||
|
has_rust_log: bool,
|
||||||
|
effective_log_level: &LogLevel,
|
||||||
|
filter_handle: reload::Handle<EnvFilter, tracing_subscriber::Registry>,
|
||||||
|
mut log_level_rx: watch::Receiver<LogLevel>,
|
||||||
|
) {
|
||||||
|
let runtime_filter = if has_rust_log {
|
||||||
|
EnvFilter::from_default_env()
|
||||||
|
} else if matches!(effective_log_level, LogLevel::Silent) {
|
||||||
|
EnvFilter::new("warn,telemt::links=info")
|
||||||
|
} else {
|
||||||
|
EnvFilter::new(effective_log_level.to_filter_str())
|
||||||
|
};
|
||||||
|
filter_handle
|
||||||
|
.reload(runtime_filter)
|
||||||
|
.expect("Failed to switch log filter");
|
||||||
|
|
||||||
|
tokio::spawn(async move {
|
||||||
|
loop {
|
||||||
|
if log_level_rx.changed().await.is_err() {
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
let level = log_level_rx.borrow_and_update().clone();
|
||||||
|
let new_filter = tracing_subscriber::EnvFilter::new(level.to_filter_str());
|
||||||
|
if let Err(e) = filter_handle.reload(new_filter) {
|
||||||
|
tracing::error!("config reload: failed to update log filter: {}", e);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
});
|
||||||
|
}
|
||||||
|
|
||||||
|
pub(crate) async fn spawn_metrics_if_configured(
|
||||||
|
config: &Arc<ProxyConfig>,
|
||||||
|
startup_tracker: &Arc<StartupTracker>,
|
||||||
|
stats: Arc<Stats>,
|
||||||
|
beobachten: Arc<BeobachtenStore>,
|
||||||
|
ip_tracker: Arc<UserIpTracker>,
|
||||||
|
config_rx: watch::Receiver<Arc<ProxyConfig>>,
|
||||||
|
) {
|
||||||
|
if let Some(port) = config.server.metrics_port {
|
||||||
|
startup_tracker
|
||||||
|
.start_component(
|
||||||
|
COMPONENT_METRICS_START,
|
||||||
|
Some(format!("spawn metrics endpoint on {}", port)),
|
||||||
|
)
|
||||||
|
.await;
|
||||||
|
let stats = stats.clone();
|
||||||
|
let beobachten = beobachten.clone();
|
||||||
|
let config_rx_metrics = config_rx.clone();
|
||||||
|
let ip_tracker_metrics = ip_tracker.clone();
|
||||||
|
let whitelist = config.server.metrics_whitelist.clone();
|
||||||
|
tokio::spawn(async move {
|
||||||
|
metrics::serve(
|
||||||
|
port,
|
||||||
|
stats,
|
||||||
|
beobachten,
|
||||||
|
ip_tracker_metrics,
|
||||||
|
config_rx_metrics,
|
||||||
|
whitelist,
|
||||||
|
)
|
||||||
|
.await;
|
||||||
|
});
|
||||||
|
startup_tracker
|
||||||
|
.complete_component(
|
||||||
|
COMPONENT_METRICS_START,
|
||||||
|
Some("metrics task spawned".to_string()),
|
||||||
|
)
|
||||||
|
.await;
|
||||||
|
} else {
|
||||||
|
startup_tracker
|
||||||
|
.skip_component(
|
||||||
|
COMPONENT_METRICS_START,
|
||||||
|
Some("server.metrics_port is not configured".to_string()),
|
||||||
|
)
|
||||||
|
.await;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
pub(crate) async fn mark_runtime_ready(startup_tracker: &Arc<StartupTracker>) {
|
||||||
|
startup_tracker
|
||||||
|
.complete_component(
|
||||||
|
COMPONENT_RUNTIME_READY,
|
||||||
|
Some("startup pipeline is fully initialized".to_string()),
|
||||||
|
)
|
||||||
|
.await;
|
||||||
|
startup_tracker.mark_ready().await;
|
||||||
|
}
|
||||||
42
src/maestro/shutdown.rs
Normal file
42
src/maestro/shutdown.rs
Normal file
@@ -0,0 +1,42 @@
|
|||||||
|
use std::sync::Arc;
|
||||||
|
use std::time::{Duration, Instant};
|
||||||
|
|
||||||
|
use tokio::signal;
|
||||||
|
use tracing::{error, info, warn};
|
||||||
|
|
||||||
|
use crate::transport::middle_proxy::MePool;
|
||||||
|
|
||||||
|
use super::helpers::{format_uptime, unit_label};
|
||||||
|
|
||||||
|
pub(crate) async fn wait_for_shutdown(process_started_at: Instant, me_pool: Option<Arc<MePool>>) {
|
||||||
|
match signal::ctrl_c().await {
|
||||||
|
Ok(()) => {
|
||||||
|
let shutdown_started_at = Instant::now();
|
||||||
|
info!("Shutting down...");
|
||||||
|
let uptime_secs = process_started_at.elapsed().as_secs();
|
||||||
|
info!("Uptime: {}", format_uptime(uptime_secs));
|
||||||
|
if let Some(pool) = &me_pool {
|
||||||
|
match tokio::time::timeout(Duration::from_secs(2), pool.shutdown_send_close_conn_all())
|
||||||
|
.await
|
||||||
|
{
|
||||||
|
Ok(total) => {
|
||||||
|
info!(
|
||||||
|
close_conn_sent = total,
|
||||||
|
"ME shutdown: RPC_CLOSE_CONN broadcast completed"
|
||||||
|
);
|
||||||
|
}
|
||||||
|
Err(_) => {
|
||||||
|
warn!("ME shutdown: RPC_CLOSE_CONN broadcast timed out");
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
let shutdown_secs = shutdown_started_at.elapsed().as_secs();
|
||||||
|
info!(
|
||||||
|
"Shutdown completed successfully in {} {}.",
|
||||||
|
shutdown_secs,
|
||||||
|
unit_label(shutdown_secs, "second", "seconds")
|
||||||
|
);
|
||||||
|
}
|
||||||
|
Err(e) => error!("Signal error: {}", e),
|
||||||
|
}
|
||||||
|
}
|
||||||
165
src/maestro/tls_bootstrap.rs
Normal file
165
src/maestro/tls_bootstrap.rs
Normal file
@@ -0,0 +1,165 @@
|
|||||||
|
use std::sync::Arc;
|
||||||
|
use std::time::Duration;
|
||||||
|
|
||||||
|
use rand::Rng;
|
||||||
|
use tracing::warn;
|
||||||
|
|
||||||
|
use crate::config::ProxyConfig;
|
||||||
|
use crate::startup::{COMPONENT_TLS_FRONT_BOOTSTRAP, StartupTracker};
|
||||||
|
use crate::tls_front::TlsFrontCache;
|
||||||
|
use crate::transport::UpstreamManager;
|
||||||
|
|
||||||
|
pub(crate) async fn bootstrap_tls_front(
|
||||||
|
config: &ProxyConfig,
|
||||||
|
tls_domains: &[String],
|
||||||
|
upstream_manager: Arc<UpstreamManager>,
|
||||||
|
startup_tracker: &Arc<StartupTracker>,
|
||||||
|
) -> Option<Arc<TlsFrontCache>> {
|
||||||
|
startup_tracker
|
||||||
|
.start_component(
|
||||||
|
COMPONENT_TLS_FRONT_BOOTSTRAP,
|
||||||
|
Some("initialize TLS front cache/bootstrap tasks".to_string()),
|
||||||
|
)
|
||||||
|
.await;
|
||||||
|
|
||||||
|
let tls_cache: Option<Arc<TlsFrontCache>> = if config.censorship.tls_emulation {
|
||||||
|
let cache = Arc::new(TlsFrontCache::new(
|
||||||
|
tls_domains,
|
||||||
|
config.censorship.fake_cert_len,
|
||||||
|
&config.censorship.tls_front_dir,
|
||||||
|
));
|
||||||
|
cache.load_from_disk().await;
|
||||||
|
|
||||||
|
let port = config.censorship.mask_port;
|
||||||
|
let proxy_protocol = config.censorship.mask_proxy_protocol;
|
||||||
|
let mask_host = config
|
||||||
|
.censorship
|
||||||
|
.mask_host
|
||||||
|
.clone()
|
||||||
|
.unwrap_or_else(|| config.censorship.tls_domain.clone());
|
||||||
|
let mask_unix_sock = config.censorship.mask_unix_sock.clone();
|
||||||
|
let fetch_timeout = Duration::from_secs(5);
|
||||||
|
|
||||||
|
let cache_initial = cache.clone();
|
||||||
|
let domains_initial = tls_domains.to_vec();
|
||||||
|
let host_initial = mask_host.clone();
|
||||||
|
let unix_sock_initial = mask_unix_sock.clone();
|
||||||
|
let upstream_initial = upstream_manager.clone();
|
||||||
|
tokio::spawn(async move {
|
||||||
|
let mut join = tokio::task::JoinSet::new();
|
||||||
|
for domain in domains_initial {
|
||||||
|
let cache_domain = cache_initial.clone();
|
||||||
|
let host_domain = host_initial.clone();
|
||||||
|
let unix_sock_domain = unix_sock_initial.clone();
|
||||||
|
let upstream_domain = upstream_initial.clone();
|
||||||
|
join.spawn(async move {
|
||||||
|
match crate::tls_front::fetcher::fetch_real_tls(
|
||||||
|
&host_domain,
|
||||||
|
port,
|
||||||
|
&domain,
|
||||||
|
fetch_timeout,
|
||||||
|
Some(upstream_domain),
|
||||||
|
proxy_protocol,
|
||||||
|
unix_sock_domain.as_deref(),
|
||||||
|
)
|
||||||
|
.await
|
||||||
|
{
|
||||||
|
Ok(res) => cache_domain.update_from_fetch(&domain, res).await,
|
||||||
|
Err(e) => {
|
||||||
|
warn!(domain = %domain, error = %e, "TLS emulation initial fetch failed")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
});
|
||||||
|
}
|
||||||
|
while let Some(res) = join.join_next().await {
|
||||||
|
if let Err(e) = res {
|
||||||
|
warn!(error = %e, "TLS emulation initial fetch task join failed");
|
||||||
|
}
|
||||||
|
}
|
||||||
|
});
|
||||||
|
|
||||||
|
let cache_timeout = cache.clone();
|
||||||
|
let domains_timeout = tls_domains.to_vec();
|
||||||
|
let fake_cert_len = config.censorship.fake_cert_len;
|
||||||
|
tokio::spawn(async move {
|
||||||
|
tokio::time::sleep(fetch_timeout).await;
|
||||||
|
for domain in domains_timeout {
|
||||||
|
let cached = cache_timeout.get(&domain).await;
|
||||||
|
if cached.domain == "default" {
|
||||||
|
warn!(
|
||||||
|
domain = %domain,
|
||||||
|
timeout_secs = fetch_timeout.as_secs(),
|
||||||
|
fake_cert_len,
|
||||||
|
"TLS-front fetch not ready within timeout; using cache/default fake cert fallback"
|
||||||
|
);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
});
|
||||||
|
|
||||||
|
let cache_refresh = cache.clone();
|
||||||
|
let domains_refresh = tls_domains.to_vec();
|
||||||
|
let host_refresh = mask_host.clone();
|
||||||
|
let unix_sock_refresh = mask_unix_sock.clone();
|
||||||
|
let upstream_refresh = upstream_manager.clone();
|
||||||
|
tokio::spawn(async move {
|
||||||
|
loop {
|
||||||
|
let base_secs = rand::rng().random_range(4 * 3600..=6 * 3600);
|
||||||
|
let jitter_secs = rand::rng().random_range(0..=7200);
|
||||||
|
tokio::time::sleep(Duration::from_secs(base_secs + jitter_secs)).await;
|
||||||
|
|
||||||
|
let mut join = tokio::task::JoinSet::new();
|
||||||
|
for domain in domains_refresh.clone() {
|
||||||
|
let cache_domain = cache_refresh.clone();
|
||||||
|
let host_domain = host_refresh.clone();
|
||||||
|
let unix_sock_domain = unix_sock_refresh.clone();
|
||||||
|
let upstream_domain = upstream_refresh.clone();
|
||||||
|
join.spawn(async move {
|
||||||
|
match crate::tls_front::fetcher::fetch_real_tls(
|
||||||
|
&host_domain,
|
||||||
|
port,
|
||||||
|
&domain,
|
||||||
|
fetch_timeout,
|
||||||
|
Some(upstream_domain),
|
||||||
|
proxy_protocol,
|
||||||
|
unix_sock_domain.as_deref(),
|
||||||
|
)
|
||||||
|
.await
|
||||||
|
{
|
||||||
|
Ok(res) => cache_domain.update_from_fetch(&domain, res).await,
|
||||||
|
Err(e) => {
|
||||||
|
warn!(domain = %domain, error = %e, "TLS emulation refresh failed")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
});
|
||||||
|
}
|
||||||
|
|
||||||
|
while let Some(res) = join.join_next().await {
|
||||||
|
if let Err(e) = res {
|
||||||
|
warn!(error = %e, "TLS emulation refresh task join failed");
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
});
|
||||||
|
|
||||||
|
Some(cache)
|
||||||
|
} else {
|
||||||
|
startup_tracker
|
||||||
|
.skip_component(
|
||||||
|
COMPONENT_TLS_FRONT_BOOTSTRAP,
|
||||||
|
Some("censorship.tls_emulation is false".to_string()),
|
||||||
|
)
|
||||||
|
.await;
|
||||||
|
None
|
||||||
|
};
|
||||||
|
|
||||||
|
if tls_cache.is_some() {
|
||||||
|
startup_tracker
|
||||||
|
.complete_component(
|
||||||
|
COMPONENT_TLS_FRONT_BOOTSTRAP,
|
||||||
|
Some("tls front cache is initialized".to_string()),
|
||||||
|
)
|
||||||
|
.await;
|
||||||
|
}
|
||||||
|
|
||||||
|
tls_cache
|
||||||
|
}
|
||||||
1030
src/main.rs
1030
src/main.rs
File diff suppressed because it is too large
Load Diff
1802
src/metrics.rs
1802
src/metrics.rs
File diff suppressed because it is too large
Load Diff
197
src/network/dns_overrides.rs
Normal file
197
src/network/dns_overrides.rs
Normal file
@@ -0,0 +1,197 @@
|
|||||||
|
//! Runtime DNS overrides for `host:port` targets.
|
||||||
|
|
||||||
|
use std::collections::HashMap;
|
||||||
|
use std::net::{IpAddr, Ipv6Addr, SocketAddr};
|
||||||
|
use std::sync::{OnceLock, RwLock};
|
||||||
|
|
||||||
|
use crate::error::{ProxyError, Result};
|
||||||
|
|
||||||
|
/// Key is the lowercased `(host, port)` pair; value is the override target IP.
type OverrideMap = HashMap<(String, u16), IpAddr>;

/// Process-wide override table, created lazily on first access.
static DNS_OVERRIDES: OnceLock<RwLock<OverrideMap>> = OnceLock::new();

/// Borrow the global override table, initializing it to an empty map on the
/// first call.
fn overrides_store() -> &'static RwLock<OverrideMap> {
    DNS_OVERRIDES.get_or_init(|| RwLock::new(OverrideMap::new()))
}
|
||||||
|
|
||||||
|
fn parse_ip_spec(ip_spec: &str) -> Result<IpAddr> {
|
||||||
|
if ip_spec.starts_with('[') && ip_spec.ends_with(']') {
|
||||||
|
let inner = &ip_spec[1..ip_spec.len() - 1];
|
||||||
|
let ipv6 = inner.parse::<Ipv6Addr>().map_err(|_| {
|
||||||
|
ProxyError::Config(format!(
|
||||||
|
"network.dns_overrides IPv6 override is invalid: '{ip_spec}'"
|
||||||
|
))
|
||||||
|
})?;
|
||||||
|
return Ok(IpAddr::V6(ipv6));
|
||||||
|
}
|
||||||
|
|
||||||
|
let ip = ip_spec.parse::<IpAddr>().map_err(|_| {
|
||||||
|
ProxyError::Config(format!(
|
||||||
|
"network.dns_overrides IP is invalid: '{ip_spec}'"
|
||||||
|
))
|
||||||
|
})?;
|
||||||
|
if matches!(ip, IpAddr::V6(_)) {
|
||||||
|
return Err(ProxyError::Config(format!(
|
||||||
|
"network.dns_overrides IPv6 must be bracketed: '{ip_spec}'"
|
||||||
|
)));
|
||||||
|
}
|
||||||
|
Ok(ip)
|
||||||
|
}
|
||||||
|
|
||||||
|
fn parse_entry(entry: &str) -> Result<((String, u16), IpAddr)> {
|
||||||
|
let trimmed = entry.trim();
|
||||||
|
if trimmed.is_empty() {
|
||||||
|
return Err(ProxyError::Config(
|
||||||
|
"network.dns_overrides entry cannot be empty".to_string(),
|
||||||
|
));
|
||||||
|
}
|
||||||
|
|
||||||
|
let first_sep = trimmed.find(':').ok_or_else(|| {
|
||||||
|
ProxyError::Config(format!(
|
||||||
|
"network.dns_overrides entry must use host:port:ip format: '{trimmed}'"
|
||||||
|
))
|
||||||
|
})?;
|
||||||
|
let second_sep = trimmed[first_sep + 1..]
|
||||||
|
.find(':')
|
||||||
|
.map(|idx| first_sep + 1 + idx)
|
||||||
|
.ok_or_else(|| {
|
||||||
|
ProxyError::Config(format!(
|
||||||
|
"network.dns_overrides entry must use host:port:ip format: '{trimmed}'"
|
||||||
|
))
|
||||||
|
})?;
|
||||||
|
|
||||||
|
let host = trimmed[..first_sep].trim();
|
||||||
|
let port_str = trimmed[first_sep + 1..second_sep].trim();
|
||||||
|
let ip_str = trimmed[second_sep + 1..].trim();
|
||||||
|
|
||||||
|
if host.is_empty() {
|
||||||
|
return Err(ProxyError::Config(format!(
|
||||||
|
"network.dns_overrides host cannot be empty: '{trimmed}'"
|
||||||
|
)));
|
||||||
|
}
|
||||||
|
if host.contains(':') {
|
||||||
|
return Err(ProxyError::Config(format!(
|
||||||
|
"network.dns_overrides host must be a domain name without ':' in this format: '{trimmed}'"
|
||||||
|
)));
|
||||||
|
}
|
||||||
|
|
||||||
|
let port = port_str.parse::<u16>().map_err(|_| {
|
||||||
|
ProxyError::Config(format!(
|
||||||
|
"network.dns_overrides port is invalid: '{trimmed}'"
|
||||||
|
))
|
||||||
|
})?;
|
||||||
|
let ip = parse_ip_spec(ip_str)?;
|
||||||
|
|
||||||
|
Ok(((host.to_ascii_lowercase(), port), ip))
|
||||||
|
}
|
||||||
|
|
||||||
|
fn parse_entries(entries: &[String]) -> Result<OverrideMap> {
|
||||||
|
let mut parsed = HashMap::new();
|
||||||
|
for entry in entries {
|
||||||
|
let (key, ip) = parse_entry(entry)?;
|
||||||
|
parsed.insert(key, ip);
|
||||||
|
}
|
||||||
|
Ok(parsed)
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Validate `network.dns_overrides` entries without updating runtime state.
|
||||||
|
pub fn validate_entries(entries: &[String]) -> Result<()> {
|
||||||
|
let _ = parse_entries(entries)?;
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Replace runtime DNS overrides with a new validated snapshot.
|
||||||
|
pub fn install_entries(entries: &[String]) -> Result<()> {
|
||||||
|
let parsed = parse_entries(entries)?;
|
||||||
|
let mut guard = overrides_store()
|
||||||
|
.write()
|
||||||
|
.map_err(|_| ProxyError::Config("network.dns_overrides runtime lock is poisoned".to_string()))?;
|
||||||
|
*guard = parsed;
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Resolve a hostname override for `(host, port)` if present.
|
||||||
|
pub fn resolve(host: &str, port: u16) -> Option<IpAddr> {
|
||||||
|
let key = (host.to_ascii_lowercase(), port);
|
||||||
|
overrides_store()
|
||||||
|
.read()
|
||||||
|
.ok()
|
||||||
|
.and_then(|guard| guard.get(&key).copied())
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Resolve a hostname override and construct a socket address when present.
|
||||||
|
pub fn resolve_socket_addr(host: &str, port: u16) -> Option<SocketAddr> {
|
||||||
|
resolve(host, port).map(|ip| SocketAddr::new(ip, port))
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Parse a runtime endpoint in `host:port` format, lowercasing the host.
///
/// Supported shapes:
/// - `example.com:443`
/// - `[2001:db8::1]:443`
///
/// Bare IPv6 (`2001:db8::1:443`) is rejected because the port separator would
/// be ambiguous.
pub fn split_host_port(endpoint: &str) -> Option<(String, u16)> {
    // Bracketed IPv6: `[host]:port`.
    if let Some(rest) = endpoint.strip_prefix('[') {
        let (host_raw, after_bracket) = rest.split_once(']')?;
        let port_raw = after_bracket.strip_prefix(':')?;
        let host = host_raw.trim();
        if host.is_empty() {
            return None;
        }
        let port = port_raw.trim().parse::<u16>().ok()?;
        return Some((host.to_ascii_lowercase(), port));
    }

    // Plain `host:port` — split on the LAST colon, then reject hosts that
    // still contain ':' (unbracketed IPv6).
    let (host_raw, port_raw) = endpoint.rsplit_once(':')?;
    let host = host_raw.trim();
    let port = port_raw.trim().parse::<u16>().ok()?;
    if host.is_empty() || host.contains(':') {
        return None;
    }
    Some((host.to_ascii_lowercase(), port))
}
|
||||||
|
|
||||||
|
#[cfg(test)]
mod tests {
    use super::*;

    // Happy path: one bare IPv4 entry and one bracketed IPv6 entry both parse.
    #[test]
    fn validate_accepts_ipv4_and_bracketed_ipv6() {
        let entries = vec![
            "example.com:443:127.0.0.1".to_string(),
            "example.net:8443:[2001:db8::10]".to_string(),
        ];
        assert!(validate_entries(&entries).is_ok());
    }

    // Bare IPv6 is ambiguous with the ':' field separators and must be rejected.
    #[test]
    fn validate_rejects_unbracketed_ipv6() {
        let entries = vec!["example.net:443:2001:db8::10".to_string()];
        let err = validate_entries(&entries).unwrap_err().to_string();
        assert!(err.contains("must be bracketed"));
    }

    // NOTE(review): this test mutates the process-global override table; it is
    // safe while it is the only test that calls install_entries, but adding
    // more install/resolve tests could race under the parallel test runner.
    #[test]
    fn install_and_resolve_are_case_insensitive_for_host() {
        let entries = vec!["MyPetrovich.ru:8443:127.0.0.1".to_string()];
        install_entries(&entries).unwrap();

        let resolved = resolve("mypetrovich.ru", 8443);
        assert_eq!(resolved, Some("127.0.0.1".parse().unwrap()));
    }

    // Covers plain host:port, bracketed IPv6, and the rejected bare-IPv6 shape.
    #[test]
    fn split_host_port_parses_supported_shapes() {
        assert_eq!(
            split_host_port("example.com:443"),
            Some(("example.com".to_string(), 443))
        );
        assert_eq!(
            split_host_port("[2001:db8::1]:443"),
            Some(("2001:db8::1".to_string(), 443))
        );
        assert_eq!(split_host_port("2001:db8::1:443"), None);
    }
}
|
||||||
@@ -1,3 +1,4 @@
|
|||||||
|
pub mod dns_overrides;
|
||||||
pub mod probe;
|
pub mod probe;
|
||||||
pub mod stun;
|
pub mod stun;
|
||||||
|
|
||||||
|
|||||||
@@ -1,10 +1,16 @@
|
|||||||
use std::net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr, UdpSocket};
|
#![allow(dead_code)]
|
||||||
|
|
||||||
use tracing::{info, warn};
|
use std::collections::HashMap;
|
||||||
|
use std::net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr, UdpSocket};
|
||||||
|
use std::time::Duration;
|
||||||
|
|
||||||
|
use tokio::task::JoinSet;
|
||||||
|
use tokio::time::timeout;
|
||||||
|
use tracing::{debug, info, warn};
|
||||||
|
|
||||||
use crate::config::NetworkConfig;
|
use crate::config::NetworkConfig;
|
||||||
use crate::error::Result;
|
use crate::error::Result;
|
||||||
use crate::network::stun::{stun_probe_dual, DualStunResult, IpFamily};
|
use crate::network::stun::{stun_probe_dual, DualStunResult, IpFamily, StunProbeResult};
|
||||||
|
|
||||||
#[derive(Debug, Clone, Default)]
|
#[derive(Debug, Clone, Default)]
|
||||||
pub struct NetworkProbe {
|
pub struct NetworkProbe {
|
||||||
@@ -47,7 +53,13 @@ impl NetworkDecision {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
pub async fn run_probe(config: &NetworkConfig, stun_addr: Option<String>, nat_probe: bool) -> Result<NetworkProbe> {
|
const STUN_BATCH_TIMEOUT: Duration = Duration::from_secs(5);
|
||||||
|
|
||||||
|
pub async fn run_probe(
|
||||||
|
config: &NetworkConfig,
|
||||||
|
nat_probe: bool,
|
||||||
|
stun_nat_probe_concurrency: usize,
|
||||||
|
) -> Result<NetworkProbe> {
|
||||||
let mut probe = NetworkProbe::default();
|
let mut probe = NetworkProbe::default();
|
||||||
|
|
||||||
probe.detected_ipv4 = detect_local_ip_v4();
|
probe.detected_ipv4 = detect_local_ip_v4();
|
||||||
@@ -56,21 +68,38 @@ pub async fn run_probe(config: &NetworkConfig, stun_addr: Option<String>, nat_pr
|
|||||||
probe.ipv4_is_bogon = probe.detected_ipv4.map(is_bogon_v4).unwrap_or(false);
|
probe.ipv4_is_bogon = probe.detected_ipv4.map(is_bogon_v4).unwrap_or(false);
|
||||||
probe.ipv6_is_bogon = probe.detected_ipv6.map(is_bogon_v6).unwrap_or(false);
|
probe.ipv6_is_bogon = probe.detected_ipv6.map(is_bogon_v6).unwrap_or(false);
|
||||||
|
|
||||||
let stun_server = stun_addr.unwrap_or_else(|| "stun.l.google.com:19302".to_string());
|
let stun_res = if nat_probe && config.stun_use {
|
||||||
let stun_res = if nat_probe {
|
let servers = collect_stun_servers(config);
|
||||||
match stun_probe_dual(&stun_server).await {
|
if servers.is_empty() {
|
||||||
Ok(res) => res,
|
warn!("STUN probe is enabled but network.stun_servers is empty");
|
||||||
Err(e) => {
|
|
||||||
warn!(error = %e, "STUN probe failed, continuing without reflection");
|
|
||||||
DualStunResult::default()
|
DualStunResult::default()
|
||||||
|
} else {
|
||||||
|
probe_stun_servers_parallel(
|
||||||
|
&servers,
|
||||||
|
stun_nat_probe_concurrency.max(1),
|
||||||
|
)
|
||||||
|
.await
|
||||||
}
|
}
|
||||||
}
|
} else if nat_probe {
|
||||||
|
info!("STUN probe is disabled by network.stun_use=false");
|
||||||
|
DualStunResult::default()
|
||||||
} else {
|
} else {
|
||||||
DualStunResult::default()
|
DualStunResult::default()
|
||||||
};
|
};
|
||||||
probe.reflected_ipv4 = stun_res.v4.map(|r| r.reflected_addr);
|
probe.reflected_ipv4 = stun_res.v4.map(|r| r.reflected_addr);
|
||||||
probe.reflected_ipv6 = stun_res.v6.map(|r| r.reflected_addr);
|
probe.reflected_ipv6 = stun_res.v6.map(|r| r.reflected_addr);
|
||||||
|
|
||||||
|
// If STUN is blocked but IPv4 is private, try HTTP public-IP fallback.
|
||||||
|
if nat_probe
|
||||||
|
&& probe.reflected_ipv4.is_none()
|
||||||
|
&& probe.detected_ipv4.map(is_bogon_v4).unwrap_or(false)
|
||||||
|
{
|
||||||
|
if let Some(public_ip) = detect_public_ipv4_http(&config.http_ip_detect_urls).await {
|
||||||
|
probe.reflected_ipv4 = Some(SocketAddr::new(IpAddr::V4(public_ip), 0));
|
||||||
|
info!(public_ip = %public_ip, "STUN unavailable, using HTTP public IPv4 fallback");
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
probe.ipv4_nat_detected = match (probe.detected_ipv4, probe.reflected_ipv4) {
|
probe.ipv4_nat_detected = match (probe.detected_ipv4, probe.reflected_ipv4) {
|
||||||
(Some(det), Some(reflected)) => det != reflected.ip(),
|
(Some(det), Some(reflected)) => det != reflected.ip(),
|
||||||
_ => false,
|
_ => false,
|
||||||
@@ -92,24 +121,127 @@ pub async fn run_probe(config: &NetworkConfig, stun_addr: Option<String>, nat_pr
|
|||||||
Ok(probe)
|
Ok(probe)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
async fn detect_public_ipv4_http(urls: &[String]) -> Option<Ipv4Addr> {
|
||||||
|
let client = reqwest::Client::builder()
|
||||||
|
.timeout(Duration::from_secs(3))
|
||||||
|
.build()
|
||||||
|
.ok()?;
|
||||||
|
|
||||||
|
for url in urls {
|
||||||
|
let response = match client.get(url).send().await {
|
||||||
|
Ok(response) => response,
|
||||||
|
Err(_) => continue,
|
||||||
|
};
|
||||||
|
|
||||||
|
let body = match response.text().await {
|
||||||
|
Ok(body) => body,
|
||||||
|
Err(_) => continue,
|
||||||
|
};
|
||||||
|
|
||||||
|
let Ok(ip) = body.trim().parse::<Ipv4Addr>() else {
|
||||||
|
continue;
|
||||||
|
};
|
||||||
|
if !is_bogon_v4(ip) {
|
||||||
|
return Some(ip);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
None
|
||||||
|
}
|
||||||
|
|
||||||
|
fn collect_stun_servers(config: &NetworkConfig) -> Vec<String> {
|
||||||
|
let mut out = Vec::new();
|
||||||
|
for s in &config.stun_servers {
|
||||||
|
if !s.is_empty() && !out.contains(s) {
|
||||||
|
out.push(s.clone());
|
||||||
|
}
|
||||||
|
}
|
||||||
|
out
|
||||||
|
}
|
||||||
|
|
||||||
|
async fn probe_stun_servers_parallel(
|
||||||
|
servers: &[String],
|
||||||
|
concurrency: usize,
|
||||||
|
) -> DualStunResult {
|
||||||
|
let mut join_set = JoinSet::new();
|
||||||
|
let mut next_idx = 0usize;
|
||||||
|
let mut best_v4_by_ip: HashMap<IpAddr, (usize, StunProbeResult)> = HashMap::new();
|
||||||
|
let mut best_v6_by_ip: HashMap<IpAddr, (usize, StunProbeResult)> = HashMap::new();
|
||||||
|
|
||||||
|
while next_idx < servers.len() || !join_set.is_empty() {
|
||||||
|
while next_idx < servers.len() && join_set.len() < concurrency {
|
||||||
|
let stun_addr = servers[next_idx].clone();
|
||||||
|
next_idx += 1;
|
||||||
|
join_set.spawn(async move {
|
||||||
|
let res = timeout(STUN_BATCH_TIMEOUT, stun_probe_dual(&stun_addr)).await;
|
||||||
|
(stun_addr, res)
|
||||||
|
});
|
||||||
|
}
|
||||||
|
|
||||||
|
let Some(task) = join_set.join_next().await else {
|
||||||
|
break;
|
||||||
|
};
|
||||||
|
|
||||||
|
match task {
|
||||||
|
Ok((stun_addr, Ok(Ok(result)))) => {
|
||||||
|
if let Some(v4) = result.v4 {
|
||||||
|
let entry = best_v4_by_ip.entry(v4.reflected_addr.ip()).or_insert((0, v4));
|
||||||
|
entry.0 += 1;
|
||||||
|
}
|
||||||
|
if let Some(v6) = result.v6 {
|
||||||
|
let entry = best_v6_by_ip.entry(v6.reflected_addr.ip()).or_insert((0, v6));
|
||||||
|
entry.0 += 1;
|
||||||
|
}
|
||||||
|
if result.v4.is_some() || result.v6.is_some() {
|
||||||
|
debug!(stun = %stun_addr, "STUN server responded within probe timeout");
|
||||||
|
}
|
||||||
|
}
|
||||||
|
Ok((stun_addr, Ok(Err(e)))) => {
|
||||||
|
debug!(error = %e, stun = %stun_addr, "STUN probe failed");
|
||||||
|
}
|
||||||
|
Ok((stun_addr, Err(_))) => {
|
||||||
|
debug!(stun = %stun_addr, "STUN probe timeout");
|
||||||
|
}
|
||||||
|
Err(e) => {
|
||||||
|
debug!(error = %e, "STUN probe task join failed");
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
let mut out = DualStunResult::default();
|
||||||
|
if let Some((_, best)) = best_v4_by_ip
|
||||||
|
.into_values()
|
||||||
|
.max_by_key(|(count, _)| *count)
|
||||||
|
{
|
||||||
|
info!("STUN-Quorum reached, IP: {}", best.reflected_addr.ip());
|
||||||
|
out.v4 = Some(best);
|
||||||
|
}
|
||||||
|
if let Some((_, best)) = best_v6_by_ip
|
||||||
|
.into_values()
|
||||||
|
.max_by_key(|(count, _)| *count)
|
||||||
|
{
|
||||||
|
info!("STUN-Quorum reached, IP: {}", best.reflected_addr.ip());
|
||||||
|
out.v6 = Some(best);
|
||||||
|
}
|
||||||
|
out
|
||||||
|
}
|
||||||
|
|
||||||
pub fn decide_network_capabilities(config: &NetworkConfig, probe: &NetworkProbe) -> NetworkDecision {
|
pub fn decide_network_capabilities(config: &NetworkConfig, probe: &NetworkProbe) -> NetworkDecision {
|
||||||
let mut decision = NetworkDecision::default();
|
let ipv4_dc = config.ipv4 && probe.detected_ipv4.is_some();
|
||||||
|
let ipv6_dc = config.ipv6.unwrap_or(probe.detected_ipv6.is_some()) && probe.detected_ipv6.is_some();
|
||||||
|
|
||||||
decision.ipv4_dc = config.ipv4 && probe.detected_ipv4.is_some();
|
let ipv4_me = config.ipv4
|
||||||
decision.ipv6_dc = config.ipv6.unwrap_or(probe.detected_ipv6.is_some()) && probe.detected_ipv6.is_some();
|
|
||||||
|
|
||||||
decision.ipv4_me = config.ipv4
|
|
||||||
&& probe.detected_ipv4.is_some()
|
&& probe.detected_ipv4.is_some()
|
||||||
&& (!probe.ipv4_is_bogon || probe.reflected_ipv4.is_some());
|
&& (!probe.ipv4_is_bogon || probe.reflected_ipv4.is_some());
|
||||||
|
|
||||||
let ipv6_enabled = config.ipv6.unwrap_or(probe.detected_ipv6.is_some());
|
let ipv6_enabled = config.ipv6.unwrap_or(probe.detected_ipv6.is_some());
|
||||||
decision.ipv6_me = ipv6_enabled
|
let ipv6_me = ipv6_enabled
|
||||||
&& probe.detected_ipv6.is_some()
|
&& probe.detected_ipv6.is_some()
|
||||||
&& (!probe.ipv6_is_bogon || probe.reflected_ipv6.is_some());
|
&& (!probe.ipv6_is_bogon || probe.reflected_ipv6.is_some());
|
||||||
|
|
||||||
decision.effective_prefer = match config.prefer {
|
let effective_prefer = match config.prefer {
|
||||||
6 if decision.ipv6_me || decision.ipv6_dc => 6,
|
6 if ipv6_me || ipv6_dc => 6,
|
||||||
4 if decision.ipv4_me || decision.ipv4_dc => 4,
|
4 if ipv4_me || ipv4_dc => 4,
|
||||||
6 => {
|
6 => {
|
||||||
warn!("prefer=6 requested but IPv6 unavailable; falling back to IPv4");
|
warn!("prefer=6 requested but IPv6 unavailable; falling back to IPv4");
|
||||||
4
|
4
|
||||||
@@ -117,10 +249,17 @@ pub fn decide_network_capabilities(config: &NetworkConfig, probe: &NetworkProbe)
|
|||||||
_ => 4,
|
_ => 4,
|
||||||
};
|
};
|
||||||
|
|
||||||
let me_families = decision.ipv4_me as u8 + decision.ipv6_me as u8;
|
let me_families = ipv4_me as u8 + ipv6_me as u8;
|
||||||
decision.effective_multipath = config.multipath && me_families >= 2;
|
let effective_multipath = config.multipath && me_families >= 2;
|
||||||
|
|
||||||
decision
|
NetworkDecision {
|
||||||
|
ipv4_dc,
|
||||||
|
ipv6_dc,
|
||||||
|
ipv4_me,
|
||||||
|
ipv6_me,
|
||||||
|
effective_prefer,
|
||||||
|
effective_multipath,
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
fn detect_local_ip_v4() -> Option<Ipv4Addr> {
|
fn detect_local_ip_v4() -> Option<Ipv4Addr> {
|
||||||
@@ -141,6 +280,14 @@ fn detect_local_ip_v6() -> Option<Ipv6Addr> {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
pub fn detect_interface_ipv4() -> Option<Ipv4Addr> {
|
||||||
|
detect_local_ip_v4()
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn detect_interface_ipv6() -> Option<Ipv6Addr> {
|
||||||
|
detect_local_ip_v6()
|
||||||
|
}
|
||||||
|
|
||||||
pub fn is_bogon(ip: IpAddr) -> bool {
|
pub fn is_bogon(ip: IpAddr) -> bool {
|
||||||
match ip {
|
match ip {
|
||||||
IpAddr::V4(v4) => is_bogon_v4(v4),
|
IpAddr::V4(v4) => is_bogon_v4(v4),
|
||||||
|
|||||||
@@ -1,9 +1,13 @@
|
|||||||
|
#![allow(unreachable_code)]
|
||||||
|
#![allow(dead_code)]
|
||||||
|
|
||||||
use std::net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr};
|
use std::net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr};
|
||||||
|
|
||||||
use tokio::net::{lookup_host, UdpSocket};
|
use tokio::net::{lookup_host, UdpSocket};
|
||||||
use tokio::time::{timeout, Duration, sleep};
|
use tokio::time::{timeout, Duration, sleep};
|
||||||
|
|
||||||
use crate::error::{ProxyError, Result};
|
use crate::error::{ProxyError, Result};
|
||||||
|
use crate::network::dns_overrides::{resolve, split_host_port};
|
||||||
|
|
||||||
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
|
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
|
||||||
pub enum IpFamily {
|
pub enum IpFamily {
|
||||||
@@ -37,16 +41,31 @@ pub async fn stun_probe_dual(stun_addr: &str) -> Result<DualStunResult> {
|
|||||||
}
|
}
|
||||||
|
|
||||||
pub async fn stun_probe_family(stun_addr: &str, family: IpFamily) -> Result<Option<StunProbeResult>> {
|
pub async fn stun_probe_family(stun_addr: &str, family: IpFamily) -> Result<Option<StunProbeResult>> {
|
||||||
|
stun_probe_family_with_bind(stun_addr, family, None).await
|
||||||
|
}
|
||||||
|
|
||||||
|
pub async fn stun_probe_family_with_bind(
|
||||||
|
stun_addr: &str,
|
||||||
|
family: IpFamily,
|
||||||
|
bind_ip: Option<IpAddr>,
|
||||||
|
) -> Result<Option<StunProbeResult>> {
|
||||||
use rand::RngCore;
|
use rand::RngCore;
|
||||||
|
|
||||||
let bind_addr = match family {
|
let bind_addr = match (family, bind_ip) {
|
||||||
IpFamily::V4 => "0.0.0.0:0",
|
(IpFamily::V4, Some(IpAddr::V4(ip))) => SocketAddr::new(IpAddr::V4(ip), 0),
|
||||||
IpFamily::V6 => "[::]:0",
|
(IpFamily::V6, Some(IpAddr::V6(ip))) => SocketAddr::new(IpAddr::V6(ip), 0),
|
||||||
|
(IpFamily::V4, Some(IpAddr::V6(_))) | (IpFamily::V6, Some(IpAddr::V4(_))) => {
|
||||||
|
return Ok(None);
|
||||||
|
}
|
||||||
|
(IpFamily::V4, None) => SocketAddr::new(IpAddr::V4(Ipv4Addr::UNSPECIFIED), 0),
|
||||||
|
(IpFamily::V6, None) => SocketAddr::new(IpAddr::V6(Ipv6Addr::UNSPECIFIED), 0),
|
||||||
};
|
};
|
||||||
|
|
||||||
let socket = UdpSocket::bind(bind_addr)
|
let socket = match UdpSocket::bind(bind_addr).await {
|
||||||
.await
|
Ok(socket) => socket,
|
||||||
.map_err(|e| ProxyError::Proxy(format!("STUN bind failed: {e}")))?;
|
Err(_) if bind_ip.is_some() => return Ok(None),
|
||||||
|
Err(e) => return Err(ProxyError::Proxy(format!("STUN bind failed: {e}"))),
|
||||||
|
};
|
||||||
|
|
||||||
let target_addr = resolve_stun_addr(stun_addr, family).await?;
|
let target_addr = resolve_stun_addr(stun_addr, family).await?;
|
||||||
if let Some(addr) = target_addr {
|
if let Some(addr) = target_addr {
|
||||||
@@ -195,16 +214,21 @@ async fn resolve_stun_addr(stun_addr: &str, family: IpFamily) -> Result<Option<S
|
|||||||
});
|
});
|
||||||
}
|
}
|
||||||
|
|
||||||
let addrs = lookup_host(stun_addr)
|
if let Some((host, port)) = split_host_port(stun_addr)
|
||||||
|
&& let Some(ip) = resolve(&host, port)
|
||||||
|
{
|
||||||
|
let addr = SocketAddr::new(ip, port);
|
||||||
|
return Ok(match (addr.is_ipv4(), family) {
|
||||||
|
(true, IpFamily::V4) | (false, IpFamily::V6) => Some(addr),
|
||||||
|
_ => None,
|
||||||
|
});
|
||||||
|
}
|
||||||
|
|
||||||
|
let mut addrs = lookup_host(stun_addr)
|
||||||
.await
|
.await
|
||||||
.map_err(|e| ProxyError::Proxy(format!("STUN resolve failed: {e}")))?;
|
.map_err(|e| ProxyError::Proxy(format!("STUN resolve failed: {e}")))?;
|
||||||
|
|
||||||
let target = addrs
|
let target = addrs
|
||||||
.filter(|a| match (a.is_ipv4(), family) {
|
.find(|a| matches!((a.is_ipv4(), family), (true, IpFamily::V4) | (false, IpFamily::V6)));
|
||||||
(true, IpFamily::V4) => true,
|
|
||||||
(false, IpFamily::V6) => true,
|
|
||||||
_ => false,
|
|
||||||
})
|
|
||||||
.next();
|
|
||||||
Ok(target)
|
Ok(target)
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -1,6 +1,8 @@
|
|||||||
//! Protocol constants and datacenter addresses
|
//! Protocol constants and datacenter addresses
|
||||||
|
|
||||||
use std::net::{IpAddr, Ipv4Addr, Ipv6Addr};
|
#![allow(dead_code)]
|
||||||
|
|
||||||
|
use std::net::{IpAddr, Ipv4Addr};
|
||||||
|
|
||||||
use crate::crypto::SecureRandom;
|
use crate::crypto::SecureRandom;
|
||||||
use std::sync::LazyLock;
|
use std::sync::LazyLock;
|
||||||
@@ -158,7 +160,7 @@ pub const MAX_TLS_CHUNK_SIZE: usize = 16384 + 256;
|
|||||||
|
|
||||||
/// Secure Intermediate payload is expected to be 4-byte aligned.
|
/// Secure Intermediate payload is expected to be 4-byte aligned.
|
||||||
pub fn is_valid_secure_payload_len(data_len: usize) -> bool {
|
pub fn is_valid_secure_payload_len(data_len: usize) -> bool {
|
||||||
data_len % 4 == 0
|
data_len.is_multiple_of(4)
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Compute Secure Intermediate payload length from wire length.
|
/// Compute Secure Intermediate payload length from wire length.
|
||||||
@@ -177,7 +179,7 @@ pub fn secure_padding_len(data_len: usize, rng: &SecureRandom) -> usize {
|
|||||||
is_valid_secure_payload_len(data_len),
|
is_valid_secure_payload_len(data_len),
|
||||||
"Secure payload must be 4-byte aligned, got {data_len}"
|
"Secure payload must be 4-byte aligned, got {data_len}"
|
||||||
);
|
);
|
||||||
(rng.range(3) + 1) as usize
|
rng.range(3) + 1
|
||||||
}
|
}
|
||||||
|
|
||||||
// ============= Timeouts =============
|
// ============= Timeouts =============
|
||||||
@@ -229,7 +231,6 @@ pub static RESERVED_NONCE_CONTINUES: &[[u8; 4]] = &[
|
|||||||
// ============= RPC Constants (for Middle Proxy) =============
|
// ============= RPC Constants (for Middle Proxy) =============
|
||||||
|
|
||||||
/// RPC Proxy Request
|
/// RPC Proxy Request
|
||||||
|
|
||||||
/// RPC Flags (from Erlang mtp_rpc.erl)
|
/// RPC Flags (from Erlang mtp_rpc.erl)
|
||||||
pub const RPC_FLAG_NOT_ENCRYPTED: u32 = 0x2;
|
pub const RPC_FLAG_NOT_ENCRYPTED: u32 = 0x2;
|
||||||
pub const RPC_FLAG_HAS_AD_TAG: u32 = 0x8;
|
pub const RPC_FLAG_HAS_AD_TAG: u32 = 0x8;
|
||||||
|
|||||||
@@ -1,5 +1,7 @@
|
|||||||
//! MTProto frame types and metadata
|
//! MTProto frame types and metadata
|
||||||
|
|
||||||
|
#![allow(dead_code)]
|
||||||
|
|
||||||
use std::collections::HashMap;
|
use std::collections::HashMap;
|
||||||
|
|
||||||
/// Extra metadata associated with a frame
|
/// Extra metadata associated with a frame
|
||||||
@@ -83,7 +85,7 @@ impl FrameMode {
|
|||||||
pub fn validate_message_length(len: usize) -> bool {
|
pub fn validate_message_length(len: usize) -> bool {
|
||||||
use super::constants::{MIN_MSG_LEN, MAX_MSG_LEN, PADDING_FILLER};
|
use super::constants::{MIN_MSG_LEN, MAX_MSG_LEN, PADDING_FILLER};
|
||||||
|
|
||||||
len >= MIN_MSG_LEN && len <= MAX_MSG_LEN && len % PADDING_FILLER.len() == 0
|
(MIN_MSG_LEN..=MAX_MSG_LEN).contains(&len) && len.is_multiple_of(PADDING_FILLER.len())
|
||||||
}
|
}
|
||||||
|
|
||||||
#[cfg(test)]
|
#[cfg(test)]
|
||||||
|
|||||||
@@ -5,7 +5,11 @@ pub mod frame;
|
|||||||
pub mod obfuscation;
|
pub mod obfuscation;
|
||||||
pub mod tls;
|
pub mod tls;
|
||||||
|
|
||||||
|
#[allow(unused_imports)]
|
||||||
pub use constants::*;
|
pub use constants::*;
|
||||||
|
#[allow(unused_imports)]
|
||||||
pub use frame::*;
|
pub use frame::*;
|
||||||
|
#[allow(unused_imports)]
|
||||||
pub use obfuscation::*;
|
pub use obfuscation::*;
|
||||||
|
#[allow(unused_imports)]
|
||||||
pub use tls::*;
|
pub use tls::*;
|
||||||
@@ -1,8 +1,9 @@
|
|||||||
//! MTProto Obfuscation
|
//! MTProto Obfuscation
|
||||||
|
|
||||||
|
#![allow(dead_code)]
|
||||||
|
|
||||||
use zeroize::Zeroize;
|
use zeroize::Zeroize;
|
||||||
use crate::crypto::{sha256, AesCtr};
|
use crate::crypto::{sha256, AesCtr};
|
||||||
use crate::error::Result;
|
|
||||||
use super::constants::*;
|
use super::constants::*;
|
||||||
|
|
||||||
/// Obfuscation parameters from handshake
|
/// Obfuscation parameters from handshake
|
||||||
|
|||||||
@@ -4,8 +4,11 @@
|
|||||||
//! for domain fronting. The handshake looks like valid TLS 1.3 but
|
//! for domain fronting. The handshake looks like valid TLS 1.3 but
|
||||||
//! actually carries MTProto authentication data.
|
//! actually carries MTProto authentication data.
|
||||||
|
|
||||||
|
#![allow(dead_code)]
|
||||||
|
|
||||||
use crate::crypto::{sha256_hmac, SecureRandom};
|
use crate::crypto::{sha256_hmac, SecureRandom};
|
||||||
use crate::error::{ProxyError, Result};
|
#[cfg(test)]
|
||||||
|
use crate::error::ProxyError;
|
||||||
use super::constants::*;
|
use super::constants::*;
|
||||||
use std::time::{SystemTime, UNIX_EPOCH};
|
use std::time::{SystemTime, UNIX_EPOCH};
|
||||||
use num_bigint::BigUint;
|
use num_bigint::BigUint;
|
||||||
@@ -332,7 +335,7 @@ pub fn validate_tls_handshake(
|
|||||||
// This is a quirk in some clients that use uptime instead of real time
|
// This is a quirk in some clients that use uptime instead of real time
|
||||||
let is_boot_time = timestamp < 60 * 60 * 24 * 1000; // < ~2.7 years in seconds
|
let is_boot_time = timestamp < 60 * 60 * 24 * 1000; // < ~2.7 years in seconds
|
||||||
|
|
||||||
if !is_boot_time && (time_diff < TIME_SKEW_MIN || time_diff > TIME_SKEW_MAX) {
|
if !is_boot_time && !(TIME_SKEW_MIN..=TIME_SKEW_MAX).contains(&time_diff) {
|
||||||
continue;
|
continue;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -390,7 +393,7 @@ pub fn build_server_hello(
|
|||||||
) -> Vec<u8> {
|
) -> Vec<u8> {
|
||||||
const MIN_APP_DATA: usize = 64;
|
const MIN_APP_DATA: usize = 64;
|
||||||
const MAX_APP_DATA: usize = 16640; // RFC 8446 §5.2 upper bound
|
const MAX_APP_DATA: usize = 16640; // RFC 8446 §5.2 upper bound
|
||||||
let fake_cert_len = fake_cert_len.max(MIN_APP_DATA).min(MAX_APP_DATA);
|
let fake_cert_len = fake_cert_len.clamp(MIN_APP_DATA, MAX_APP_DATA);
|
||||||
let x25519_key = gen_fake_x25519_key(rng);
|
let x25519_key = gen_fake_x25519_key(rng);
|
||||||
|
|
||||||
// Build ServerHello
|
// Build ServerHello
|
||||||
@@ -522,11 +525,11 @@ pub fn extract_sni_from_client_hello(handshake: &[u8]) -> Option<String> {
|
|||||||
if sn_pos + name_len > sn_end {
|
if sn_pos + name_len > sn_end {
|
||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
if name_type == 0 && name_len > 0 {
|
if name_type == 0 && name_len > 0
|
||||||
if let Ok(host) = std::str::from_utf8(&handshake[sn_pos..sn_pos + name_len]) {
|
&& let Ok(host) = std::str::from_utf8(&handshake[sn_pos..sn_pos + name_len])
|
||||||
|
{
|
||||||
return Some(host.to_string());
|
return Some(host.to_string());
|
||||||
}
|
}
|
||||||
}
|
|
||||||
sn_pos += name_len;
|
sn_pos += name_len;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -568,7 +571,7 @@ pub fn extract_alpn_from_client_hello(handshake: &[u8]) -> Vec<Vec<u8>> {
|
|||||||
let list_len = u16::from_be_bytes([handshake[pos], handshake[pos+1]]) as usize;
|
let list_len = u16::from_be_bytes([handshake[pos], handshake[pos+1]]) as usize;
|
||||||
let mut lp = pos + 2;
|
let mut lp = pos + 2;
|
||||||
let list_end = (pos + 2).saturating_add(list_len).min(pos + elen);
|
let list_end = (pos + 2).saturating_add(list_len).min(pos + elen);
|
||||||
while lp + 1 <= list_end {
|
while lp < list_end {
|
||||||
let plen = handshake[lp] as usize;
|
let plen = handshake[lp] as usize;
|
||||||
lp += 1;
|
lp += 1;
|
||||||
if lp + plen > list_end { break; }
|
if lp + plen > list_end { break; }
|
||||||
@@ -613,7 +616,7 @@ pub fn parse_tls_record_header(header: &[u8; 5]) -> Option<(u8, u16)> {
|
|||||||
///
|
///
|
||||||
/// This is useful for testing that our ServerHello is well-formed.
|
/// This is useful for testing that our ServerHello is well-formed.
|
||||||
#[cfg(test)]
|
#[cfg(test)]
|
||||||
fn validate_server_hello_structure(data: &[u8]) -> Result<()> {
|
fn validate_server_hello_structure(data: &[u8]) -> Result<(), ProxyError> {
|
||||||
if data.len() < 5 {
|
if data.len() < 5 {
|
||||||
return Err(ProxyError::InvalidTlsRecord {
|
return Err(ProxyError::InvalidTlsRecord {
|
||||||
record_type: 0,
|
record_type: 0,
|
||||||
|
|||||||
@@ -1,7 +1,7 @@
|
|||||||
//! Client Handler
|
//! Client Handler
|
||||||
|
|
||||||
use std::future::Future;
|
use std::future::Future;
|
||||||
use std::net::SocketAddr;
|
use std::net::{IpAddr, SocketAddr};
|
||||||
use std::pin::Pin;
|
use std::pin::Pin;
|
||||||
use std::sync::Arc;
|
use std::sync::Arc;
|
||||||
use std::time::Duration;
|
use std::time::Duration;
|
||||||
@@ -27,6 +27,7 @@ use crate::error::{HandshakeResult, ProxyError, Result};
|
|||||||
use crate::ip_tracker::UserIpTracker;
|
use crate::ip_tracker::UserIpTracker;
|
||||||
use crate::protocol::constants::*;
|
use crate::protocol::constants::*;
|
||||||
use crate::protocol::tls;
|
use crate::protocol::tls;
|
||||||
|
use crate::stats::beobachten::BeobachtenStore;
|
||||||
use crate::stats::{ReplayChecker, Stats};
|
use crate::stats::{ReplayChecker, Stats};
|
||||||
use crate::stream::{BufferPool, CryptoReader, CryptoWriter};
|
use crate::stream::{BufferPool, CryptoReader, CryptoWriter};
|
||||||
use crate::transport::middle_proxy::MePool;
|
use crate::transport::middle_proxy::MePool;
|
||||||
@@ -38,6 +39,37 @@ use crate::proxy::direct_relay::handle_via_direct;
|
|||||||
use crate::proxy::handshake::{HandshakeSuccess, handle_mtproto_handshake, handle_tls_handshake};
|
use crate::proxy::handshake::{HandshakeSuccess, handle_mtproto_handshake, handle_tls_handshake};
|
||||||
use crate::proxy::masking::handle_bad_client;
|
use crate::proxy::masking::handle_bad_client;
|
||||||
use crate::proxy::middle_relay::handle_via_middle_proxy;
|
use crate::proxy::middle_relay::handle_via_middle_proxy;
|
||||||
|
use crate::proxy::route_mode::{RelayRouteMode, RouteRuntimeController};
|
||||||
|
|
||||||
|
fn beobachten_ttl(config: &ProxyConfig) -> Duration {
|
||||||
|
Duration::from_secs(config.general.beobachten_minutes.saturating_mul(60))
|
||||||
|
}
|
||||||
|
|
||||||
|
fn record_beobachten_class(
|
||||||
|
beobachten: &BeobachtenStore,
|
||||||
|
config: &ProxyConfig,
|
||||||
|
peer_ip: IpAddr,
|
||||||
|
class: &str,
|
||||||
|
) {
|
||||||
|
if !config.general.beobachten {
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
beobachten.record(class, peer_ip, beobachten_ttl(config));
|
||||||
|
}
|
||||||
|
|
||||||
|
fn record_handshake_failure_class(
|
||||||
|
beobachten: &BeobachtenStore,
|
||||||
|
config: &ProxyConfig,
|
||||||
|
peer_ip: IpAddr,
|
||||||
|
error: &ProxyError,
|
||||||
|
) {
|
||||||
|
let class = if error.to_string().contains("expected 64 bytes, got 0") {
|
||||||
|
"expected_64_got_0"
|
||||||
|
} else {
|
||||||
|
"other"
|
||||||
|
};
|
||||||
|
record_beobachten_class(beobachten, config, peer_ip, class);
|
||||||
|
}
|
||||||
|
|
||||||
pub async fn handle_client_stream<S>(
|
pub async fn handle_client_stream<S>(
|
||||||
mut stream: S,
|
mut stream: S,
|
||||||
@@ -49,8 +81,10 @@ pub async fn handle_client_stream<S>(
|
|||||||
buffer_pool: Arc<BufferPool>,
|
buffer_pool: Arc<BufferPool>,
|
||||||
rng: Arc<SecureRandom>,
|
rng: Arc<SecureRandom>,
|
||||||
me_pool: Option<Arc<MePool>>,
|
me_pool: Option<Arc<MePool>>,
|
||||||
|
route_runtime: Arc<RouteRuntimeController>,
|
||||||
tls_cache: Option<Arc<TlsFrontCache>>,
|
tls_cache: Option<Arc<TlsFrontCache>>,
|
||||||
ip_tracker: Arc<UserIpTracker>,
|
ip_tracker: Arc<UserIpTracker>,
|
||||||
|
beobachten: Arc<BeobachtenStore>,
|
||||||
proxy_protocol_enabled: bool,
|
proxy_protocol_enabled: bool,
|
||||||
) -> Result<()>
|
) -> Result<()>
|
||||||
where
|
where
|
||||||
@@ -59,9 +93,17 @@ where
|
|||||||
stats.increment_connects_all();
|
stats.increment_connects_all();
|
||||||
let mut real_peer = normalize_ip(peer);
|
let mut real_peer = normalize_ip(peer);
|
||||||
|
|
||||||
|
// For non-TCP streams, use a synthetic local address; may be overridden by PROXY protocol dst
|
||||||
|
let mut local_addr: SocketAddr = format!("0.0.0.0:{}", config.server.port)
|
||||||
|
.parse()
|
||||||
|
.unwrap_or_else(|_| "0.0.0.0:443".parse().unwrap());
|
||||||
|
|
||||||
if proxy_protocol_enabled {
|
if proxy_protocol_enabled {
|
||||||
match parse_proxy_protocol(&mut stream, peer).await {
|
let proxy_header_timeout = Duration::from_millis(
|
||||||
Ok(info) => {
|
config.server.proxy_protocol_header_timeout_ms.max(1),
|
||||||
|
);
|
||||||
|
match timeout(proxy_header_timeout, parse_proxy_protocol(&mut stream, peer)).await {
|
||||||
|
Ok(Ok(info)) => {
|
||||||
debug!(
|
debug!(
|
||||||
peer = %peer,
|
peer = %peer,
|
||||||
client = %info.src_addr,
|
client = %info.src_addr,
|
||||||
@@ -69,12 +111,22 @@ where
|
|||||||
"PROXY protocol header parsed"
|
"PROXY protocol header parsed"
|
||||||
);
|
);
|
||||||
real_peer = normalize_ip(info.src_addr);
|
real_peer = normalize_ip(info.src_addr);
|
||||||
|
if let Some(dst) = info.dst_addr {
|
||||||
|
local_addr = dst;
|
||||||
}
|
}
|
||||||
Err(e) => {
|
}
|
||||||
|
Ok(Err(e)) => {
|
||||||
stats.increment_connects_bad();
|
stats.increment_connects_bad();
|
||||||
warn!(peer = %peer, error = %e, "Invalid PROXY protocol header");
|
warn!(peer = %peer, error = %e, "Invalid PROXY protocol header");
|
||||||
|
record_beobachten_class(&beobachten, &config, peer.ip(), "other");
|
||||||
return Err(e);
|
return Err(e);
|
||||||
}
|
}
|
||||||
|
Err(_) => {
|
||||||
|
stats.increment_connects_bad();
|
||||||
|
warn!(peer = %peer, timeout_ms = proxy_header_timeout.as_millis(), "PROXY protocol header timeout");
|
||||||
|
record_beobachten_class(&beobachten, &config, peer.ip(), "other");
|
||||||
|
return Err(ProxyError::InvalidProxyProtocol);
|
||||||
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -82,11 +134,9 @@ where
|
|||||||
|
|
||||||
let handshake_timeout = Duration::from_secs(config.timeouts.client_handshake);
|
let handshake_timeout = Duration::from_secs(config.timeouts.client_handshake);
|
||||||
let stats_for_timeout = stats.clone();
|
let stats_for_timeout = stats.clone();
|
||||||
|
let config_for_timeout = config.clone();
|
||||||
// For non-TCP streams, use a synthetic local address
|
let beobachten_for_timeout = beobachten.clone();
|
||||||
let local_addr: SocketAddr = format!("0.0.0.0:{}", config.server.port)
|
let peer_for_timeout = real_peer.ip();
|
||||||
.parse()
|
|
||||||
.unwrap_or_else(|_| "0.0.0.0:443".parse().unwrap());
|
|
||||||
|
|
||||||
// Phase 1: handshake (with timeout)
|
// Phase 1: handshake (with timeout)
|
||||||
let outcome = match timeout(handshake_timeout, async {
|
let outcome = match timeout(handshake_timeout, async {
|
||||||
@@ -103,7 +153,16 @@ where
|
|||||||
debug!(peer = %real_peer, tls_len = tls_len, "TLS handshake too short");
|
debug!(peer = %real_peer, tls_len = tls_len, "TLS handshake too short");
|
||||||
stats.increment_connects_bad();
|
stats.increment_connects_bad();
|
||||||
let (reader, writer) = tokio::io::split(stream);
|
let (reader, writer) = tokio::io::split(stream);
|
||||||
handle_bad_client(reader, writer, &first_bytes, &config).await;
|
handle_bad_client(
|
||||||
|
reader,
|
||||||
|
writer,
|
||||||
|
&first_bytes,
|
||||||
|
real_peer,
|
||||||
|
local_addr,
|
||||||
|
&config,
|
||||||
|
&beobachten,
|
||||||
|
)
|
||||||
|
.await;
|
||||||
return Ok(HandshakeOutcome::Handled);
|
return Ok(HandshakeOutcome::Handled);
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -113,14 +172,23 @@ where
|
|||||||
|
|
||||||
let (read_half, write_half) = tokio::io::split(stream);
|
let (read_half, write_half) = tokio::io::split(stream);
|
||||||
|
|
||||||
let (mut tls_reader, tls_writer, _tls_user) = match handle_tls_handshake(
|
let (mut tls_reader, tls_writer, tls_user) = match handle_tls_handshake(
|
||||||
&handshake, read_half, write_half, real_peer,
|
&handshake, read_half, write_half, real_peer,
|
||||||
&config, &replay_checker, &rng, tls_cache.clone(),
|
&config, &replay_checker, &rng, tls_cache.clone(),
|
||||||
).await {
|
).await {
|
||||||
HandshakeResult::Success(result) => result,
|
HandshakeResult::Success(result) => result,
|
||||||
HandshakeResult::BadClient { reader, writer } => {
|
HandshakeResult::BadClient { reader, writer } => {
|
||||||
stats.increment_connects_bad();
|
stats.increment_connects_bad();
|
||||||
handle_bad_client(reader, writer, &handshake, &config).await;
|
handle_bad_client(
|
||||||
|
reader,
|
||||||
|
writer,
|
||||||
|
&handshake,
|
||||||
|
real_peer,
|
||||||
|
local_addr,
|
||||||
|
&config,
|
||||||
|
&beobachten,
|
||||||
|
)
|
||||||
|
.await;
|
||||||
return Ok(HandshakeOutcome::Handled);
|
return Ok(HandshakeOutcome::Handled);
|
||||||
}
|
}
|
||||||
HandshakeResult::Error(e) => return Err(e),
|
HandshakeResult::Error(e) => return Err(e),
|
||||||
@@ -133,7 +201,7 @@ where
|
|||||||
|
|
||||||
let (crypto_reader, crypto_writer, success) = match handle_mtproto_handshake(
|
let (crypto_reader, crypto_writer, success) = match handle_mtproto_handshake(
|
||||||
&mtproto_handshake, tls_reader, tls_writer, real_peer,
|
&mtproto_handshake, tls_reader, tls_writer, real_peer,
|
||||||
&config, &replay_checker, true,
|
&config, &replay_checker, true, Some(tls_user.as_str()),
|
||||||
).await {
|
).await {
|
||||||
HandshakeResult::Success(result) => result,
|
HandshakeResult::Success(result) => result,
|
||||||
HandshakeResult::BadClient { reader: _, writer: _ } => {
|
HandshakeResult::BadClient { reader: _, writer: _ } => {
|
||||||
@@ -148,6 +216,7 @@ where
|
|||||||
RunningClientHandler::handle_authenticated_static(
|
RunningClientHandler::handle_authenticated_static(
|
||||||
crypto_reader, crypto_writer, success,
|
crypto_reader, crypto_writer, success,
|
||||||
upstream_manager, stats, config, buffer_pool, rng, me_pool,
|
upstream_manager, stats, config, buffer_pool, rng, me_pool,
|
||||||
|
route_runtime.clone(),
|
||||||
local_addr, real_peer, ip_tracker.clone(),
|
local_addr, real_peer, ip_tracker.clone(),
|
||||||
),
|
),
|
||||||
)))
|
)))
|
||||||
@@ -156,7 +225,16 @@ where
|
|||||||
debug!(peer = %real_peer, "Non-TLS modes disabled");
|
debug!(peer = %real_peer, "Non-TLS modes disabled");
|
||||||
stats.increment_connects_bad();
|
stats.increment_connects_bad();
|
||||||
let (reader, writer) = tokio::io::split(stream);
|
let (reader, writer) = tokio::io::split(stream);
|
||||||
handle_bad_client(reader, writer, &first_bytes, &config).await;
|
handle_bad_client(
|
||||||
|
reader,
|
||||||
|
writer,
|
||||||
|
&first_bytes,
|
||||||
|
real_peer,
|
||||||
|
local_addr,
|
||||||
|
&config,
|
||||||
|
&beobachten,
|
||||||
|
)
|
||||||
|
.await;
|
||||||
return Ok(HandshakeOutcome::Handled);
|
return Ok(HandshakeOutcome::Handled);
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -168,12 +246,21 @@ where
|
|||||||
|
|
||||||
let (crypto_reader, crypto_writer, success) = match handle_mtproto_handshake(
|
let (crypto_reader, crypto_writer, success) = match handle_mtproto_handshake(
|
||||||
&handshake, read_half, write_half, real_peer,
|
&handshake, read_half, write_half, real_peer,
|
||||||
&config, &replay_checker, false,
|
&config, &replay_checker, false, None,
|
||||||
).await {
|
).await {
|
||||||
HandshakeResult::Success(result) => result,
|
HandshakeResult::Success(result) => result,
|
||||||
HandshakeResult::BadClient { reader, writer } => {
|
HandshakeResult::BadClient { reader, writer } => {
|
||||||
stats.increment_connects_bad();
|
stats.increment_connects_bad();
|
||||||
handle_bad_client(reader, writer, &handshake, &config).await;
|
handle_bad_client(
|
||||||
|
reader,
|
||||||
|
writer,
|
||||||
|
&handshake,
|
||||||
|
real_peer,
|
||||||
|
local_addr,
|
||||||
|
&config,
|
||||||
|
&beobachten,
|
||||||
|
)
|
||||||
|
.await;
|
||||||
return Ok(HandshakeOutcome::Handled);
|
return Ok(HandshakeOutcome::Handled);
|
||||||
}
|
}
|
||||||
HandshakeResult::Error(e) => return Err(e),
|
HandshakeResult::Error(e) => return Err(e),
|
||||||
@@ -190,6 +277,7 @@ where
|
|||||||
buffer_pool,
|
buffer_pool,
|
||||||
rng,
|
rng,
|
||||||
me_pool,
|
me_pool,
|
||||||
|
route_runtime.clone(),
|
||||||
local_addr,
|
local_addr,
|
||||||
real_peer,
|
real_peer,
|
||||||
ip_tracker.clone(),
|
ip_tracker.clone(),
|
||||||
@@ -200,11 +288,23 @@ where
|
|||||||
Ok(Ok(outcome)) => outcome,
|
Ok(Ok(outcome)) => outcome,
|
||||||
Ok(Err(e)) => {
|
Ok(Err(e)) => {
|
||||||
debug!(peer = %peer, error = %e, "Handshake failed");
|
debug!(peer = %peer, error = %e, "Handshake failed");
|
||||||
|
record_handshake_failure_class(
|
||||||
|
&beobachten_for_timeout,
|
||||||
|
&config_for_timeout,
|
||||||
|
peer_for_timeout,
|
||||||
|
&e,
|
||||||
|
);
|
||||||
return Err(e);
|
return Err(e);
|
||||||
}
|
}
|
||||||
Err(_) => {
|
Err(_) => {
|
||||||
stats_for_timeout.increment_handshake_timeouts();
|
stats_for_timeout.increment_handshake_timeouts();
|
||||||
debug!(peer = %peer, "Handshake timeout");
|
debug!(peer = %peer, "Handshake timeout");
|
||||||
|
record_beobachten_class(
|
||||||
|
&beobachten_for_timeout,
|
||||||
|
&config_for_timeout,
|
||||||
|
peer_for_timeout,
|
||||||
|
"other",
|
||||||
|
);
|
||||||
return Err(ProxyError::TgHandshakeTimeout);
|
return Err(ProxyError::TgHandshakeTimeout);
|
||||||
}
|
}
|
||||||
};
|
};
|
||||||
@@ -221,6 +321,8 @@ pub struct ClientHandler;
|
|||||||
pub struct RunningClientHandler {
|
pub struct RunningClientHandler {
|
||||||
stream: TcpStream,
|
stream: TcpStream,
|
||||||
peer: SocketAddr,
|
peer: SocketAddr,
|
||||||
|
real_peer_from_proxy: Option<SocketAddr>,
|
||||||
|
real_peer_report: Arc<std::sync::Mutex<Option<SocketAddr>>>,
|
||||||
config: Arc<ProxyConfig>,
|
config: Arc<ProxyConfig>,
|
||||||
stats: Arc<Stats>,
|
stats: Arc<Stats>,
|
||||||
replay_checker: Arc<ReplayChecker>,
|
replay_checker: Arc<ReplayChecker>,
|
||||||
@@ -228,8 +330,10 @@ pub struct RunningClientHandler {
|
|||||||
buffer_pool: Arc<BufferPool>,
|
buffer_pool: Arc<BufferPool>,
|
||||||
rng: Arc<SecureRandom>,
|
rng: Arc<SecureRandom>,
|
||||||
me_pool: Option<Arc<MePool>>,
|
me_pool: Option<Arc<MePool>>,
|
||||||
|
route_runtime: Arc<RouteRuntimeController>,
|
||||||
tls_cache: Option<Arc<TlsFrontCache>>,
|
tls_cache: Option<Arc<TlsFrontCache>>,
|
||||||
ip_tracker: Arc<UserIpTracker>,
|
ip_tracker: Arc<UserIpTracker>,
|
||||||
|
beobachten: Arc<BeobachtenStore>,
|
||||||
proxy_protocol_enabled: bool,
|
proxy_protocol_enabled: bool,
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -244,13 +348,19 @@ impl ClientHandler {
|
|||||||
buffer_pool: Arc<BufferPool>,
|
buffer_pool: Arc<BufferPool>,
|
||||||
rng: Arc<SecureRandom>,
|
rng: Arc<SecureRandom>,
|
||||||
me_pool: Option<Arc<MePool>>,
|
me_pool: Option<Arc<MePool>>,
|
||||||
|
route_runtime: Arc<RouteRuntimeController>,
|
||||||
tls_cache: Option<Arc<TlsFrontCache>>,
|
tls_cache: Option<Arc<TlsFrontCache>>,
|
||||||
ip_tracker: Arc<UserIpTracker>,
|
ip_tracker: Arc<UserIpTracker>,
|
||||||
|
beobachten: Arc<BeobachtenStore>,
|
||||||
proxy_protocol_enabled: bool,
|
proxy_protocol_enabled: bool,
|
||||||
|
real_peer_report: Arc<std::sync::Mutex<Option<SocketAddr>>>,
|
||||||
) -> RunningClientHandler {
|
) -> RunningClientHandler {
|
||||||
|
let normalized_peer = normalize_ip(peer);
|
||||||
RunningClientHandler {
|
RunningClientHandler {
|
||||||
stream,
|
stream,
|
||||||
peer,
|
peer: normalized_peer,
|
||||||
|
real_peer_from_proxy: None,
|
||||||
|
real_peer_report,
|
||||||
config,
|
config,
|
||||||
stats,
|
stats,
|
||||||
replay_checker,
|
replay_checker,
|
||||||
@@ -258,20 +368,20 @@ impl ClientHandler {
|
|||||||
buffer_pool,
|
buffer_pool,
|
||||||
rng,
|
rng,
|
||||||
me_pool,
|
me_pool,
|
||||||
|
route_runtime,
|
||||||
tls_cache,
|
tls_cache,
|
||||||
ip_tracker,
|
ip_tracker,
|
||||||
|
beobachten,
|
||||||
proxy_protocol_enabled,
|
proxy_protocol_enabled,
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
impl RunningClientHandler {
|
impl RunningClientHandler {
|
||||||
pub async fn run(mut self) -> Result<()> {
|
pub async fn run(self) -> Result<()> {
|
||||||
self.stats.increment_connects_all();
|
self.stats.increment_connects_all();
|
||||||
|
|
||||||
self.peer = normalize_ip(self.peer);
|
|
||||||
let peer = self.peer;
|
let peer = self.peer;
|
||||||
let ip_tracker = self.ip_tracker.clone();
|
let _ip_tracker = self.ip_tracker.clone();
|
||||||
debug!(peer = %peer, "New connection");
|
debug!(peer = %peer, "New connection");
|
||||||
|
|
||||||
if let Err(e) = configure_client_socket(
|
if let Err(e) = configure_client_socket(
|
||||||
@@ -284,17 +394,32 @@ impl RunningClientHandler {
|
|||||||
|
|
||||||
let handshake_timeout = Duration::from_secs(self.config.timeouts.client_handshake);
|
let handshake_timeout = Duration::from_secs(self.config.timeouts.client_handshake);
|
||||||
let stats = self.stats.clone();
|
let stats = self.stats.clone();
|
||||||
|
let config_for_timeout = self.config.clone();
|
||||||
|
let beobachten_for_timeout = self.beobachten.clone();
|
||||||
|
let peer_for_timeout = peer.ip();
|
||||||
|
|
||||||
// Phase 1: handshake (with timeout)
|
// Phase 1: handshake (with timeout)
|
||||||
let outcome = match timeout(handshake_timeout, self.do_handshake()).await {
|
let outcome = match timeout(handshake_timeout, self.do_handshake()).await {
|
||||||
Ok(Ok(outcome)) => outcome,
|
Ok(Ok(outcome)) => outcome,
|
||||||
Ok(Err(e)) => {
|
Ok(Err(e)) => {
|
||||||
debug!(peer = %peer, error = %e, "Handshake failed");
|
debug!(peer = %peer, error = %e, "Handshake failed");
|
||||||
|
record_handshake_failure_class(
|
||||||
|
&beobachten_for_timeout,
|
||||||
|
&config_for_timeout,
|
||||||
|
peer_for_timeout,
|
||||||
|
&e,
|
||||||
|
);
|
||||||
return Err(e);
|
return Err(e);
|
||||||
}
|
}
|
||||||
Err(_) => {
|
Err(_) => {
|
||||||
stats.increment_handshake_timeouts();
|
stats.increment_handshake_timeouts();
|
||||||
debug!(peer = %peer, "Handshake timeout");
|
debug!(peer = %peer, "Handshake timeout");
|
||||||
|
record_beobachten_class(
|
||||||
|
&beobachten_for_timeout,
|
||||||
|
&config_for_timeout,
|
||||||
|
peer_for_timeout,
|
||||||
|
"other",
|
||||||
|
);
|
||||||
return Err(ProxyError::TgHandshakeTimeout);
|
return Err(ProxyError::TgHandshakeTimeout);
|
||||||
}
|
}
|
||||||
};
|
};
|
||||||
@@ -307,9 +432,19 @@ impl RunningClientHandler {
|
|||||||
}
|
}
|
||||||
|
|
||||||
async fn do_handshake(mut self) -> Result<HandshakeOutcome> {
|
async fn do_handshake(mut self) -> Result<HandshakeOutcome> {
|
||||||
|
let mut local_addr = self.stream.local_addr().map_err(ProxyError::Io)?;
|
||||||
|
|
||||||
if self.proxy_protocol_enabled {
|
if self.proxy_protocol_enabled {
|
||||||
match parse_proxy_protocol(&mut self.stream, self.peer).await {
|
let proxy_header_timeout = Duration::from_millis(
|
||||||
Ok(info) => {
|
self.config.server.proxy_protocol_header_timeout_ms.max(1),
|
||||||
|
);
|
||||||
|
match timeout(
|
||||||
|
proxy_header_timeout,
|
||||||
|
parse_proxy_protocol(&mut self.stream, self.peer),
|
||||||
|
)
|
||||||
|
.await
|
||||||
|
{
|
||||||
|
Ok(Ok(info)) => {
|
||||||
debug!(
|
debug!(
|
||||||
peer = %self.peer,
|
peer = %self.peer,
|
||||||
client = %info.src_addr,
|
client = %info.src_addr,
|
||||||
@@ -317,12 +452,40 @@ impl RunningClientHandler {
|
|||||||
"PROXY protocol header parsed"
|
"PROXY protocol header parsed"
|
||||||
);
|
);
|
||||||
self.peer = normalize_ip(info.src_addr);
|
self.peer = normalize_ip(info.src_addr);
|
||||||
|
self.real_peer_from_proxy = Some(self.peer);
|
||||||
|
if let Ok(mut slot) = self.real_peer_report.lock() {
|
||||||
|
*slot = Some(self.peer);
|
||||||
}
|
}
|
||||||
Err(e) => {
|
if let Some(dst) = info.dst_addr {
|
||||||
|
local_addr = dst;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
Ok(Err(e)) => {
|
||||||
self.stats.increment_connects_bad();
|
self.stats.increment_connects_bad();
|
||||||
warn!(peer = %self.peer, error = %e, "Invalid PROXY protocol header");
|
warn!(peer = %self.peer, error = %e, "Invalid PROXY protocol header");
|
||||||
|
record_beobachten_class(
|
||||||
|
&self.beobachten,
|
||||||
|
&self.config,
|
||||||
|
self.peer.ip(),
|
||||||
|
"other",
|
||||||
|
);
|
||||||
return Err(e);
|
return Err(e);
|
||||||
}
|
}
|
||||||
|
Err(_) => {
|
||||||
|
self.stats.increment_connects_bad();
|
||||||
|
warn!(
|
||||||
|
peer = %self.peer,
|
||||||
|
timeout_ms = proxy_header_timeout.as_millis(),
|
||||||
|
"PROXY protocol header timeout"
|
||||||
|
);
|
||||||
|
record_beobachten_class(
|
||||||
|
&self.beobachten,
|
||||||
|
&self.config,
|
||||||
|
self.peer.ip(),
|
||||||
|
"other",
|
||||||
|
);
|
||||||
|
return Err(ProxyError::InvalidProxyProtocol);
|
||||||
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -331,20 +494,20 @@ impl RunningClientHandler {
|
|||||||
|
|
||||||
let is_tls = tls::is_tls_handshake(&first_bytes[..3]);
|
let is_tls = tls::is_tls_handshake(&first_bytes[..3]);
|
||||||
let peer = self.peer;
|
let peer = self.peer;
|
||||||
let ip_tracker = self.ip_tracker.clone();
|
let _ip_tracker = self.ip_tracker.clone();
|
||||||
|
|
||||||
debug!(peer = %peer, is_tls = is_tls, "Handshake type detected");
|
debug!(peer = %peer, is_tls = is_tls, "Handshake type detected");
|
||||||
|
|
||||||
if is_tls {
|
if is_tls {
|
||||||
self.handle_tls_client(first_bytes).await
|
self.handle_tls_client(first_bytes, local_addr).await
|
||||||
} else {
|
} else {
|
||||||
self.handle_direct_client(first_bytes).await
|
self.handle_direct_client(first_bytes, local_addr).await
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
async fn handle_tls_client(mut self, first_bytes: [u8; 5]) -> Result<HandshakeOutcome> {
|
async fn handle_tls_client(mut self, first_bytes: [u8; 5], local_addr: SocketAddr) -> Result<HandshakeOutcome> {
|
||||||
let peer = self.peer;
|
let peer = self.peer;
|
||||||
let ip_tracker = self.ip_tracker.clone();
|
let _ip_tracker = self.ip_tracker.clone();
|
||||||
|
|
||||||
let tls_len = u16::from_be_bytes([first_bytes[3], first_bytes[4]]) as usize;
|
let tls_len = u16::from_be_bytes([first_bytes[3], first_bytes[4]]) as usize;
|
||||||
|
|
||||||
@@ -354,7 +517,16 @@ impl RunningClientHandler {
|
|||||||
debug!(peer = %peer, tls_len = tls_len, "TLS handshake too short");
|
debug!(peer = %peer, tls_len = tls_len, "TLS handshake too short");
|
||||||
self.stats.increment_connects_bad();
|
self.stats.increment_connects_bad();
|
||||||
let (reader, writer) = self.stream.into_split();
|
let (reader, writer) = self.stream.into_split();
|
||||||
handle_bad_client(reader, writer, &first_bytes, &self.config).await;
|
handle_bad_client(
|
||||||
|
reader,
|
||||||
|
writer,
|
||||||
|
&first_bytes,
|
||||||
|
peer,
|
||||||
|
local_addr,
|
||||||
|
&self.config,
|
||||||
|
&self.beobachten,
|
||||||
|
)
|
||||||
|
.await;
|
||||||
return Ok(HandshakeOutcome::Handled);
|
return Ok(HandshakeOutcome::Handled);
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -367,10 +539,9 @@ impl RunningClientHandler {
|
|||||||
let stats = self.stats.clone();
|
let stats = self.stats.clone();
|
||||||
let buffer_pool = self.buffer_pool.clone();
|
let buffer_pool = self.buffer_pool.clone();
|
||||||
|
|
||||||
let local_addr = self.stream.local_addr().map_err(ProxyError::Io)?;
|
|
||||||
let (read_half, write_half) = self.stream.into_split();
|
let (read_half, write_half) = self.stream.into_split();
|
||||||
|
|
||||||
let (mut tls_reader, tls_writer, _tls_user) = match handle_tls_handshake(
|
let (mut tls_reader, tls_writer, tls_user) = match handle_tls_handshake(
|
||||||
&handshake,
|
&handshake,
|
||||||
read_half,
|
read_half,
|
||||||
write_half,
|
write_half,
|
||||||
@@ -385,7 +556,16 @@ impl RunningClientHandler {
|
|||||||
HandshakeResult::Success(result) => result,
|
HandshakeResult::Success(result) => result,
|
||||||
HandshakeResult::BadClient { reader, writer } => {
|
HandshakeResult::BadClient { reader, writer } => {
|
||||||
stats.increment_connects_bad();
|
stats.increment_connects_bad();
|
||||||
handle_bad_client(reader, writer, &handshake, &config).await;
|
handle_bad_client(
|
||||||
|
reader,
|
||||||
|
writer,
|
||||||
|
&handshake,
|
||||||
|
peer,
|
||||||
|
local_addr,
|
||||||
|
&config,
|
||||||
|
&self.beobachten,
|
||||||
|
)
|
||||||
|
.await;
|
||||||
return Ok(HandshakeOutcome::Handled);
|
return Ok(HandshakeOutcome::Handled);
|
||||||
}
|
}
|
||||||
HandshakeResult::Error(e) => return Err(e),
|
HandshakeResult::Error(e) => return Err(e),
|
||||||
@@ -405,6 +585,7 @@ impl RunningClientHandler {
|
|||||||
&config,
|
&config,
|
||||||
&replay_checker,
|
&replay_checker,
|
||||||
true,
|
true,
|
||||||
|
Some(tls_user.as_str()),
|
||||||
)
|
)
|
||||||
.await
|
.await
|
||||||
{
|
{
|
||||||
@@ -431,6 +612,7 @@ impl RunningClientHandler {
|
|||||||
buffer_pool,
|
buffer_pool,
|
||||||
self.rng,
|
self.rng,
|
||||||
self.me_pool,
|
self.me_pool,
|
||||||
|
self.route_runtime.clone(),
|
||||||
local_addr,
|
local_addr,
|
||||||
peer,
|
peer,
|
||||||
self.ip_tracker,
|
self.ip_tracker,
|
||||||
@@ -438,15 +620,24 @@ impl RunningClientHandler {
|
|||||||
)))
|
)))
|
||||||
}
|
}
|
||||||
|
|
||||||
async fn handle_direct_client(mut self, first_bytes: [u8; 5]) -> Result<HandshakeOutcome> {
|
async fn handle_direct_client(mut self, first_bytes: [u8; 5], local_addr: SocketAddr) -> Result<HandshakeOutcome> {
|
||||||
let peer = self.peer;
|
let peer = self.peer;
|
||||||
let ip_tracker = self.ip_tracker.clone();
|
let _ip_tracker = self.ip_tracker.clone();
|
||||||
|
|
||||||
if !self.config.general.modes.classic && !self.config.general.modes.secure {
|
if !self.config.general.modes.classic && !self.config.general.modes.secure {
|
||||||
debug!(peer = %peer, "Non-TLS modes disabled");
|
debug!(peer = %peer, "Non-TLS modes disabled");
|
||||||
self.stats.increment_connects_bad();
|
self.stats.increment_connects_bad();
|
||||||
let (reader, writer) = self.stream.into_split();
|
let (reader, writer) = self.stream.into_split();
|
||||||
handle_bad_client(reader, writer, &first_bytes, &self.config).await;
|
handle_bad_client(
|
||||||
|
reader,
|
||||||
|
writer,
|
||||||
|
&first_bytes,
|
||||||
|
peer,
|
||||||
|
local_addr,
|
||||||
|
&self.config,
|
||||||
|
&self.beobachten,
|
||||||
|
)
|
||||||
|
.await;
|
||||||
return Ok(HandshakeOutcome::Handled);
|
return Ok(HandshakeOutcome::Handled);
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -459,7 +650,6 @@ impl RunningClientHandler {
|
|||||||
let stats = self.stats.clone();
|
let stats = self.stats.clone();
|
||||||
let buffer_pool = self.buffer_pool.clone();
|
let buffer_pool = self.buffer_pool.clone();
|
||||||
|
|
||||||
let local_addr = self.stream.local_addr().map_err(ProxyError::Io)?;
|
|
||||||
let (read_half, write_half) = self.stream.into_split();
|
let (read_half, write_half) = self.stream.into_split();
|
||||||
|
|
||||||
let (crypto_reader, crypto_writer, success) = match handle_mtproto_handshake(
|
let (crypto_reader, crypto_writer, success) = match handle_mtproto_handshake(
|
||||||
@@ -470,13 +660,23 @@ impl RunningClientHandler {
|
|||||||
&config,
|
&config,
|
||||||
&replay_checker,
|
&replay_checker,
|
||||||
false,
|
false,
|
||||||
|
None,
|
||||||
)
|
)
|
||||||
.await
|
.await
|
||||||
{
|
{
|
||||||
HandshakeResult::Success(result) => result,
|
HandshakeResult::Success(result) => result,
|
||||||
HandshakeResult::BadClient { reader, writer } => {
|
HandshakeResult::BadClient { reader, writer } => {
|
||||||
stats.increment_connects_bad();
|
stats.increment_connects_bad();
|
||||||
handle_bad_client(reader, writer, &handshake, &config).await;
|
handle_bad_client(
|
||||||
|
reader,
|
||||||
|
writer,
|
||||||
|
&handshake,
|
||||||
|
peer,
|
||||||
|
local_addr,
|
||||||
|
&config,
|
||||||
|
&self.beobachten,
|
||||||
|
)
|
||||||
|
.await;
|
||||||
return Ok(HandshakeOutcome::Handled);
|
return Ok(HandshakeOutcome::Handled);
|
||||||
}
|
}
|
||||||
HandshakeResult::Error(e) => return Err(e),
|
HandshakeResult::Error(e) => return Err(e),
|
||||||
@@ -493,6 +693,7 @@ impl RunningClientHandler {
|
|||||||
buffer_pool,
|
buffer_pool,
|
||||||
self.rng,
|
self.rng,
|
||||||
self.me_pool,
|
self.me_pool,
|
||||||
|
self.route_runtime.clone(),
|
||||||
local_addr,
|
local_addr,
|
||||||
peer,
|
peer,
|
||||||
self.ip_tracker,
|
self.ip_tracker,
|
||||||
@@ -514,6 +715,7 @@ impl RunningClientHandler {
|
|||||||
buffer_pool: Arc<BufferPool>,
|
buffer_pool: Arc<BufferPool>,
|
||||||
rng: Arc<SecureRandom>,
|
rng: Arc<SecureRandom>,
|
||||||
me_pool: Option<Arc<MePool>>,
|
me_pool: Option<Arc<MePool>>,
|
||||||
|
route_runtime: Arc<RouteRuntimeController>,
|
||||||
local_addr: SocketAddr,
|
local_addr: SocketAddr,
|
||||||
peer_addr: SocketAddr,
|
peer_addr: SocketAddr,
|
||||||
ip_tracker: Arc<UserIpTracker>,
|
ip_tracker: Arc<UserIpTracker>,
|
||||||
@@ -522,42 +724,20 @@ impl RunningClientHandler {
|
|||||||
R: AsyncRead + Unpin + Send + 'static,
|
R: AsyncRead + Unpin + Send + 'static,
|
||||||
W: AsyncWrite + Unpin + Send + 'static,
|
W: AsyncWrite + Unpin + Send + 'static,
|
||||||
{
|
{
|
||||||
let user = &success.user;
|
let user = success.user.clone();
|
||||||
|
|
||||||
if let Err(e) = Self::check_user_limits_static(user, &config, &stats, peer_addr, &ip_tracker).await {
|
if let Err(e) = Self::check_user_limits_static(&user, &config, &stats, peer_addr, &ip_tracker).await {
|
||||||
warn!(user = %user, error = %e, "User limit exceeded");
|
warn!(user = %user, error = %e, "User limit exceeded");
|
||||||
return Err(e);
|
return Err(e);
|
||||||
}
|
}
|
||||||
|
|
||||||
// IP Cleanup Guard: автоматически удаляет IP при выходе из scope
|
let route_snapshot = route_runtime.snapshot();
|
||||||
struct IpCleanupGuard {
|
let session_id = rng.u64();
|
||||||
tracker: Arc<UserIpTracker>,
|
let relay_result = if config.general.use_middle_proxy
|
||||||
user: String,
|
&& matches!(route_snapshot.mode, RelayRouteMode::Middle)
|
||||||
ip: std::net::IpAddr,
|
{
|
||||||
}
|
|
||||||
|
|
||||||
impl Drop for IpCleanupGuard {
|
|
||||||
fn drop(&mut self) {
|
|
||||||
let tracker = self.tracker.clone();
|
|
||||||
let user = self.user.clone();
|
|
||||||
let ip = self.ip;
|
|
||||||
tokio::spawn(async move {
|
|
||||||
tracker.remove_ip(&user, ip).await;
|
|
||||||
debug!(user = %user, ip = %ip, "IP cleaned up on disconnect");
|
|
||||||
});
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
let _cleanup = IpCleanupGuard {
|
|
||||||
tracker: ip_tracker,
|
|
||||||
user: user.clone(),
|
|
||||||
ip: peer_addr.ip(),
|
|
||||||
};
|
|
||||||
|
|
||||||
// Decide: middle proxy or direct
|
|
||||||
if config.general.use_middle_proxy {
|
|
||||||
if let Some(ref pool) = me_pool {
|
if let Some(ref pool) = me_pool {
|
||||||
return handle_via_middle_proxy(
|
handle_via_middle_proxy(
|
||||||
client_reader,
|
client_reader,
|
||||||
client_writer,
|
client_writer,
|
||||||
success,
|
success,
|
||||||
@@ -567,12 +747,29 @@ impl RunningClientHandler {
|
|||||||
buffer_pool,
|
buffer_pool,
|
||||||
local_addr,
|
local_addr,
|
||||||
rng,
|
rng,
|
||||||
|
route_runtime.subscribe(),
|
||||||
|
route_snapshot,
|
||||||
|
session_id,
|
||||||
)
|
)
|
||||||
.await;
|
.await
|
||||||
}
|
} else {
|
||||||
warn!("use_middle_proxy=true but MePool not initialized, falling back to direct");
|
warn!("use_middle_proxy=true but MePool not initialized, falling back to direct");
|
||||||
|
handle_via_direct(
|
||||||
|
client_reader,
|
||||||
|
client_writer,
|
||||||
|
success,
|
||||||
|
upstream_manager,
|
||||||
|
stats,
|
||||||
|
config,
|
||||||
|
buffer_pool,
|
||||||
|
rng,
|
||||||
|
route_runtime.subscribe(),
|
||||||
|
route_snapshot,
|
||||||
|
session_id,
|
||||||
|
)
|
||||||
|
.await
|
||||||
}
|
}
|
||||||
|
} else {
|
||||||
// Direct mode (original behavior)
|
// Direct mode (original behavior)
|
||||||
handle_via_direct(
|
handle_via_direct(
|
||||||
client_reader,
|
client_reader,
|
||||||
@@ -583,8 +780,15 @@ impl RunningClientHandler {
|
|||||||
config,
|
config,
|
||||||
buffer_pool,
|
buffer_pool,
|
||||||
rng,
|
rng,
|
||||||
|
route_runtime.subscribe(),
|
||||||
|
route_snapshot,
|
||||||
|
session_id,
|
||||||
)
|
)
|
||||||
.await
|
.await
|
||||||
|
};
|
||||||
|
|
||||||
|
ip_tracker.remove_ip(&user, peer_addr.ip()).await;
|
||||||
|
relay_result
|
||||||
}
|
}
|
||||||
|
|
||||||
async fn check_user_limits_static(
|
async fn check_user_limits_static(
|
||||||
@@ -594,16 +798,17 @@ impl RunningClientHandler {
|
|||||||
peer_addr: SocketAddr,
|
peer_addr: SocketAddr,
|
||||||
ip_tracker: &UserIpTracker,
|
ip_tracker: &UserIpTracker,
|
||||||
) -> Result<()> {
|
) -> Result<()> {
|
||||||
if let Some(expiration) = config.access.user_expirations.get(user) {
|
if let Some(expiration) = config.access.user_expirations.get(user)
|
||||||
if chrono::Utc::now() > *expiration {
|
&& chrono::Utc::now() > *expiration
|
||||||
|
{
|
||||||
return Err(ProxyError::UserExpired {
|
return Err(ProxyError::UserExpired {
|
||||||
user: user.to_string(),
|
user: user.to_string(),
|
||||||
});
|
});
|
||||||
}
|
}
|
||||||
}
|
|
||||||
|
|
||||||
// IP limit check
|
let ip_reserved = match ip_tracker.check_and_add(user, peer_addr.ip()).await {
|
||||||
if let Err(reason) = ip_tracker.check_and_add(user, peer_addr.ip()).await {
|
Ok(()) => true,
|
||||||
|
Err(reason) => {
|
||||||
warn!(
|
warn!(
|
||||||
user = %user,
|
user = %user,
|
||||||
ip = %peer_addr.ip(),
|
ip = %peer_addr.ip(),
|
||||||
@@ -614,22 +819,32 @@ impl RunningClientHandler {
|
|||||||
user: user.to_string(),
|
user: user.to_string(),
|
||||||
});
|
});
|
||||||
}
|
}
|
||||||
|
};
|
||||||
|
// IP limit check
|
||||||
|
|
||||||
if let Some(limit) = config.access.user_max_tcp_conns.get(user) {
|
if let Some(limit) = config.access.user_max_tcp_conns.get(user)
|
||||||
if stats.get_user_curr_connects(user) >= *limit as u64 {
|
&& stats.get_user_curr_connects(user) >= *limit as u64
|
||||||
|
{
|
||||||
|
if ip_reserved {
|
||||||
|
ip_tracker.remove_ip(user, peer_addr.ip()).await;
|
||||||
|
stats.increment_ip_reservation_rollback_tcp_limit_total();
|
||||||
|
}
|
||||||
return Err(ProxyError::ConnectionLimitExceeded {
|
return Err(ProxyError::ConnectionLimitExceeded {
|
||||||
user: user.to_string(),
|
user: user.to_string(),
|
||||||
});
|
});
|
||||||
}
|
}
|
||||||
}
|
|
||||||
|
|
||||||
if let Some(quota) = config.access.user_data_quota.get(user) {
|
if let Some(quota) = config.access.user_data_quota.get(user)
|
||||||
if stats.get_user_total_octets(user) >= *quota {
|
&& stats.get_user_total_octets(user) >= *quota
|
||||||
|
{
|
||||||
|
if ip_reserved {
|
||||||
|
ip_tracker.remove_ip(user, peer_addr.ip()).await;
|
||||||
|
stats.increment_ip_reservation_rollback_quota_limit_total();
|
||||||
|
}
|
||||||
return Err(ProxyError::DataQuotaExceeded {
|
return Err(ProxyError::DataQuotaExceeded {
|
||||||
user: user.to_string(),
|
user: user.to_string(),
|
||||||
});
|
});
|
||||||
}
|
}
|
||||||
}
|
|
||||||
|
|
||||||
Ok(())
|
Ok(())
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -5,14 +5,19 @@ use std::sync::Arc;
|
|||||||
|
|
||||||
use tokio::io::{AsyncRead, AsyncWrite, AsyncWriteExt};
|
use tokio::io::{AsyncRead, AsyncWrite, AsyncWriteExt};
|
||||||
use tokio::net::TcpStream;
|
use tokio::net::TcpStream;
|
||||||
|
use tokio::sync::watch;
|
||||||
use tracing::{debug, info, warn};
|
use tracing::{debug, info, warn};
|
||||||
|
|
||||||
use crate::config::ProxyConfig;
|
use crate::config::ProxyConfig;
|
||||||
use crate::crypto::SecureRandom;
|
use crate::crypto::SecureRandom;
|
||||||
use crate::error::Result;
|
use crate::error::{ProxyError, Result};
|
||||||
use crate::protocol::constants::*;
|
use crate::protocol::constants::*;
|
||||||
use crate::proxy::handshake::{HandshakeSuccess, encrypt_tg_nonce_with_ciphers, generate_tg_nonce};
|
use crate::proxy::handshake::{HandshakeSuccess, encrypt_tg_nonce_with_ciphers, generate_tg_nonce};
|
||||||
use crate::proxy::relay::relay_bidirectional;
|
use crate::proxy::relay::relay_bidirectional;
|
||||||
|
use crate::proxy::route_mode::{
|
||||||
|
RelayRouteMode, RouteCutoverState, ROUTE_SWITCH_ERROR_MSG, affected_cutover_state,
|
||||||
|
cutover_stagger_delay,
|
||||||
|
};
|
||||||
use crate::stats::Stats;
|
use crate::stats::Stats;
|
||||||
use crate::stream::{BufferPool, CryptoReader, CryptoWriter};
|
use crate::stream::{BufferPool, CryptoReader, CryptoWriter};
|
||||||
use crate::transport::UpstreamManager;
|
use crate::transport::UpstreamManager;
|
||||||
@@ -26,6 +31,9 @@ pub(crate) async fn handle_via_direct<R, W>(
|
|||||||
config: Arc<ProxyConfig>,
|
config: Arc<ProxyConfig>,
|
||||||
buffer_pool: Arc<BufferPool>,
|
buffer_pool: Arc<BufferPool>,
|
||||||
rng: Arc<SecureRandom>,
|
rng: Arc<SecureRandom>,
|
||||||
|
mut route_rx: watch::Receiver<RouteCutoverState>,
|
||||||
|
route_snapshot: RouteCutoverState,
|
||||||
|
session_id: u64,
|
||||||
) -> Result<()>
|
) -> Result<()>
|
||||||
where
|
where
|
||||||
R: AsyncRead + Unpin + Send + 'static,
|
R: AsyncRead + Unpin + Send + 'static,
|
||||||
@@ -34,7 +42,7 @@ where
|
|||||||
let user = &success.user;
|
let user = &success.user;
|
||||||
let dc_addr = get_dc_addr_static(success.dc_idx, &config)?;
|
let dc_addr = get_dc_addr_static(success.dc_idx, &config)?;
|
||||||
|
|
||||||
info!(
|
debug!(
|
||||||
user = %user,
|
user = %user,
|
||||||
peer = %success.peer,
|
peer = %success.peer,
|
||||||
dc = success.dc_idx,
|
dc = success.dc_idx,
|
||||||
@@ -57,18 +65,50 @@ where
|
|||||||
|
|
||||||
stats.increment_user_connects(user);
|
stats.increment_user_connects(user);
|
||||||
stats.increment_user_curr_connects(user);
|
stats.increment_user_curr_connects(user);
|
||||||
|
stats.increment_current_connections_direct();
|
||||||
|
|
||||||
let relay_result = relay_bidirectional(
|
let relay_result = relay_bidirectional(
|
||||||
client_reader,
|
client_reader,
|
||||||
client_writer,
|
client_writer,
|
||||||
tg_reader,
|
tg_reader,
|
||||||
tg_writer,
|
tg_writer,
|
||||||
|
config.general.direct_relay_copy_buf_c2s_bytes,
|
||||||
|
config.general.direct_relay_copy_buf_s2c_bytes,
|
||||||
user,
|
user,
|
||||||
Arc::clone(&stats),
|
Arc::clone(&stats),
|
||||||
buffer_pool,
|
buffer_pool,
|
||||||
)
|
);
|
||||||
.await;
|
tokio::pin!(relay_result);
|
||||||
|
let relay_result = loop {
|
||||||
|
if let Some(cutover) = affected_cutover_state(
|
||||||
|
&route_rx,
|
||||||
|
RelayRouteMode::Direct,
|
||||||
|
route_snapshot.generation,
|
||||||
|
) {
|
||||||
|
let delay = cutover_stagger_delay(session_id, cutover.generation);
|
||||||
|
warn!(
|
||||||
|
user = %user,
|
||||||
|
target_mode = cutover.mode.as_str(),
|
||||||
|
cutover_generation = cutover.generation,
|
||||||
|
delay_ms = delay.as_millis() as u64,
|
||||||
|
"Cutover affected direct session, closing client connection"
|
||||||
|
);
|
||||||
|
tokio::time::sleep(delay).await;
|
||||||
|
break Err(ProxyError::Proxy(ROUTE_SWITCH_ERROR_MSG.to_string()));
|
||||||
|
}
|
||||||
|
tokio::select! {
|
||||||
|
result = &mut relay_result => {
|
||||||
|
break result;
|
||||||
|
}
|
||||||
|
changed = route_rx.changed() => {
|
||||||
|
if changed.is_err() {
|
||||||
|
break relay_result.await;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
stats.decrement_current_connections_direct();
|
||||||
stats.decrement_user_curr_connects(user);
|
stats.decrement_user_curr_connects(user);
|
||||||
|
|
||||||
match &relay_result {
|
match &relay_result {
|
||||||
@@ -118,10 +158,16 @@ fn get_dc_addr_static(dc_idx: i16, config: &ProxyConfig) -> Result<SocketAddr> {
|
|||||||
// Unknown DC requested by client without override: log and fall back.
|
// Unknown DC requested by client without override: log and fall back.
|
||||||
if !config.dc_overrides.contains_key(&dc_key) {
|
if !config.dc_overrides.contains_key(&dc_key) {
|
||||||
warn!(dc_idx = dc_idx, "Requested non-standard DC with no override; falling back to default cluster");
|
warn!(dc_idx = dc_idx, "Requested non-standard DC with no override; falling back to default cluster");
|
||||||
if let Some(path) = &config.general.unknown_dc_log_path {
|
if config.general.unknown_dc_file_log_enabled
|
||||||
|
&& let Some(path) = &config.general.unknown_dc_log_path
|
||||||
|
&& let Ok(handle) = tokio::runtime::Handle::try_current()
|
||||||
|
{
|
||||||
|
let path = path.clone();
|
||||||
|
handle.spawn_blocking(move || {
|
||||||
if let Ok(mut file) = OpenOptions::new().create(true).append(true).open(path) {
|
if let Ok(mut file) = OpenOptions::new().create(true).append(true).open(path) {
|
||||||
let _ = writeln!(file, "dc_idx={dc_idx}");
|
let _ = writeln!(file, "dc_idx={dc_idx}");
|
||||||
}
|
}
|
||||||
|
});
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
@@ -1,10 +1,12 @@
|
|||||||
//! MTProto Handshake
|
//! MTProto Handshake
|
||||||
|
|
||||||
|
#![allow(dead_code)]
|
||||||
|
|
||||||
use std::net::SocketAddr;
|
use std::net::SocketAddr;
|
||||||
use std::sync::Arc;
|
use std::sync::Arc;
|
||||||
use std::time::Duration;
|
use std::time::Duration;
|
||||||
use tokio::io::{AsyncRead, AsyncWrite, AsyncWriteExt};
|
use tokio::io::{AsyncRead, AsyncWrite, AsyncWriteExt};
|
||||||
use tracing::{debug, warn, trace, info};
|
use tracing::{debug, warn, trace};
|
||||||
use zeroize::Zeroize;
|
use zeroize::Zeroize;
|
||||||
|
|
||||||
use crate::crypto::{sha256, AesCtr, SecureRandom};
|
use crate::crypto::{sha256, AesCtr, SecureRandom};
|
||||||
@@ -17,6 +19,31 @@ use crate::stats::ReplayChecker;
|
|||||||
use crate::config::ProxyConfig;
|
use crate::config::ProxyConfig;
|
||||||
use crate::tls_front::{TlsFrontCache, emulator};
|
use crate::tls_front::{TlsFrontCache, emulator};
|
||||||
|
|
||||||
|
fn decode_user_secrets(
|
||||||
|
config: &ProxyConfig,
|
||||||
|
preferred_user: Option<&str>,
|
||||||
|
) -> Vec<(String, Vec<u8>)> {
|
||||||
|
let mut secrets = Vec::with_capacity(config.access.users.len());
|
||||||
|
|
||||||
|
if let Some(preferred) = preferred_user
|
||||||
|
&& let Some(secret_hex) = config.access.users.get(preferred)
|
||||||
|
&& let Ok(bytes) = hex::decode(secret_hex)
|
||||||
|
{
|
||||||
|
secrets.push((preferred.to_string(), bytes));
|
||||||
|
}
|
||||||
|
|
||||||
|
for (name, secret_hex) in &config.access.users {
|
||||||
|
if preferred_user.is_some_and(|preferred| preferred == name.as_str()) {
|
||||||
|
continue;
|
||||||
|
}
|
||||||
|
if let Ok(bytes) = hex::decode(secret_hex) {
|
||||||
|
secrets.push((name.clone(), bytes));
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
secrets
|
||||||
|
}
|
||||||
|
|
||||||
/// Result of successful handshake
|
/// Result of successful handshake
|
||||||
///
|
///
|
||||||
/// Key material (`dec_key`, `dec_iv`, `enc_key`, `enc_iv`) is
|
/// Key material (`dec_key`, `dec_iv`, `enc_key`, `enc_iv`) is
|
||||||
@@ -80,11 +107,7 @@ where
|
|||||||
return HandshakeResult::BadClient { reader, writer };
|
return HandshakeResult::BadClient { reader, writer };
|
||||||
}
|
}
|
||||||
|
|
||||||
let secrets: Vec<(String, Vec<u8>)> = config.access.users.iter()
|
let secrets = decode_user_secrets(config, None);
|
||||||
.filter_map(|(name, hex)| {
|
|
||||||
hex::decode(hex).ok().map(|bytes| (name.clone(), bytes))
|
|
||||||
})
|
|
||||||
.collect();
|
|
||||||
|
|
||||||
let validation = match tls::validate_tls_handshake(
|
let validation = match tls::validate_tls_handshake(
|
||||||
handshake,
|
handshake,
|
||||||
@@ -199,7 +222,7 @@ where
|
|||||||
return HandshakeResult::Error(ProxyError::Io(e));
|
return HandshakeResult::Error(ProxyError::Io(e));
|
||||||
}
|
}
|
||||||
|
|
||||||
info!(
|
debug!(
|
||||||
peer = %peer,
|
peer = %peer,
|
||||||
user = %validation.user,
|
user = %validation.user,
|
||||||
"TLS handshake successful"
|
"TLS handshake successful"
|
||||||
@@ -221,6 +244,7 @@ pub async fn handle_mtproto_handshake<R, W>(
|
|||||||
config: &ProxyConfig,
|
config: &ProxyConfig,
|
||||||
replay_checker: &ReplayChecker,
|
replay_checker: &ReplayChecker,
|
||||||
is_tls: bool,
|
is_tls: bool,
|
||||||
|
preferred_user: Option<&str>,
|
||||||
) -> HandshakeResult<(CryptoReader<R>, CryptoWriter<W>, HandshakeSuccess), R, W>
|
) -> HandshakeResult<(CryptoReader<R>, CryptoWriter<W>, HandshakeSuccess), R, W>
|
||||||
where
|
where
|
||||||
R: AsyncRead + Unpin + Send,
|
R: AsyncRead + Unpin + Send,
|
||||||
@@ -237,11 +261,9 @@ where
|
|||||||
|
|
||||||
let enc_prekey_iv: Vec<u8> = dec_prekey_iv.iter().rev().copied().collect();
|
let enc_prekey_iv: Vec<u8> = dec_prekey_iv.iter().rev().copied().collect();
|
||||||
|
|
||||||
for (user, secret_hex) in &config.access.users {
|
let decoded_users = decode_user_secrets(config, preferred_user);
|
||||||
let secret = match hex::decode(secret_hex) {
|
|
||||||
Ok(s) => s,
|
for (user, secret) in decoded_users {
|
||||||
Err(_) => continue,
|
|
||||||
};
|
|
||||||
|
|
||||||
let dec_prekey = &dec_prekey_iv[..PREKEY_LEN];
|
let dec_prekey = &dec_prekey_iv[..PREKEY_LEN];
|
||||||
let dec_iv_bytes = &dec_prekey_iv[PREKEY_LEN..];
|
let dec_iv_bytes = &dec_prekey_iv[PREKEY_LEN..];
|
||||||
@@ -309,7 +331,7 @@ where
|
|||||||
is_tls,
|
is_tls,
|
||||||
};
|
};
|
||||||
|
|
||||||
info!(
|
debug!(
|
||||||
peer = %peer,
|
peer = %peer,
|
||||||
user = %user,
|
user = %user,
|
||||||
dc = dc_idx,
|
dc = dc_idx,
|
||||||
|
|||||||
@@ -1,6 +1,7 @@
|
|||||||
//! Masking - forward unrecognized traffic to mask host
|
//! Masking - forward unrecognized traffic to mask host
|
||||||
|
|
||||||
use std::str;
|
use std::str;
|
||||||
|
use std::net::SocketAddr;
|
||||||
use std::time::Duration;
|
use std::time::Duration;
|
||||||
use tokio::net::TcpStream;
|
use tokio::net::TcpStream;
|
||||||
#[cfg(unix)]
|
#[cfg(unix)]
|
||||||
@@ -9,6 +10,9 @@ use tokio::io::{AsyncRead, AsyncWrite, AsyncReadExt, AsyncWriteExt};
|
|||||||
use tokio::time::timeout;
|
use tokio::time::timeout;
|
||||||
use tracing::debug;
|
use tracing::debug;
|
||||||
use crate::config::ProxyConfig;
|
use crate::config::ProxyConfig;
|
||||||
|
use crate::network::dns_overrides::resolve_socket_addr;
|
||||||
|
use crate::stats::beobachten::BeobachtenStore;
|
||||||
|
use crate::transport::proxy_protocol::{ProxyProtocolV1Builder, ProxyProtocolV2Builder};
|
||||||
|
|
||||||
const MASK_TIMEOUT: Duration = Duration::from_secs(5);
|
const MASK_TIMEOUT: Duration = Duration::from_secs(5);
|
||||||
/// Maximum duration for the entire masking relay.
|
/// Maximum duration for the entire masking relay.
|
||||||
@@ -19,13 +23,13 @@ const MASK_BUFFER_SIZE: usize = 8192;
|
|||||||
/// Detect client type based on initial data
|
/// Detect client type based on initial data
|
||||||
fn detect_client_type(data: &[u8]) -> &'static str {
|
fn detect_client_type(data: &[u8]) -> &'static str {
|
||||||
// Check for HTTP request
|
// Check for HTTP request
|
||||||
if data.len() > 4 {
|
if data.len() > 4
|
||||||
if data.starts_with(b"GET ") || data.starts_with(b"POST") ||
|
&& (data.starts_with(b"GET ") || data.starts_with(b"POST") ||
|
||||||
data.starts_with(b"HEAD") || data.starts_with(b"PUT ") ||
|
data.starts_with(b"HEAD") || data.starts_with(b"PUT ") ||
|
||||||
data.starts_with(b"DELETE") || data.starts_with(b"OPTIONS") {
|
data.starts_with(b"DELETE") || data.starts_with(b"OPTIONS"))
|
||||||
|
{
|
||||||
return "HTTP";
|
return "HTTP";
|
||||||
}
|
}
|
||||||
}
|
|
||||||
|
|
||||||
// Check for TLS ClientHello (0x16 = handshake, 0x03 0x01-0x03 = TLS version)
|
// Check for TLS ClientHello (0x16 = handshake, 0x03 0x01-0x03 = TLS version)
|
||||||
if data.len() > 3 && data[0] == 0x16 && data[1] == 0x03 {
|
if data.len() > 3 && data[0] == 0x16 && data[1] == 0x03 {
|
||||||
@@ -50,20 +54,27 @@ pub async fn handle_bad_client<R, W>(
|
|||||||
reader: R,
|
reader: R,
|
||||||
writer: W,
|
writer: W,
|
||||||
initial_data: &[u8],
|
initial_data: &[u8],
|
||||||
|
peer: SocketAddr,
|
||||||
|
local_addr: SocketAddr,
|
||||||
config: &ProxyConfig,
|
config: &ProxyConfig,
|
||||||
|
beobachten: &BeobachtenStore,
|
||||||
)
|
)
|
||||||
where
|
where
|
||||||
R: AsyncRead + Unpin + Send + 'static,
|
R: AsyncRead + Unpin + Send + 'static,
|
||||||
W: AsyncWrite + Unpin + Send + 'static,
|
W: AsyncWrite + Unpin + Send + 'static,
|
||||||
{
|
{
|
||||||
|
let client_type = detect_client_type(initial_data);
|
||||||
|
if config.general.beobachten {
|
||||||
|
let ttl = Duration::from_secs(config.general.beobachten_minutes.saturating_mul(60));
|
||||||
|
beobachten.record(client_type, peer.ip(), ttl);
|
||||||
|
}
|
||||||
|
|
||||||
if !config.censorship.mask {
|
if !config.censorship.mask {
|
||||||
// Masking disabled, just consume data
|
// Masking disabled, just consume data
|
||||||
consume_client_data(reader).await;
|
consume_client_data(reader).await;
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
|
|
||||||
let client_type = detect_client_type(initial_data);
|
|
||||||
|
|
||||||
// Connect via Unix socket or TCP
|
// Connect via Unix socket or TCP
|
||||||
#[cfg(unix)]
|
#[cfg(unix)]
|
||||||
if let Some(ref sock_path) = config.censorship.mask_unix_sock {
|
if let Some(ref sock_path) = config.censorship.mask_unix_sock {
|
||||||
@@ -77,7 +88,29 @@ where
|
|||||||
let connect_result = timeout(MASK_TIMEOUT, UnixStream::connect(sock_path)).await;
|
let connect_result = timeout(MASK_TIMEOUT, UnixStream::connect(sock_path)).await;
|
||||||
match connect_result {
|
match connect_result {
|
||||||
Ok(Ok(stream)) => {
|
Ok(Ok(stream)) => {
|
||||||
let (mask_read, mask_write) = stream.into_split();
|
let (mask_read, mut mask_write) = stream.into_split();
|
||||||
|
let proxy_header: Option<Vec<u8>> = match config.censorship.mask_proxy_protocol {
|
||||||
|
0 => None,
|
||||||
|
version => {
|
||||||
|
let header = match version {
|
||||||
|
2 => ProxyProtocolV2Builder::new().with_addrs(peer, local_addr).build(),
|
||||||
|
_ => match (peer, local_addr) {
|
||||||
|
(SocketAddr::V4(src), SocketAddr::V4(dst)) =>
|
||||||
|
ProxyProtocolV1Builder::new().tcp4(src.into(), dst.into()).build(),
|
||||||
|
(SocketAddr::V6(src), SocketAddr::V6(dst)) =>
|
||||||
|
ProxyProtocolV1Builder::new().tcp6(src.into(), dst.into()).build(),
|
||||||
|
_ =>
|
||||||
|
ProxyProtocolV1Builder::new().build(),
|
||||||
|
},
|
||||||
|
};
|
||||||
|
Some(header)
|
||||||
|
}
|
||||||
|
};
|
||||||
|
if let Some(header) = proxy_header {
|
||||||
|
if mask_write.write_all(&header).await.is_err() {
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
}
|
||||||
if timeout(MASK_RELAY_TIMEOUT, relay_to_mask(reader, writer, mask_read, mask_write, initial_data)).await.is_err() {
|
if timeout(MASK_RELAY_TIMEOUT, relay_to_mask(reader, writer, mask_read, mask_write, initial_data)).await.is_err() {
|
||||||
debug!("Mask relay timed out (unix socket)");
|
debug!("Mask relay timed out (unix socket)");
|
||||||
}
|
}
|
||||||
@@ -106,12 +139,37 @@ where
|
|||||||
"Forwarding bad client to mask host"
|
"Forwarding bad client to mask host"
|
||||||
);
|
);
|
||||||
|
|
||||||
// Connect to mask host
|
// Apply runtime DNS override for mask target when configured.
|
||||||
let mask_addr = format!("{}:{}", mask_host, mask_port);
|
let mask_addr = resolve_socket_addr(mask_host, mask_port)
|
||||||
|
.map(|addr| addr.to_string())
|
||||||
|
.unwrap_or_else(|| format!("{}:{}", mask_host, mask_port));
|
||||||
let connect_result = timeout(MASK_TIMEOUT, TcpStream::connect(&mask_addr)).await;
|
let connect_result = timeout(MASK_TIMEOUT, TcpStream::connect(&mask_addr)).await;
|
||||||
match connect_result {
|
match connect_result {
|
||||||
Ok(Ok(stream)) => {
|
Ok(Ok(stream)) => {
|
||||||
let (mask_read, mask_write) = stream.into_split();
|
let proxy_header: Option<Vec<u8>> = match config.censorship.mask_proxy_protocol {
|
||||||
|
0 => None,
|
||||||
|
version => {
|
||||||
|
let header = match version {
|
||||||
|
2 => ProxyProtocolV2Builder::new().with_addrs(peer, local_addr).build(),
|
||||||
|
_ => match (peer, local_addr) {
|
||||||
|
(SocketAddr::V4(src), SocketAddr::V4(dst)) =>
|
||||||
|
ProxyProtocolV1Builder::new().tcp4(src.into(), dst.into()).build(),
|
||||||
|
(SocketAddr::V6(src), SocketAddr::V6(dst)) =>
|
||||||
|
ProxyProtocolV1Builder::new().tcp6(src.into(), dst.into()).build(),
|
||||||
|
_ =>
|
||||||
|
ProxyProtocolV1Builder::new().build(),
|
||||||
|
},
|
||||||
|
};
|
||||||
|
Some(header)
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
let (mask_read, mut mask_write) = stream.into_split();
|
||||||
|
if let Some(header) = proxy_header {
|
||||||
|
if mask_write.write_all(&header).await.is_err() {
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
}
|
||||||
if timeout(MASK_RELAY_TIMEOUT, relay_to_mask(reader, writer, mask_read, mask_write, initial_data)).await.is_err() {
|
if timeout(MASK_RELAY_TIMEOUT, relay_to_mask(reader, writer, mask_read, mask_write, initial_data)).await.is_err() {
|
||||||
debug!("Mask relay timed out");
|
debug!("Mask relay timed out");
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -6,26 +6,36 @@ use std::sync::atomic::{AtomicU64, Ordering};
|
|||||||
use std::sync::{Arc, Mutex, OnceLock};
|
use std::sync::{Arc, Mutex, OnceLock};
|
||||||
use std::time::{Duration, Instant};
|
use std::time::{Duration, Instant};
|
||||||
|
|
||||||
|
use bytes::Bytes;
|
||||||
use tokio::io::{AsyncRead, AsyncReadExt, AsyncWrite, AsyncWriteExt};
|
use tokio::io::{AsyncRead, AsyncReadExt, AsyncWrite, AsyncWriteExt};
|
||||||
use tokio::sync::{mpsc, oneshot};
|
use tokio::sync::{mpsc, oneshot, watch};
|
||||||
use tracing::{debug, info, trace, warn};
|
use tracing::{debug, trace, warn};
|
||||||
|
|
||||||
use crate::config::ProxyConfig;
|
use crate::config::ProxyConfig;
|
||||||
use crate::crypto::SecureRandom;
|
use crate::crypto::SecureRandom;
|
||||||
use crate::error::{ProxyError, Result};
|
use crate::error::{ProxyError, Result};
|
||||||
use crate::protocol::constants::{*, secure_padding_len};
|
use crate::protocol::constants::{*, secure_padding_len};
|
||||||
use crate::proxy::handshake::HandshakeSuccess;
|
use crate::proxy::handshake::HandshakeSuccess;
|
||||||
|
use crate::proxy::route_mode::{
|
||||||
|
RelayRouteMode, RouteCutoverState, ROUTE_SWITCH_ERROR_MSG, affected_cutover_state,
|
||||||
|
cutover_stagger_delay,
|
||||||
|
};
|
||||||
use crate::stats::Stats;
|
use crate::stats::Stats;
|
||||||
use crate::stream::{BufferPool, CryptoReader, CryptoWriter};
|
use crate::stream::{BufferPool, CryptoReader, CryptoWriter};
|
||||||
use crate::transport::middle_proxy::{MePool, MeResponse, proto_flags_for_tag};
|
use crate::transport::middle_proxy::{MePool, MeResponse, proto_flags_for_tag};
|
||||||
|
|
||||||
enum C2MeCommand {
|
enum C2MeCommand {
|
||||||
Data { payload: Vec<u8>, flags: u32 },
|
Data { payload: Bytes, flags: u32 },
|
||||||
Close,
|
Close,
|
||||||
}
|
}
|
||||||
|
|
||||||
const DESYNC_DEDUP_WINDOW: Duration = Duration::from_secs(60);
|
const DESYNC_DEDUP_WINDOW: Duration = Duration::from_secs(60);
|
||||||
const DESYNC_ERROR_CLASS: &str = "frame_too_large_crypto_desync";
|
const DESYNC_ERROR_CLASS: &str = "frame_too_large_crypto_desync";
|
||||||
|
const C2ME_CHANNEL_CAPACITY_FALLBACK: usize = 128;
|
||||||
|
const C2ME_SOFT_PRESSURE_MIN_FREE_SLOTS: usize = 64;
|
||||||
|
const C2ME_SENDER_FAIRNESS_BUDGET: usize = 32;
|
||||||
|
const ME_D2C_FLUSH_BATCH_MAX_FRAMES_MIN: usize = 1;
|
||||||
|
const ME_D2C_FLUSH_BATCH_MAX_BYTES_MIN: usize = 4096;
|
||||||
static DESYNC_DEDUP: OnceLock<Mutex<HashMap<u64, Instant>>> = OnceLock::new();
|
static DESYNC_DEDUP: OnceLock<Mutex<HashMap<u64, Instant>>> = OnceLock::new();
|
||||||
|
|
||||||
struct RelayForensicsState {
|
struct RelayForensicsState {
|
||||||
@@ -40,6 +50,31 @@ struct RelayForensicsState {
|
|||||||
desync_all_full: bool,
|
desync_all_full: bool,
|
||||||
}
|
}
|
||||||
|
|
||||||
|
#[derive(Clone, Copy)]
|
||||||
|
struct MeD2cFlushPolicy {
|
||||||
|
max_frames: usize,
|
||||||
|
max_bytes: usize,
|
||||||
|
max_delay: Duration,
|
||||||
|
ack_flush_immediate: bool,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl MeD2cFlushPolicy {
|
||||||
|
fn from_config(config: &ProxyConfig) -> Self {
|
||||||
|
Self {
|
||||||
|
max_frames: config
|
||||||
|
.general
|
||||||
|
.me_d2c_flush_batch_max_frames
|
||||||
|
.max(ME_D2C_FLUSH_BATCH_MAX_FRAMES_MIN),
|
||||||
|
max_bytes: config
|
||||||
|
.general
|
||||||
|
.me_d2c_flush_batch_max_bytes
|
||||||
|
.max(ME_D2C_FLUSH_BATCH_MAX_BYTES_MIN),
|
||||||
|
max_delay: Duration::from_micros(config.general.me_d2c_flush_batch_max_delay_us),
|
||||||
|
ack_flush_immediate: config.general.me_d2c_ack_flush_immediate,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
fn hash_value<T: Hash>(value: &T) -> u64 {
|
fn hash_value<T: Hash>(value: &T) -> u64 {
|
||||||
let mut hasher = DefaultHasher::new();
|
let mut hasher = DefaultHasher::new();
|
||||||
value.hash(&mut hasher);
|
value.hash(&mut hasher);
|
||||||
@@ -166,6 +201,27 @@ fn report_desync_frame_too_large(
|
|||||||
))
|
))
|
||||||
}
|
}
|
||||||
|
|
||||||
|
fn should_yield_c2me_sender(sent_since_yield: usize, has_backlog: bool) -> bool {
|
||||||
|
has_backlog && sent_since_yield >= C2ME_SENDER_FAIRNESS_BUDGET
|
||||||
|
}
|
||||||
|
|
||||||
|
async fn enqueue_c2me_command(
|
||||||
|
tx: &mpsc::Sender<C2MeCommand>,
|
||||||
|
cmd: C2MeCommand,
|
||||||
|
) -> std::result::Result<(), mpsc::error::SendError<C2MeCommand>> {
|
||||||
|
match tx.try_send(cmd) {
|
||||||
|
Ok(()) => Ok(()),
|
||||||
|
Err(mpsc::error::TrySendError::Closed(cmd)) => Err(mpsc::error::SendError(cmd)),
|
||||||
|
Err(mpsc::error::TrySendError::Full(cmd)) => {
|
||||||
|
// Cooperative yield reduces burst catch-up when the per-conn queue is near saturation.
|
||||||
|
if tx.capacity() <= C2ME_SOFT_PRESSURE_MIN_FREE_SLOTS {
|
||||||
|
tokio::task::yield_now().await;
|
||||||
|
}
|
||||||
|
tx.send(cmd).await
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
pub(crate) async fn handle_via_middle_proxy<R, W>(
|
pub(crate) async fn handle_via_middle_proxy<R, W>(
|
||||||
mut crypto_reader: CryptoReader<R>,
|
mut crypto_reader: CryptoReader<R>,
|
||||||
crypto_writer: CryptoWriter<W>,
|
crypto_writer: CryptoWriter<W>,
|
||||||
@@ -176,6 +232,9 @@ pub(crate) async fn handle_via_middle_proxy<R, W>(
|
|||||||
_buffer_pool: Arc<BufferPool>,
|
_buffer_pool: Arc<BufferPool>,
|
||||||
local_addr: SocketAddr,
|
local_addr: SocketAddr,
|
||||||
rng: Arc<SecureRandom>,
|
rng: Arc<SecureRandom>,
|
||||||
|
mut route_rx: watch::Receiver<RouteCutoverState>,
|
||||||
|
route_snapshot: RouteCutoverState,
|
||||||
|
session_id: u64,
|
||||||
) -> Result<()>
|
) -> Result<()>
|
||||||
where
|
where
|
||||||
R: AsyncRead + Unpin + Send + 'static,
|
R: AsyncRead + Unpin + Send + 'static,
|
||||||
@@ -184,13 +243,15 @@ where
|
|||||||
let user = success.user.clone();
|
let user = success.user.clone();
|
||||||
let peer = success.peer;
|
let peer = success.peer;
|
||||||
let proto_tag = success.proto_tag;
|
let proto_tag = success.proto_tag;
|
||||||
|
let pool_generation = me_pool.current_generation();
|
||||||
|
|
||||||
info!(
|
debug!(
|
||||||
user = %user,
|
user = %user,
|
||||||
peer = %peer,
|
peer = %peer,
|
||||||
dc = success.dc_idx,
|
dc = success.dc_idx,
|
||||||
proto = ?proto_tag,
|
proto = ?proto_tag,
|
||||||
mode = "middle_proxy",
|
mode = "middle_proxy",
|
||||||
|
pool_generation,
|
||||||
"Routing via Middle-End"
|
"Routing via Middle-End"
|
||||||
);
|
);
|
||||||
|
|
||||||
@@ -211,8 +272,45 @@ where
|
|||||||
|
|
||||||
stats.increment_user_connects(&user);
|
stats.increment_user_connects(&user);
|
||||||
stats.increment_user_curr_connects(&user);
|
stats.increment_user_curr_connects(&user);
|
||||||
|
stats.increment_current_connections_me();
|
||||||
|
|
||||||
let proto_flags = proto_flags_for_tag(proto_tag, me_pool.has_proxy_tag());
|
if let Some(cutover) = affected_cutover_state(
|
||||||
|
&route_rx,
|
||||||
|
RelayRouteMode::Middle,
|
||||||
|
route_snapshot.generation,
|
||||||
|
) {
|
||||||
|
let delay = cutover_stagger_delay(session_id, cutover.generation);
|
||||||
|
warn!(
|
||||||
|
conn_id,
|
||||||
|
target_mode = cutover.mode.as_str(),
|
||||||
|
cutover_generation = cutover.generation,
|
||||||
|
delay_ms = delay.as_millis() as u64,
|
||||||
|
"Cutover affected middle session before relay start, closing client connection"
|
||||||
|
);
|
||||||
|
tokio::time::sleep(delay).await;
|
||||||
|
let _ = me_pool.send_close(conn_id).await;
|
||||||
|
me_pool.registry().unregister(conn_id).await;
|
||||||
|
stats.decrement_current_connections_me();
|
||||||
|
stats.decrement_user_curr_connects(&user);
|
||||||
|
return Err(ProxyError::Proxy(ROUTE_SWITCH_ERROR_MSG.to_string()));
|
||||||
|
}
|
||||||
|
|
||||||
|
// Per-user ad_tag from access.user_ad_tags; fallback to general.ad_tag (hot-reloadable)
|
||||||
|
let user_tag: Option<Vec<u8>> = config
|
||||||
|
.access
|
||||||
|
.user_ad_tags
|
||||||
|
.get(&user)
|
||||||
|
.and_then(|s| hex::decode(s).ok())
|
||||||
|
.filter(|v| v.len() == 16);
|
||||||
|
let global_tag: Option<Vec<u8>> = config
|
||||||
|
.general
|
||||||
|
.ad_tag
|
||||||
|
.as_ref()
|
||||||
|
.and_then(|s| hex::decode(s).ok())
|
||||||
|
.filter(|v| v.len() == 16);
|
||||||
|
let effective_tag = user_tag.or(global_tag);
|
||||||
|
|
||||||
|
let proto_flags = proto_flags_for_tag(proto_tag, effective_tag.is_some());
|
||||||
debug!(
|
debug!(
|
||||||
trace_id = format_args!("0x{:016x}", trace_id),
|
trace_id = format_args!("0x{:016x}", trace_id),
|
||||||
user = %user,
|
user = %user,
|
||||||
@@ -220,6 +318,7 @@ where
|
|||||||
peer_hash = format_args!("0x{:016x}", forensics.peer_hash),
|
peer_hash = format_args!("0x{:016x}", forensics.peer_hash),
|
||||||
desync_all_full = forensics.desync_all_full,
|
desync_all_full = forensics.desync_all_full,
|
||||||
proto_flags = format_args!("0x{:08x}", proto_flags),
|
proto_flags = format_args!("0x{:08x}", proto_flags),
|
||||||
|
pool_generation,
|
||||||
"ME relay started"
|
"ME relay started"
|
||||||
);
|
);
|
||||||
|
|
||||||
@@ -227,9 +326,15 @@ where
|
|||||||
|
|
||||||
let frame_limit = config.general.max_client_frame;
|
let frame_limit = config.general.max_client_frame;
|
||||||
|
|
||||||
let (c2me_tx, mut c2me_rx) = mpsc::channel::<C2MeCommand>(1024);
|
let c2me_channel_capacity = config
|
||||||
|
.general
|
||||||
|
.me_c2me_channel_capacity
|
||||||
|
.max(C2ME_CHANNEL_CAPACITY_FALLBACK);
|
||||||
|
let (c2me_tx, mut c2me_rx) = mpsc::channel::<C2MeCommand>(c2me_channel_capacity);
|
||||||
let me_pool_c2me = me_pool.clone();
|
let me_pool_c2me = me_pool.clone();
|
||||||
|
let effective_tag = effective_tag;
|
||||||
let c2me_sender = tokio::spawn(async move {
|
let c2me_sender = tokio::spawn(async move {
|
||||||
|
let mut sent_since_yield = 0usize;
|
||||||
while let Some(cmd) = c2me_rx.recv().await {
|
while let Some(cmd) = c2me_rx.recv().await {
|
||||||
match cmd {
|
match cmd {
|
||||||
C2MeCommand::Data { payload, flags } => {
|
C2MeCommand::Data { payload, flags } => {
|
||||||
@@ -238,9 +343,15 @@ where
|
|||||||
success.dc_idx,
|
success.dc_idx,
|
||||||
peer,
|
peer,
|
||||||
translated_local_addr,
|
translated_local_addr,
|
||||||
&payload,
|
payload.as_ref(),
|
||||||
flags,
|
flags,
|
||||||
|
effective_tag.as_deref(),
|
||||||
).await?;
|
).await?;
|
||||||
|
sent_since_yield = sent_since_yield.saturating_add(1);
|
||||||
|
if should_yield_c2me_sender(sent_since_yield, !c2me_rx.is_empty()) {
|
||||||
|
sent_since_yield = 0;
|
||||||
|
tokio::task::yield_now().await;
|
||||||
|
}
|
||||||
}
|
}
|
||||||
C2MeCommand::Close => {
|
C2MeCommand::Close => {
|
||||||
let _ = me_pool_c2me.send_close(conn_id).await;
|
let _ = me_pool_c2me.send_close(conn_id).await;
|
||||||
@@ -257,72 +368,153 @@ where
|
|||||||
let rng_clone = rng.clone();
|
let rng_clone = rng.clone();
|
||||||
let user_clone = user.clone();
|
let user_clone = user.clone();
|
||||||
let bytes_me2c_clone = bytes_me2c.clone();
|
let bytes_me2c_clone = bytes_me2c.clone();
|
||||||
|
let d2c_flush_policy = MeD2cFlushPolicy::from_config(&config);
|
||||||
let me_writer = tokio::spawn(async move {
|
let me_writer = tokio::spawn(async move {
|
||||||
let mut writer = crypto_writer;
|
let mut writer = crypto_writer;
|
||||||
let mut frame_buf = Vec::with_capacity(16 * 1024);
|
let mut frame_buf = Vec::with_capacity(16 * 1024);
|
||||||
loop {
|
loop {
|
||||||
tokio::select! {
|
tokio::select! {
|
||||||
msg = me_rx_task.recv() => {
|
msg = me_rx_task.recv() => {
|
||||||
match msg {
|
let Some(first) = msg else {
|
||||||
Some(MeResponse::Data { flags, data }) => {
|
debug!(conn_id, "ME channel closed");
|
||||||
trace!(conn_id, bytes = data.len(), flags, "ME->C data");
|
return Err(ProxyError::Proxy("ME connection lost".into()));
|
||||||
bytes_me2c_clone.fetch_add(data.len() as u64, Ordering::Relaxed);
|
};
|
||||||
stats_clone.add_user_octets_to(&user_clone, data.len() as u64);
|
|
||||||
write_client_payload(
|
|
||||||
&mut writer,
|
|
||||||
proto_tag,
|
|
||||||
flags,
|
|
||||||
&data,
|
|
||||||
rng_clone.as_ref(),
|
|
||||||
&mut frame_buf,
|
|
||||||
)
|
|
||||||
.await?;
|
|
||||||
|
|
||||||
// Drain all immediately queued ME responses and flush once.
|
let mut batch_frames = 0usize;
|
||||||
while let Ok(next) = me_rx_task.try_recv() {
|
let mut batch_bytes = 0usize;
|
||||||
match next {
|
let mut flush_immediately;
|
||||||
MeResponse::Data { flags, data } => {
|
|
||||||
trace!(conn_id, bytes = data.len(), flags, "ME->C data (batched)");
|
match process_me_writer_response(
|
||||||
bytes_me2c_clone.fetch_add(data.len() as u64, Ordering::Relaxed);
|
first,
|
||||||
stats_clone.add_user_octets_to(&user_clone, data.len() as u64);
|
|
||||||
write_client_payload(
|
|
||||||
&mut writer,
|
&mut writer,
|
||||||
proto_tag,
|
proto_tag,
|
||||||
flags,
|
|
||||||
&data,
|
|
||||||
rng_clone.as_ref(),
|
rng_clone.as_ref(),
|
||||||
&mut frame_buf,
|
&mut frame_buf,
|
||||||
).await?;
|
stats_clone.as_ref(),
|
||||||
|
&user_clone,
|
||||||
|
bytes_me2c_clone.as_ref(),
|
||||||
|
conn_id,
|
||||||
|
d2c_flush_policy.ack_flush_immediate,
|
||||||
|
false,
|
||||||
|
).await? {
|
||||||
|
MeWriterResponseOutcome::Continue { frames, bytes, flush_immediately: immediate } => {
|
||||||
|
batch_frames = batch_frames.saturating_add(frames);
|
||||||
|
batch_bytes = batch_bytes.saturating_add(bytes);
|
||||||
|
flush_immediately = immediate;
|
||||||
}
|
}
|
||||||
MeResponse::Ack(confirm) => {
|
MeWriterResponseOutcome::Close => {
|
||||||
trace!(conn_id, confirm, "ME->C quickack (batched)");
|
|
||||||
write_client_ack(&mut writer, proto_tag, confirm).await?;
|
|
||||||
}
|
|
||||||
MeResponse::Close => {
|
|
||||||
debug!(conn_id, "ME sent close (batched)");
|
|
||||||
let _ = writer.flush().await;
|
let _ = writer.flush().await;
|
||||||
return Ok(());
|
return Ok(());
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
while !flush_immediately
|
||||||
|
&& batch_frames < d2c_flush_policy.max_frames
|
||||||
|
&& batch_bytes < d2c_flush_policy.max_bytes
|
||||||
|
{
|
||||||
|
let Ok(next) = me_rx_task.try_recv() else {
|
||||||
|
break;
|
||||||
|
};
|
||||||
|
|
||||||
|
match process_me_writer_response(
|
||||||
|
next,
|
||||||
|
&mut writer,
|
||||||
|
proto_tag,
|
||||||
|
rng_clone.as_ref(),
|
||||||
|
&mut frame_buf,
|
||||||
|
stats_clone.as_ref(),
|
||||||
|
&user_clone,
|
||||||
|
bytes_me2c_clone.as_ref(),
|
||||||
|
conn_id,
|
||||||
|
d2c_flush_policy.ack_flush_immediate,
|
||||||
|
true,
|
||||||
|
).await? {
|
||||||
|
MeWriterResponseOutcome::Continue { frames, bytes, flush_immediately: immediate } => {
|
||||||
|
batch_frames = batch_frames.saturating_add(frames);
|
||||||
|
batch_bytes = batch_bytes.saturating_add(bytes);
|
||||||
|
flush_immediately |= immediate;
|
||||||
|
}
|
||||||
|
MeWriterResponseOutcome::Close => {
|
||||||
|
let _ = writer.flush().await;
|
||||||
|
return Ok(());
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if !flush_immediately
|
||||||
|
&& !d2c_flush_policy.max_delay.is_zero()
|
||||||
|
&& batch_frames < d2c_flush_policy.max_frames
|
||||||
|
&& batch_bytes < d2c_flush_policy.max_bytes
|
||||||
|
{
|
||||||
|
match tokio::time::timeout(d2c_flush_policy.max_delay, me_rx_task.recv()).await {
|
||||||
|
Ok(Some(next)) => {
|
||||||
|
match process_me_writer_response(
|
||||||
|
next,
|
||||||
|
&mut writer,
|
||||||
|
proto_tag,
|
||||||
|
rng_clone.as_ref(),
|
||||||
|
&mut frame_buf,
|
||||||
|
stats_clone.as_ref(),
|
||||||
|
&user_clone,
|
||||||
|
bytes_me2c_clone.as_ref(),
|
||||||
|
conn_id,
|
||||||
|
d2c_flush_policy.ack_flush_immediate,
|
||||||
|
true,
|
||||||
|
).await? {
|
||||||
|
MeWriterResponseOutcome::Continue { frames, bytes, flush_immediately: immediate } => {
|
||||||
|
batch_frames = batch_frames.saturating_add(frames);
|
||||||
|
batch_bytes = batch_bytes.saturating_add(bytes);
|
||||||
|
flush_immediately |= immediate;
|
||||||
|
}
|
||||||
|
MeWriterResponseOutcome::Close => {
|
||||||
|
let _ = writer.flush().await;
|
||||||
|
return Ok(());
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
while !flush_immediately
|
||||||
|
&& batch_frames < d2c_flush_policy.max_frames
|
||||||
|
&& batch_bytes < d2c_flush_policy.max_bytes
|
||||||
|
{
|
||||||
|
let Ok(extra) = me_rx_task.try_recv() else {
|
||||||
|
break;
|
||||||
|
};
|
||||||
|
|
||||||
|
match process_me_writer_response(
|
||||||
|
extra,
|
||||||
|
&mut writer,
|
||||||
|
proto_tag,
|
||||||
|
rng_clone.as_ref(),
|
||||||
|
&mut frame_buf,
|
||||||
|
stats_clone.as_ref(),
|
||||||
|
&user_clone,
|
||||||
|
bytes_me2c_clone.as_ref(),
|
||||||
|
conn_id,
|
||||||
|
d2c_flush_policy.ack_flush_immediate,
|
||||||
|
true,
|
||||||
|
).await? {
|
||||||
|
MeWriterResponseOutcome::Continue { frames, bytes, flush_immediately: immediate } => {
|
||||||
|
batch_frames = batch_frames.saturating_add(frames);
|
||||||
|
batch_bytes = batch_bytes.saturating_add(bytes);
|
||||||
|
flush_immediately |= immediate;
|
||||||
|
}
|
||||||
|
MeWriterResponseOutcome::Close => {
|
||||||
|
let _ = writer.flush().await;
|
||||||
|
return Ok(());
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
Ok(None) => {
|
||||||
|
debug!(conn_id, "ME channel closed");
|
||||||
|
return Err(ProxyError::Proxy("ME connection lost".into()));
|
||||||
|
}
|
||||||
|
Err(_) => {}
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
writer.flush().await.map_err(ProxyError::Io)?;
|
writer.flush().await.map_err(ProxyError::Io)?;
|
||||||
}
|
}
|
||||||
Some(MeResponse::Ack(confirm)) => {
|
|
||||||
trace!(conn_id, confirm, "ME->C quickack");
|
|
||||||
write_client_ack(&mut writer, proto_tag, confirm).await?;
|
|
||||||
}
|
|
||||||
Some(MeResponse::Close) => {
|
|
||||||
debug!(conn_id, "ME sent close");
|
|
||||||
let _ = writer.flush().await;
|
|
||||||
return Ok(());
|
|
||||||
}
|
|
||||||
None => {
|
|
||||||
debug!(conn_id, "ME channel closed");
|
|
||||||
return Err(ProxyError::Proxy("ME connection lost".into()));
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
_ = &mut stop_rx => {
|
_ = &mut stop_rx => {
|
||||||
debug!(conn_id, "ME writer stop signal");
|
debug!(conn_id, "ME writer stop signal");
|
||||||
return Ok(());
|
return Ok(());
|
||||||
@@ -334,15 +526,42 @@ where
|
|||||||
let mut main_result: Result<()> = Ok(());
|
let mut main_result: Result<()> = Ok(());
|
||||||
let mut client_closed = false;
|
let mut client_closed = false;
|
||||||
let mut frame_counter: u64 = 0;
|
let mut frame_counter: u64 = 0;
|
||||||
|
let mut route_watch_open = true;
|
||||||
loop {
|
loop {
|
||||||
match read_client_payload(
|
if let Some(cutover) = affected_cutover_state(
|
||||||
|
&route_rx,
|
||||||
|
RelayRouteMode::Middle,
|
||||||
|
route_snapshot.generation,
|
||||||
|
) {
|
||||||
|
let delay = cutover_stagger_delay(session_id, cutover.generation);
|
||||||
|
warn!(
|
||||||
|
conn_id,
|
||||||
|
target_mode = cutover.mode.as_str(),
|
||||||
|
cutover_generation = cutover.generation,
|
||||||
|
delay_ms = delay.as_millis() as u64,
|
||||||
|
"Cutover affected middle session, closing client connection"
|
||||||
|
);
|
||||||
|
tokio::time::sleep(delay).await;
|
||||||
|
let _ = enqueue_c2me_command(&c2me_tx, C2MeCommand::Close).await;
|
||||||
|
main_result = Err(ProxyError::Proxy(ROUTE_SWITCH_ERROR_MSG.to_string()));
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
|
||||||
|
tokio::select! {
|
||||||
|
changed = route_rx.changed(), if route_watch_open => {
|
||||||
|
if changed.is_err() {
|
||||||
|
route_watch_open = false;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
payload_result = read_client_payload(
|
||||||
&mut crypto_reader,
|
&mut crypto_reader,
|
||||||
proto_tag,
|
proto_tag,
|
||||||
frame_limit,
|
frame_limit,
|
||||||
&forensics,
|
&forensics,
|
||||||
&mut frame_counter,
|
&mut frame_counter,
|
||||||
&stats,
|
&stats,
|
||||||
).await {
|
) => {
|
||||||
|
match payload_result {
|
||||||
Ok(Some((payload, quickack))) => {
|
Ok(Some((payload, quickack))) => {
|
||||||
trace!(conn_id, bytes = payload.len(), "C->ME frame");
|
trace!(conn_id, bytes = payload.len(), "C->ME frame");
|
||||||
forensics.bytes_c2me = forensics
|
forensics.bytes_c2me = forensics
|
||||||
@@ -357,8 +576,7 @@ where
|
|||||||
flags |= RPC_FLAG_NOT_ENCRYPTED;
|
flags |= RPC_FLAG_NOT_ENCRYPTED;
|
||||||
}
|
}
|
||||||
// Keep client read loop lightweight: route heavy ME send path via a dedicated task.
|
// Keep client read loop lightweight: route heavy ME send path via a dedicated task.
|
||||||
if c2me_tx
|
if enqueue_c2me_command(&c2me_tx, C2MeCommand::Data { payload, flags })
|
||||||
.send(C2MeCommand::Data { payload, flags })
|
|
||||||
.await
|
.await
|
||||||
.is_err()
|
.is_err()
|
||||||
{
|
{
|
||||||
@@ -369,7 +587,7 @@ where
|
|||||||
Ok(None) => {
|
Ok(None) => {
|
||||||
debug!(conn_id, "Client EOF");
|
debug!(conn_id, "Client EOF");
|
||||||
client_closed = true;
|
client_closed = true;
|
||||||
let _ = c2me_tx.send(C2MeCommand::Close).await;
|
let _ = enqueue_c2me_command(&c2me_tx, C2MeCommand::Close).await;
|
||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
Err(e) => {
|
Err(e) => {
|
||||||
@@ -378,6 +596,8 @@ where
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
drop(c2me_tx);
|
drop(c2me_tx);
|
||||||
let c2me_result = c2me_sender
|
let c2me_result = c2me_sender
|
||||||
@@ -390,14 +610,14 @@ where
|
|||||||
.unwrap_or_else(|e| Err(ProxyError::Proxy(format!("ME writer join error: {e}"))));
|
.unwrap_or_else(|e| Err(ProxyError::Proxy(format!("ME writer join error: {e}"))));
|
||||||
|
|
||||||
// When client closes, but ME channel stopped as unregistered - it isnt error
|
// When client closes, but ME channel stopped as unregistered - it isnt error
|
||||||
if client_closed {
|
if client_closed
|
||||||
if matches!(
|
&& matches!(
|
||||||
writer_result,
|
writer_result,
|
||||||
Err(ProxyError::Proxy(ref msg)) if msg == "ME connection lost"
|
Err(ProxyError::Proxy(ref msg)) if msg == "ME connection lost"
|
||||||
) {
|
)
|
||||||
|
{
|
||||||
writer_result = Ok(());
|
writer_result = Ok(());
|
||||||
}
|
}
|
||||||
}
|
|
||||||
|
|
||||||
let result = match (main_result, c2me_result, writer_result) {
|
let result = match (main_result, c2me_result, writer_result) {
|
||||||
(Ok(()), Ok(()), Ok(())) => Ok(()),
|
(Ok(()), Ok(()), Ok(())) => Ok(()),
|
||||||
@@ -417,6 +637,7 @@ where
|
|||||||
"ME relay cleanup"
|
"ME relay cleanup"
|
||||||
);
|
);
|
||||||
me_pool.registry().unregister(conn_id).await;
|
me_pool.registry().unregister(conn_id).await;
|
||||||
|
stats.decrement_current_connections_me();
|
||||||
stats.decrement_user_curr_connects(&user);
|
stats.decrement_user_curr_connects(&user);
|
||||||
result
|
result
|
||||||
}
|
}
|
||||||
@@ -428,7 +649,7 @@ async fn read_client_payload<R>(
|
|||||||
forensics: &RelayForensicsState,
|
forensics: &RelayForensicsState,
|
||||||
frame_counter: &mut u64,
|
frame_counter: &mut u64,
|
||||||
stats: &Stats,
|
stats: &Stats,
|
||||||
) -> Result<Option<(Vec<u8>, bool)>>
|
) -> Result<Option<(Bytes, bool)>>
|
||||||
where
|
where
|
||||||
R: AsyncRead + Unpin + Send + 'static,
|
R: AsyncRead + Unpin + Send + 'static,
|
||||||
{
|
{
|
||||||
@@ -527,7 +748,82 @@ where
|
|||||||
payload.truncate(secure_payload_len);
|
payload.truncate(secure_payload_len);
|
||||||
}
|
}
|
||||||
*frame_counter += 1;
|
*frame_counter += 1;
|
||||||
return Ok(Some((payload, quickack)));
|
return Ok(Some((Bytes::from(payload), quickack)));
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
enum MeWriterResponseOutcome {
|
||||||
|
Continue {
|
||||||
|
frames: usize,
|
||||||
|
bytes: usize,
|
||||||
|
flush_immediately: bool,
|
||||||
|
},
|
||||||
|
Close,
|
||||||
|
}
|
||||||
|
|
||||||
|
async fn process_me_writer_response<W>(
|
||||||
|
response: MeResponse,
|
||||||
|
client_writer: &mut CryptoWriter<W>,
|
||||||
|
proto_tag: ProtoTag,
|
||||||
|
rng: &SecureRandom,
|
||||||
|
frame_buf: &mut Vec<u8>,
|
||||||
|
stats: &Stats,
|
||||||
|
user: &str,
|
||||||
|
bytes_me2c: &AtomicU64,
|
||||||
|
conn_id: u64,
|
||||||
|
ack_flush_immediate: bool,
|
||||||
|
batched: bool,
|
||||||
|
) -> Result<MeWriterResponseOutcome>
|
||||||
|
where
|
||||||
|
W: AsyncWrite + Unpin + Send + 'static,
|
||||||
|
{
|
||||||
|
match response {
|
||||||
|
MeResponse::Data { flags, data } => {
|
||||||
|
if batched {
|
||||||
|
trace!(conn_id, bytes = data.len(), flags, "ME->C data (batched)");
|
||||||
|
} else {
|
||||||
|
trace!(conn_id, bytes = data.len(), flags, "ME->C data");
|
||||||
|
}
|
||||||
|
bytes_me2c.fetch_add(data.len() as u64, Ordering::Relaxed);
|
||||||
|
stats.add_user_octets_to(user, data.len() as u64);
|
||||||
|
write_client_payload(
|
||||||
|
client_writer,
|
||||||
|
proto_tag,
|
||||||
|
flags,
|
||||||
|
&data,
|
||||||
|
rng,
|
||||||
|
frame_buf,
|
||||||
|
)
|
||||||
|
.await?;
|
||||||
|
|
||||||
|
Ok(MeWriterResponseOutcome::Continue {
|
||||||
|
frames: 1,
|
||||||
|
bytes: data.len(),
|
||||||
|
flush_immediately: false,
|
||||||
|
})
|
||||||
|
}
|
||||||
|
MeResponse::Ack(confirm) => {
|
||||||
|
if batched {
|
||||||
|
trace!(conn_id, confirm, "ME->C quickack (batched)");
|
||||||
|
} else {
|
||||||
|
trace!(conn_id, confirm, "ME->C quickack");
|
||||||
|
}
|
||||||
|
write_client_ack(client_writer, proto_tag, confirm).await?;
|
||||||
|
|
||||||
|
Ok(MeWriterResponseOutcome::Continue {
|
||||||
|
frames: 1,
|
||||||
|
bytes: 4,
|
||||||
|
flush_immediately: ack_flush_immediate,
|
||||||
|
})
|
||||||
|
}
|
||||||
|
MeResponse::Close => {
|
||||||
|
if batched {
|
||||||
|
debug!(conn_id, "ME sent close (batched)");
|
||||||
|
} else {
|
||||||
|
debug!(conn_id, "ME sent close");
|
||||||
|
}
|
||||||
|
Ok(MeWriterResponseOutcome::Close)
|
||||||
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -546,7 +842,7 @@ where
|
|||||||
|
|
||||||
match proto_tag {
|
match proto_tag {
|
||||||
ProtoTag::Abridged => {
|
ProtoTag::Abridged => {
|
||||||
if data.len() % 4 != 0 {
|
if !data.len().is_multiple_of(4) {
|
||||||
return Err(ProxyError::Proxy(format!(
|
return Err(ProxyError::Proxy(format!(
|
||||||
"Abridged payload must be 4-byte aligned, got {}",
|
"Abridged payload must be 4-byte aligned, got {}",
|
||||||
data.len()
|
data.len()
|
||||||
@@ -564,7 +860,7 @@ where
|
|||||||
frame_buf.push(first);
|
frame_buf.push(first);
|
||||||
frame_buf.extend_from_slice(data);
|
frame_buf.extend_from_slice(data);
|
||||||
client_writer
|
client_writer
|
||||||
.write_all(&frame_buf)
|
.write_all(frame_buf)
|
||||||
.await
|
.await
|
||||||
.map_err(ProxyError::Io)?;
|
.map_err(ProxyError::Io)?;
|
||||||
} else if len_words < (1 << 24) {
|
} else if len_words < (1 << 24) {
|
||||||
@@ -578,7 +874,7 @@ where
|
|||||||
frame_buf.extend_from_slice(&[first, lw[0], lw[1], lw[2]]);
|
frame_buf.extend_from_slice(&[first, lw[0], lw[1], lw[2]]);
|
||||||
frame_buf.extend_from_slice(data);
|
frame_buf.extend_from_slice(data);
|
||||||
client_writer
|
client_writer
|
||||||
.write_all(&frame_buf)
|
.write_all(frame_buf)
|
||||||
.await
|
.await
|
||||||
.map_err(ProxyError::Io)?;
|
.map_err(ProxyError::Io)?;
|
||||||
} else {
|
} else {
|
||||||
@@ -615,7 +911,7 @@ where
|
|||||||
rng.fill(&mut frame_buf[start..]);
|
rng.fill(&mut frame_buf[start..]);
|
||||||
}
|
}
|
||||||
client_writer
|
client_writer
|
||||||
.write_all(&frame_buf)
|
.write_all(frame_buf)
|
||||||
.await
|
.await
|
||||||
.map_err(ProxyError::Io)?;
|
.map_err(ProxyError::Io)?;
|
||||||
}
|
}
|
||||||
@@ -640,7 +936,86 @@ where
|
|||||||
client_writer
|
client_writer
|
||||||
.write_all(&bytes)
|
.write_all(&bytes)
|
||||||
.await
|
.await
|
||||||
.map_err(ProxyError::Io)?;
|
.map_err(ProxyError::Io)
|
||||||
// ACK should remain low-latency.
|
}
|
||||||
client_writer.flush().await.map_err(ProxyError::Io)
|
|
||||||
|
#[cfg(test)]
|
||||||
|
mod tests {
|
||||||
|
use super::*;
|
||||||
|
use tokio::time::{Duration as TokioDuration, timeout};
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn should_yield_sender_only_on_budget_with_backlog() {
|
||||||
|
assert!(!should_yield_c2me_sender(0, true));
|
||||||
|
assert!(!should_yield_c2me_sender(C2ME_SENDER_FAIRNESS_BUDGET - 1, true));
|
||||||
|
assert!(!should_yield_c2me_sender(C2ME_SENDER_FAIRNESS_BUDGET, false));
|
||||||
|
assert!(should_yield_c2me_sender(C2ME_SENDER_FAIRNESS_BUDGET, true));
|
||||||
|
}
|
||||||
|
|
||||||
|
#[tokio::test]
|
||||||
|
async fn enqueue_c2me_command_uses_try_send_fast_path() {
|
||||||
|
let (tx, mut rx) = mpsc::channel::<C2MeCommand>(2);
|
||||||
|
enqueue_c2me_command(
|
||||||
|
&tx,
|
||||||
|
C2MeCommand::Data {
|
||||||
|
payload: Bytes::from_static(&[1, 2, 3]),
|
||||||
|
flags: 0,
|
||||||
|
},
|
||||||
|
)
|
||||||
|
.await
|
||||||
|
.unwrap();
|
||||||
|
|
||||||
|
let recv = timeout(TokioDuration::from_millis(50), rx.recv())
|
||||||
|
.await
|
||||||
|
.unwrap()
|
||||||
|
.unwrap();
|
||||||
|
match recv {
|
||||||
|
C2MeCommand::Data { payload, flags } => {
|
||||||
|
assert_eq!(payload.as_ref(), &[1, 2, 3]);
|
||||||
|
assert_eq!(flags, 0);
|
||||||
|
}
|
||||||
|
C2MeCommand::Close => panic!("unexpected close command"),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
#[tokio::test]
|
||||||
|
async fn enqueue_c2me_command_falls_back_to_send_when_queue_is_full() {
|
||||||
|
let (tx, mut rx) = mpsc::channel::<C2MeCommand>(1);
|
||||||
|
tx.send(C2MeCommand::Data {
|
||||||
|
payload: Bytes::from_static(&[9]),
|
||||||
|
flags: 9,
|
||||||
|
})
|
||||||
|
.await
|
||||||
|
.unwrap();
|
||||||
|
|
||||||
|
let tx2 = tx.clone();
|
||||||
|
let producer = tokio::spawn(async move {
|
||||||
|
enqueue_c2me_command(
|
||||||
|
&tx2,
|
||||||
|
C2MeCommand::Data {
|
||||||
|
payload: Bytes::from_static(&[7, 7]),
|
||||||
|
flags: 7,
|
||||||
|
},
|
||||||
|
)
|
||||||
|
.await
|
||||||
|
.unwrap();
|
||||||
|
});
|
||||||
|
|
||||||
|
let _ = timeout(TokioDuration::from_millis(100), rx.recv())
|
||||||
|
.await
|
||||||
|
.unwrap();
|
||||||
|
producer.await.unwrap();
|
||||||
|
|
||||||
|
let recv = timeout(TokioDuration::from_millis(100), rx.recv())
|
||||||
|
.await
|
||||||
|
.unwrap()
|
||||||
|
.unwrap();
|
||||||
|
match recv {
|
||||||
|
C2MeCommand::Data { payload, flags } => {
|
||||||
|
assert_eq!(payload.as_ref(), &[7, 7]);
|
||||||
|
assert_eq!(flags, 7);
|
||||||
|
}
|
||||||
|
C2MeCommand::Close => panic!("unexpected close command"),
|
||||||
|
}
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -5,9 +5,13 @@ pub mod direct_relay;
|
|||||||
pub mod handshake;
|
pub mod handshake;
|
||||||
pub mod masking;
|
pub mod masking;
|
||||||
pub mod middle_relay;
|
pub mod middle_relay;
|
||||||
|
pub mod route_mode;
|
||||||
pub mod relay;
|
pub mod relay;
|
||||||
|
|
||||||
pub use client::ClientHandler;
|
pub use client::ClientHandler;
|
||||||
|
#[allow(unused_imports)]
|
||||||
pub use handshake::*;
|
pub use handshake::*;
|
||||||
|
#[allow(unused_imports)]
|
||||||
pub use masking::*;
|
pub use masking::*;
|
||||||
|
#[allow(unused_imports)]
|
||||||
pub use relay::*;
|
pub use relay::*;
|
||||||
|
|||||||
@@ -57,7 +57,9 @@ use std::sync::Arc;
|
|||||||
use std::sync::atomic::{AtomicU64, Ordering};
|
use std::sync::atomic::{AtomicU64, Ordering};
|
||||||
use std::task::{Context, Poll};
|
use std::task::{Context, Poll};
|
||||||
use std::time::Duration;
|
use std::time::Duration;
|
||||||
use tokio::io::{AsyncRead, AsyncWrite, AsyncWriteExt, ReadBuf, copy_bidirectional};
|
use tokio::io::{
|
||||||
|
AsyncRead, AsyncWrite, AsyncWriteExt, ReadBuf, copy_bidirectional_with_sizes,
|
||||||
|
};
|
||||||
use tokio::time::Instant;
|
use tokio::time::Instant;
|
||||||
use tracing::{debug, trace, warn};
|
use tracing::{debug, trace, warn};
|
||||||
use crate::error::Result;
|
use crate::error::Result;
|
||||||
@@ -296,9 +298,8 @@ impl<S: AsyncWrite + Unpin> AsyncWrite for StatsIo<S> {
|
|||||||
///
|
///
|
||||||
/// ## API compatibility
|
/// ## API compatibility
|
||||||
///
|
///
|
||||||
/// Signature is identical to the previous implementation. The `_buffer_pool`
|
/// The `_buffer_pool` parameter is retained for call-site compatibility.
|
||||||
/// parameter is retained for call-site compatibility — `copy_bidirectional`
|
/// Effective relay copy buffers are configured by `c2s_buf_size` / `s2c_buf_size`.
|
||||||
/// manages its own internal buffers (8 KB per direction).
|
|
||||||
///
|
///
|
||||||
/// ## Guarantees preserved
|
/// ## Guarantees preserved
|
||||||
///
|
///
|
||||||
@@ -312,6 +313,8 @@ pub async fn relay_bidirectional<CR, CW, SR, SW>(
|
|||||||
client_writer: CW,
|
client_writer: CW,
|
||||||
server_reader: SR,
|
server_reader: SR,
|
||||||
server_writer: SW,
|
server_writer: SW,
|
||||||
|
c2s_buf_size: usize,
|
||||||
|
s2c_buf_size: usize,
|
||||||
user: &str,
|
user: &str,
|
||||||
stats: Arc<Stats>,
|
stats: Arc<Stats>,
|
||||||
_buffer_pool: Arc<BufferPool>,
|
_buffer_pool: Arc<BufferPool>,
|
||||||
@@ -402,7 +405,12 @@ where
|
|||||||
// When the watchdog fires, select! drops the copy future,
|
// When the watchdog fires, select! drops the copy future,
|
||||||
// releasing the &mut borrows on client and server.
|
// releasing the &mut borrows on client and server.
|
||||||
let copy_result = tokio::select! {
|
let copy_result = tokio::select! {
|
||||||
result = copy_bidirectional(&mut client, &mut server) => Some(result),
|
result = copy_bidirectional_with_sizes(
|
||||||
|
&mut client,
|
||||||
|
&mut server,
|
||||||
|
c2s_buf_size.max(1),
|
||||||
|
s2c_buf_size.max(1),
|
||||||
|
) => Some(result),
|
||||||
_ = watchdog => None, // Activity timeout — cancel relay
|
_ = watchdog => None, // Activity timeout — cancel relay
|
||||||
};
|
};
|
||||||
|
|
||||||
|
|||||||
142
src/proxy/route_mode.rs
Normal file
142
src/proxy/route_mode.rs
Normal file
@@ -0,0 +1,142 @@
|
|||||||
|
use std::sync::Arc;
|
||||||
|
use std::sync::atomic::{AtomicU8, AtomicU64, Ordering};
|
||||||
|
use std::time::{Duration, SystemTime, UNIX_EPOCH};
|
||||||
|
|
||||||
|
use tokio::sync::watch;
|
||||||
|
|
||||||
|
pub(crate) const ROUTE_SWITCH_ERROR_MSG: &str = "Route mode switched by cutover";
|
||||||
|
|
||||||
|
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
|
||||||
|
#[repr(u8)]
|
||||||
|
pub(crate) enum RelayRouteMode {
|
||||||
|
Direct = 0,
|
||||||
|
Middle = 1,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl RelayRouteMode {
|
||||||
|
pub(crate) fn as_u8(self) -> u8 {
|
||||||
|
self as u8
|
||||||
|
}
|
||||||
|
|
||||||
|
pub(crate) fn from_u8(value: u8) -> Self {
|
||||||
|
match value {
|
||||||
|
1 => Self::Middle,
|
||||||
|
_ => Self::Direct,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
pub(crate) fn as_str(self) -> &'static str {
|
||||||
|
match self {
|
||||||
|
Self::Direct => "direct",
|
||||||
|
Self::Middle => "middle",
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
|
||||||
|
pub(crate) struct RouteCutoverState {
|
||||||
|
pub mode: RelayRouteMode,
|
||||||
|
pub generation: u64,
|
||||||
|
}
|
||||||
|
|
||||||
|
#[derive(Clone)]
|
||||||
|
pub(crate) struct RouteRuntimeController {
|
||||||
|
mode: Arc<AtomicU8>,
|
||||||
|
generation: Arc<AtomicU64>,
|
||||||
|
direct_since_epoch_secs: Arc<AtomicU64>,
|
||||||
|
tx: watch::Sender<RouteCutoverState>,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl RouteRuntimeController {
    /// Create a controller starting in `initial_mode` at generation 0.
    pub(crate) fn new(initial_mode: RelayRouteMode) -> Self {
        let initial = RouteCutoverState {
            mode: initial_mode,
            generation: 0,
        };
        let (tx, _rx) = watch::channel(initial);
        // Record when Direct mode began; 0 is the sentinel for "not Direct".
        let direct_since_epoch_secs = if matches!(initial_mode, RelayRouteMode::Direct) {
            now_epoch_secs()
        } else {
            0
        };
        Self {
            mode: Arc::new(AtomicU8::new(initial_mode.as_u8())),
            generation: Arc::new(AtomicU64::new(0)),
            direct_since_epoch_secs: Arc::new(AtomicU64::new(direct_since_epoch_secs)),
            tx,
        }
    }

    /// Lock-free snapshot of the current mode and generation.
    ///
    /// NOTE(review): the two Relaxed loads are not performed as an atomic
    /// pair, so a snapshot taken during a concurrent `set_mode` may combine
    /// a new mode with an old generation (or vice versa) — confirm whether
    /// callers tolerate that.
    pub(crate) fn snapshot(&self) -> RouteCutoverState {
        RouteCutoverState {
            mode: RelayRouteMode::from_u8(self.mode.load(Ordering::Relaxed)),
            generation: self.generation.load(Ordering::Relaxed),
        }
    }

    /// Subscribe to future cutover notifications (receives the latest state
    /// immediately, per `watch` semantics).
    pub(crate) fn subscribe(&self) -> watch::Receiver<RouteCutoverState> {
        self.tx.subscribe()
    }

    /// Epoch seconds since Direct mode became active, or `None` when the
    /// current mode is not Direct (0 is used internally as the sentinel).
    pub(crate) fn direct_since_epoch_secs(&self) -> Option<u64> {
        let value = self.direct_since_epoch_secs.load(Ordering::Relaxed);
        (value > 0).then_some(value)
    }

    /// Switch to `mode`; returns the new state, or `None` when `mode` was
    /// already active (no generation bump, no notification).
    pub(crate) fn set_mode(&self, mode: RelayRouteMode) -> Option<RouteCutoverState> {
        let previous = self.mode.swap(mode.as_u8(), Ordering::Relaxed);
        if previous == mode.as_u8() {
            // No-op transition: keep generation and subscribers untouched.
            return None;
        }
        // Maintain the "since when Direct" timestamp alongside the mode.
        if matches!(mode, RelayRouteMode::Direct) {
            self.direct_since_epoch_secs
                .store(now_epoch_secs(), Ordering::Relaxed);
        } else {
            self.direct_since_epoch_secs.store(0, Ordering::Relaxed);
        }
        // fetch_add returns the old value; +1 yields this change's generation.
        let generation = self.generation.fetch_add(1, Ordering::Relaxed) + 1;
        let next = RouteCutoverState { mode, generation };
        // send_replace never fails even with zero receivers.
        self.tx.send_replace(next);
        Some(next)
    }
}
|
||||||
|
|
||||||
|
/// Whole seconds since the Unix epoch; 0 if the system clock reads earlier
/// than the epoch.
fn now_epoch_secs() -> u64 {
    match SystemTime::now().duration_since(UNIX_EPOCH) {
        Ok(elapsed) => elapsed.as_secs(),
        Err(_) => 0,
    }
}
|
||||||
|
|
||||||
|
pub(crate) fn is_session_affected_by_cutover(
|
||||||
|
current: RouteCutoverState,
|
||||||
|
_session_mode: RelayRouteMode,
|
||||||
|
session_generation: u64,
|
||||||
|
) -> bool {
|
||||||
|
current.generation > session_generation
|
||||||
|
}
|
||||||
|
|
||||||
|
pub(crate) fn affected_cutover_state(
|
||||||
|
rx: &watch::Receiver<RouteCutoverState>,
|
||||||
|
session_mode: RelayRouteMode,
|
||||||
|
session_generation: u64,
|
||||||
|
) -> Option<RouteCutoverState> {
|
||||||
|
let current = *rx.borrow();
|
||||||
|
if is_session_affected_by_cutover(current, session_mode, session_generation) {
|
||||||
|
return Some(current);
|
||||||
|
}
|
||||||
|
None
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Deterministic per-session jitter in [1000, 2000) ms applied around a
/// cutover, so sessions do not all react at the same instant.
///
/// Mixes the inputs and runs a splitmix64-style avalanche; the same
/// `(session_id, generation)` pair always yields the same delay.
pub(crate) fn cutover_stagger_delay(session_id: u64, generation: u64) -> Duration {
    let seed = session_id ^ generation.rotate_left(17) ^ 0x9e37_79b9_7f4a_7c15;
    let mut mixed = (seed ^ (seed >> 30)).wrapping_mul(0xbf58_476d_1ce4_e5b9);
    mixed = (mixed ^ (mixed >> 27)).wrapping_mul(0x94d0_49bb_1331_11eb);
    mixed ^= mixed >> 31;
    Duration::from_millis(1000 + (mixed % 1000))
}
|
||||||
373
src/startup.rs
Normal file
373
src/startup.rs
Normal file
@@ -0,0 +1,373 @@
|
|||||||
|
use std::time::{Instant, SystemTime, UNIX_EPOCH};
|
||||||
|
|
||||||
|
use tokio::sync::RwLock;
|
||||||
|
|
||||||
|
// Stable component identifiers for startup progress tracking. These ids are
// used both as `current_stage` values and as keys into the component table
// built by `component_blueprint()`.
pub const COMPONENT_CONFIG_LOAD: &str = "config_load";
pub const COMPONENT_TRACING_INIT: &str = "tracing_init";
pub const COMPONENT_API_BOOTSTRAP: &str = "api_bootstrap";
pub const COMPONENT_TLS_FRONT_BOOTSTRAP: &str = "tls_front_bootstrap";
pub const COMPONENT_NETWORK_PROBE: &str = "network_probe";
pub const COMPONENT_ME_SECRET_FETCH: &str = "me_secret_fetch";
pub const COMPONENT_ME_PROXY_CONFIG_V4: &str = "me_proxy_config_fetch_v4";
pub const COMPONENT_ME_PROXY_CONFIG_V6: &str = "me_proxy_config_fetch_v6";
pub const COMPONENT_ME_POOL_CONSTRUCT: &str = "me_pool_construct";
pub const COMPONENT_ME_POOL_INIT_STAGE1: &str = "me_pool_init_stage1";
pub const COMPONENT_ME_CONNECTIVITY_PING: &str = "me_connectivity_ping";
pub const COMPONENT_DC_CONNECTIVITY_PING: &str = "dc_connectivity_ping";
pub const COMPONENT_LISTENERS_BIND: &str = "listeners_bind";
pub const COMPONENT_CONFIG_WATCHER_START: &str = "config_watcher_start";
pub const COMPONENT_METRICS_START: &str = "metrics_start";
pub const COMPONENT_RUNTIME_READY: &str = "runtime_ready";
|
||||||
|
|
||||||
|
/// Overall startup lifecycle: the process starts `Initializing` and
/// transitions to `Ready` exactly once.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum StartupStatus {
    Initializing,
    Ready,
}

impl StartupStatus {
    /// Lowercase label used in status output.
    pub fn as_str(self) -> &'static str {
        match self {
            StartupStatus::Ready => "ready",
            StartupStatus::Initializing => "initializing",
        }
    }
}
|
||||||
|
|
||||||
|
/// Lifecycle of an individual startup component.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum StartupComponentStatus {
    Pending,
    Running,
    Ready,
    Failed,
    Skipped,
}

impl StartupComponentStatus {
    /// Lowercase label used in status output.
    pub fn as_str(self) -> &'static str {
        use StartupComponentStatus::*;
        match self {
            Pending => "pending",
            Running => "running",
            Ready => "ready",
            Failed => "failed",
            Skipped => "skipped",
        }
    }
}
|
||||||
|
|
||||||
|
/// Lifecycle of the ME subsystem during startup.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum StartupMeStatus {
    Pending,
    Initializing,
    Ready,
    Failed,
    Skipped,
}

impl StartupMeStatus {
    /// Lowercase label used in status output.
    pub fn as_str(self) -> &'static str {
        use StartupMeStatus::*;
        match self {
            Pending => "pending",
            Initializing => "initializing",
            Ready => "ready",
            Failed => "failed",
            Skipped => "skipped",
        }
    }
}
|
||||||
|
|
||||||
|
/// Read-only view of one startup component, as exposed by
/// `StartupTracker::snapshot`.
#[derive(Clone, Debug)]
pub struct StartupComponentSnapshot {
    /// Stable component id (one of the `COMPONENT_*` constants).
    pub id: &'static str,
    /// Human-readable component name.
    pub title: &'static str,
    /// Relative weight used in `compute_progress_pct`.
    pub weight: f64,
    /// Current lifecycle state of the component.
    pub status: StartupComponentStatus,
    /// When the component first started, in epoch milliseconds.
    pub started_at_epoch_ms: Option<u64>,
    /// When the component reached a terminal state, in epoch milliseconds.
    pub finished_at_epoch_ms: Option<u64>,
    /// finished - started (saturating at 0).
    pub duration_ms: Option<u64>,
    /// How many times the component was (re)started.
    pub attempts: u32,
    /// Optional free-form detail text (capped at 256 bytes).
    pub details: Option<String>,
}
|
||||||
|
|
||||||
|
/// Read-only view of ME subsystem initialization progress.
#[derive(Clone, Debug)]
pub struct StartupMeSnapshot {
    /// Current ME lifecycle state.
    pub status: StartupMeStatus,
    /// Label of the stage currently in progress ("pending" initially).
    pub current_stage: String,
    /// Number of the current/last initialization attempt.
    pub init_attempt: u32,
    /// Retry policy label ("unlimited" by default).
    pub retry_limit: String,
    /// Most recent initialization error, if any (capped at 256 bytes).
    pub last_error: Option<String>,
}
|
||||||
|
|
||||||
|
/// Point-in-time view of overall startup state returned by
/// `StartupTracker::snapshot`.
#[derive(Clone, Debug)]
pub struct StartupSnapshot {
    /// Overall lifecycle status (initializing/ready).
    pub status: StartupStatus,
    /// True when startup is flagged as degraded.
    pub degraded: bool,
    /// Component id currently in progress, or "ready" once done.
    pub current_stage: String,
    /// Process start time (epoch seconds), as supplied to `StartupTracker::new`.
    pub started_at_epoch_secs: u64,
    /// When `mark_ready` first succeeded, if it has.
    pub ready_at_epoch_secs: Option<u64>,
    /// Milliseconds elapsed since tracker construction.
    pub total_elapsed_ms: u64,
    /// Transport mode label; "unknown" until explicitly set.
    pub transport_mode: String,
    /// ME subsystem progress.
    pub me: StartupMeSnapshot,
    /// Per-component progress rows, in blueprint order.
    pub components: Vec<StartupComponentSnapshot>,
}
|
||||||
|
|
||||||
|
/// Internal mutable record for one startup component; mirrored into
/// `StartupComponentSnapshot` on demand.
#[derive(Clone, Debug)]
struct StartupComponent {
    // Stable component id (one of the `COMPONENT_*` constants).
    id: &'static str,
    // Human-readable component name.
    title: &'static str,
    // Relative weight used in `compute_progress_pct`.
    weight: f64,
    // Current lifecycle state.
    status: StartupComponentStatus,
    // First start time in epoch milliseconds.
    started_at_epoch_ms: Option<u64>,
    // Terminal-state time in epoch milliseconds.
    finished_at_epoch_ms: Option<u64>,
    // finished - started (saturating).
    duration_ms: Option<u64>,
    // (Re)start counter, saturating.
    attempts: u32,
    // Optional detail text, normalized to at most 256 bytes.
    details: Option<String>,
}
|
||||||
|
|
||||||
|
/// All mutable startup state; lives behind the tracker's `RwLock`.
#[derive(Clone, Debug)]
struct StartupState {
    // Overall lifecycle status.
    status: StartupStatus,
    // Degraded-startup flag.
    degraded: bool,
    // Component id currently in progress ("ready" once done).
    current_stage: String,
    // Process start time in epoch seconds.
    started_at_epoch_secs: u64,
    // Set once by `mark_ready`.
    ready_at_epoch_secs: Option<u64>,
    // Transport mode label ("unknown" until set).
    transport_mode: String,
    // ME subsystem progress (stored in snapshot form directly).
    me: StartupMeSnapshot,
    // Per-component progress rows, in blueprint order.
    components: Vec<StartupComponent>,
}
|
||||||
|
|
||||||
|
/// Thread-safe tracker of startup progress, shared across async tasks.
pub struct StartupTracker {
    // Monotonic reference point used to compute `total_elapsed_ms`.
    started_at_instant: Instant,
    // All mutable progress state, guarded by an async RwLock.
    state: RwLock<StartupState>,
}
|
||||||
|
|
||||||
|
impl StartupTracker {
    /// Create a tracker with all components pending and the first stage
    /// (`config_load`) as the current stage.
    pub fn new(started_at_epoch_secs: u64) -> Self {
        Self {
            started_at_instant: Instant::now(),
            state: RwLock::new(StartupState {
                status: StartupStatus::Initializing,
                degraded: false,
                current_stage: COMPONENT_CONFIG_LOAD.to_string(),
                started_at_epoch_secs,
                ready_at_epoch_secs: None,
                transport_mode: "unknown".to_string(),
                me: StartupMeSnapshot {
                    status: StartupMeStatus::Pending,
                    current_stage: "pending".to_string(),
                    init_attempt: 0,
                    retry_limit: "unlimited".to_string(),
                    last_error: None,
                },
                components: component_blueprint(),
            }),
        }
    }

    /// Record the transport mode label shown in snapshots.
    pub async fn set_transport_mode(&self, mode: &'static str) {
        self.state.write().await.transport_mode = mode.to_string();
    }

    /// Set or clear the degraded-startup flag.
    pub async fn set_degraded(&self, degraded: bool) {
        self.state.write().await.degraded = degraded;
    }

    /// Mark component `id` as running, bump its attempt counter, and make it
    /// the current stage. Unknown ids only update `current_stage`.
    pub async fn start_component(&self, id: &'static str, details: Option<String>) {
        let mut guard = self.state.write().await;
        guard.current_stage = id.to_string();
        if let Some(component) = guard.components.iter_mut().find(|component| component.id == id) {
            // First start stamps the start time; restarts keep the original.
            if component.started_at_epoch_ms.is_none() {
                component.started_at_epoch_ms = Some(now_epoch_ms());
            }
            component.attempts = component.attempts.saturating_add(1);
            component.status = StartupComponentStatus::Running;
            component.details = normalize_details(details);
        }
    }

    /// Mark component `id` as successfully completed.
    pub async fn complete_component(&self, id: &'static str, details: Option<String>) {
        self.finish_component(id, StartupComponentStatus::Ready, details)
            .await;
    }

    /// Mark component `id` as failed.
    pub async fn fail_component(&self, id: &'static str, details: Option<String>) {
        self.finish_component(id, StartupComponentStatus::Failed, details)
            .await;
    }

    /// Mark component `id` as skipped (counts as done for progress).
    pub async fn skip_component(&self, id: &'static str, details: Option<String>) {
        self.finish_component(id, StartupComponentStatus::Skipped, details)
            .await;
    }

    /// Shared terminal-state transition: stamps finish time, derives
    /// duration, and records the final status/details.
    async fn finish_component(
        &self,
        id: &'static str,
        status: StartupComponentStatus,
        details: Option<String>,
    ) {
        let mut guard = self.state.write().await;
        let finished_at = now_epoch_ms();
        if let Some(component) = guard.components.iter_mut().find(|component| component.id == id) {
            // Component finished without ever being started: synthesize a
            // zero-length run so duration/attempts stay consistent.
            if component.started_at_epoch_ms.is_none() {
                component.started_at_epoch_ms = Some(finished_at);
                component.attempts = component.attempts.saturating_add(1);
            }
            component.finished_at_epoch_ms = Some(finished_at);
            component.duration_ms = component
                .started_at_epoch_ms
                .map(|started_at| finished_at.saturating_sub(started_at));
            component.status = status;
            component.details = normalize_details(details);
        }
    }

    /// Update the ME lifecycle status and its current stage label.
    pub async fn set_me_status(&self, status: StartupMeStatus, stage: &'static str) {
        let mut guard = self.state.write().await;
        guard.me.status = status;
        guard.me.current_stage = stage.to_string();
    }

    /// Record the ME retry policy label (e.g. "unlimited" or a count).
    pub async fn set_me_retry_limit(&self, retry_limit: String) {
        self.state.write().await.me.retry_limit = retry_limit;
    }

    /// Record the current ME initialization attempt number.
    pub async fn set_me_init_attempt(&self, attempt: u32) {
        self.state.write().await.me.init_attempt = attempt;
    }

    /// Record (or clear) the last ME error; text is capped at 256 bytes.
    pub async fn set_me_last_error(&self, error: Option<String>) {
        self.state.write().await.me.last_error = normalize_details(error);
    }

    /// Transition to Ready exactly once; later calls are no-ops so the
    /// original ready timestamp is preserved.
    pub async fn mark_ready(&self) {
        let mut guard = self.state.write().await;
        if guard.status == StartupStatus::Ready {
            return;
        }
        guard.status = StartupStatus::Ready;
        guard.current_stage = "ready".to_string();
        guard.ready_at_epoch_secs = Some(now_epoch_secs());
    }

    /// Build a consistent point-in-time snapshot under the read lock.
    pub async fn snapshot(&self) -> StartupSnapshot {
        let guard = self.state.read().await;
        StartupSnapshot {
            status: guard.status,
            degraded: guard.degraded,
            current_stage: guard.current_stage.clone(),
            started_at_epoch_secs: guard.started_at_epoch_secs,
            ready_at_epoch_secs: guard.ready_at_epoch_secs,
            // Monotonic clock, so immune to wall-clock adjustments.
            total_elapsed_ms: self.started_at_instant.elapsed().as_millis() as u64,
            transport_mode: guard.transport_mode.clone(),
            me: guard.me.clone(),
            components: guard
                .components
                .iter()
                .map(|component| StartupComponentSnapshot {
                    id: component.id,
                    title: component.title,
                    weight: component.weight,
                    status: component.status,
                    started_at_epoch_ms: component.started_at_epoch_ms,
                    finished_at_epoch_ms: component.finished_at_epoch_ms,
                    duration_ms: component.duration_ms,
                    attempts: component.attempts,
                    details: component.details.clone(),
                })
                .collect(),
        }
    }
}
|
||||||
|
|
||||||
|
pub fn compute_progress_pct(snapshot: &StartupSnapshot, me_stage_progress: Option<f64>) -> f64 {
|
||||||
|
if snapshot.status == StartupStatus::Ready {
|
||||||
|
return 100.0;
|
||||||
|
}
|
||||||
|
|
||||||
|
let mut total_weight = 0.0f64;
|
||||||
|
let mut completed_weight = 0.0f64;
|
||||||
|
|
||||||
|
for component in &snapshot.components {
|
||||||
|
total_weight += component.weight;
|
||||||
|
let unit_progress = match component.status {
|
||||||
|
StartupComponentStatus::Pending => 0.0,
|
||||||
|
StartupComponentStatus::Running => {
|
||||||
|
if component.id == COMPONENT_ME_POOL_INIT_STAGE1 {
|
||||||
|
me_stage_progress.unwrap_or(0.0).clamp(0.0, 1.0)
|
||||||
|
} else {
|
||||||
|
0.0
|
||||||
|
}
|
||||||
|
}
|
||||||
|
StartupComponentStatus::Ready
|
||||||
|
| StartupComponentStatus::Failed
|
||||||
|
| StartupComponentStatus::Skipped => 1.0,
|
||||||
|
};
|
||||||
|
completed_weight += component.weight * unit_progress;
|
||||||
|
}
|
||||||
|
|
||||||
|
if total_weight <= f64::EPSILON {
|
||||||
|
0.0
|
||||||
|
} else {
|
||||||
|
((completed_weight / total_weight) * 100.0).clamp(0.0, 100.0)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
fn component_blueprint() -> Vec<StartupComponent> {
|
||||||
|
vec![
|
||||||
|
component(COMPONENT_CONFIG_LOAD, "Config load", 5.0),
|
||||||
|
component(COMPONENT_TRACING_INIT, "Tracing init", 3.0),
|
||||||
|
component(COMPONENT_API_BOOTSTRAP, "API bootstrap", 5.0),
|
||||||
|
component(COMPONENT_TLS_FRONT_BOOTSTRAP, "TLS front bootstrap", 5.0),
|
||||||
|
component(COMPONENT_NETWORK_PROBE, "Network probe", 10.0),
|
||||||
|
component(COMPONENT_ME_SECRET_FETCH, "ME secret fetch", 8.0),
|
||||||
|
component(COMPONENT_ME_PROXY_CONFIG_V4, "ME config v4 fetch", 4.0),
|
||||||
|
component(COMPONENT_ME_PROXY_CONFIG_V6, "ME config v6 fetch", 4.0),
|
||||||
|
component(COMPONENT_ME_POOL_CONSTRUCT, "ME pool construct", 6.0),
|
||||||
|
component(COMPONENT_ME_POOL_INIT_STAGE1, "ME pool init stage1", 24.0),
|
||||||
|
component(COMPONENT_ME_CONNECTIVITY_PING, "ME connectivity ping", 6.0),
|
||||||
|
component(COMPONENT_DC_CONNECTIVITY_PING, "DC connectivity ping", 8.0),
|
||||||
|
component(COMPONENT_LISTENERS_BIND, "Listener bind", 8.0),
|
||||||
|
component(COMPONENT_CONFIG_WATCHER_START, "Config watcher start", 2.0),
|
||||||
|
component(COMPONENT_METRICS_START, "Metrics start", 1.0),
|
||||||
|
component(COMPONENT_RUNTIME_READY, "Runtime ready", 1.0),
|
||||||
|
]
|
||||||
|
}
|
||||||
|
|
||||||
|
fn component(id: &'static str, title: &'static str, weight: f64) -> StartupComponent {
|
||||||
|
StartupComponent {
|
||||||
|
id,
|
||||||
|
title,
|
||||||
|
weight,
|
||||||
|
status: StartupComponentStatus::Pending,
|
||||||
|
started_at_epoch_ms: None,
|
||||||
|
finished_at_epoch_ms: None,
|
||||||
|
duration_ms: None,
|
||||||
|
attempts: 0,
|
||||||
|
details: None,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Clamp optional detail text to at most 256 bytes.
///
/// Truncation lands on a UTF-8 char boundary. The previous byte-index slice
/// (`detail[..256]`) panicked whenever byte 256 fell inside a multi-byte
/// character, and details frequently carry arbitrary error text.
fn normalize_details(details: Option<String>) -> Option<String> {
    details.map(|mut detail| {
        if detail.len() > 256 {
            // Walk back to the nearest char boundary at or below 256 bytes.
            let mut cut = 256;
            while !detail.is_char_boundary(cut) {
                cut -= 1;
            }
            // In-place truncate; reuses the existing allocation.
            detail.truncate(cut);
        }
        detail
    })
}
|
||||||
|
|
||||||
|
/// Current wall-clock time as whole seconds since the Unix epoch
/// (0 if the clock reads earlier than the epoch).
fn now_epoch_secs() -> u64 {
    match SystemTime::now().duration_since(UNIX_EPOCH) {
        Ok(since_epoch) => since_epoch.as_secs(),
        Err(_) => 0,
    }
}
|
||||||
|
|
||||||
|
/// Current wall-clock time in milliseconds since the Unix epoch, truncated
/// into u64 (0 if the clock reads earlier than the epoch).
fn now_epoch_ms() -> u64 {
    match SystemTime::now().duration_since(UNIX_EPOCH) {
        Ok(since_epoch) => since_epoch.as_millis() as u64,
        Err(_) => 0,
    }
}
|
||||||
117
src/stats/beobachten.rs
Normal file
117
src/stats/beobachten.rs
Normal file
@@ -0,0 +1,117 @@
|
|||||||
|
//! Per-IP forensic buckets for scanner and handshake failure observation.
|
||||||
|
|
||||||
|
use std::collections::{BTreeMap, HashMap};
|
||||||
|
use std::net::IpAddr;
|
||||||
|
use std::time::{Duration, Instant};
|
||||||
|
|
||||||
|
use parking_lot::Mutex;
|
||||||
|
|
||||||
|
const CLEANUP_INTERVAL: Duration = Duration::from_secs(30);
|
||||||
|
|
||||||
|
/// Lock-protected interior state of the store.
#[derive(Default)]
struct BeobachtenInner {
    // Hit counters keyed by (source class, peer IP).
    entries: HashMap<(String, IpAddr), BeobachtenEntry>,
    // When expired entries were last purged; None before the first purge.
    last_cleanup: Option<Instant>,
}
|
||||||
|
|
||||||
|
/// Counter for one (class, IP) pair.
#[derive(Clone, Copy)]
struct BeobachtenEntry {
    // Number of observations recorded (saturating).
    tries: u64,
    // Most recent observation; drives TTL expiry.
    last_seen: Instant,
}
|
||||||
|
|
||||||
|
/// In-memory, TTL-scoped per-IP counters keyed by source class.
///
/// Entries expire `ttl` after their last observation; expiry is applied
/// lazily — rate-limited on `record` and unconditionally on `snapshot_text`.
pub struct BeobachtenStore {
    // All counter state behind a single mutex; critical sections are short.
    inner: Mutex<BeobachtenInner>,
}
|
||||||
|
|
||||||
|
impl Default for BeobachtenStore {
|
||||||
|
fn default() -> Self {
|
||||||
|
Self::new()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl BeobachtenStore {
    /// Create an empty store.
    pub fn new() -> Self {
        Self {
            inner: Mutex::new(BeobachtenInner::default()),
        }
    }

    /// Record one observation of `ip` under `class`.
    ///
    /// No-op when the class is empty or the TTL is zero (feature disabled).
    /// Expired entries are purged opportunistically, at most once per
    /// `CLEANUP_INTERVAL`.
    pub fn record(&self, class: &str, ip: IpAddr, ttl: Duration) {
        if class.is_empty() || ttl.is_zero() {
            return;
        }

        let now = Instant::now();
        let mut guard = self.inner.lock();
        Self::cleanup_if_needed(&mut guard, now, ttl);

        let key = (class.to_string(), ip);
        let entry = guard.entries.entry(key).or_insert(BeobachtenEntry {
            tries: 0,
            last_seen: now,
        });
        entry.tries = entry.tries.saturating_add(1);
        entry.last_seen = now;
    }

    /// Render a plain-text report: one `[class]` section per class (sorted),
    /// listing `ip-tries` lines ordered by descending tries.
    ///
    /// Always purges expired entries first so the report reflects the TTL.
    pub fn snapshot_text(&self, ttl: Duration) -> String {
        if ttl.is_zero() {
            return "beobachten disabled\n".to_string();
        }

        let now = Instant::now();
        let mut guard = self.inner.lock();
        Self::cleanup(&mut guard, now, ttl);
        guard.last_cleanup = Some(now);

        // BTreeMap gives deterministic, alphabetical class ordering.
        let mut grouped = BTreeMap::<String, Vec<(IpAddr, u64)>>::new();
        for ((class, ip), entry) in &guard.entries {
            grouped
                .entry(class.clone())
                .or_default()
                .push((*ip, entry.tries));
        }

        if grouped.is_empty() {
            return "empty\n".to_string();
        }

        let mut out = String::with_capacity(grouped.len() * 64);
        for (class, entries) in &mut grouped {
            out.push('[');
            out.push_str(class);
            out.push_str("]\n");

            // Descending by tries; ties broken by the IP's textual form.
            // NOTE(review): string comparison of IPs is lexicographic, not
            // numeric — acceptable as a deterministic tie-breaker only.
            entries.sort_by(|(ip_a, tries_a), (ip_b, tries_b)| {
                tries_b
                    .cmp(tries_a)
                    .then_with(|| ip_a.to_string().cmp(&ip_b.to_string()))
            });

            for (ip, tries) in entries {
                out.push_str(&format!("{ip}-{tries}\n"));
            }
        }

        out
    }

    /// Purge expired entries, but only if `CLEANUP_INTERVAL` elapsed since
    /// the previous purge (or none has happened yet).
    fn cleanup_if_needed(inner: &mut BeobachtenInner, now: Instant, ttl: Duration) {
        let should_cleanup = match inner.last_cleanup {
            Some(last) => now.saturating_duration_since(last) >= CLEANUP_INTERVAL,
            None => true,
        };
        if should_cleanup {
            Self::cleanup(inner, now, ttl);
            inner.last_cleanup = Some(now);
        }
    }

    /// Drop every entry whose last observation is older than `ttl`.
    fn cleanup(inner: &mut BeobachtenInner, now: Instant, ttl: Duration) {
        inner.entries.retain(|_, entry| {
            now.saturating_duration_since(entry.last_seen) <= ttl
        });
    }
}
|
||||||
1294
src/stats/mod.rs
1294
src/stats/mod.rs
File diff suppressed because it is too large
Load Diff
29
src/stats/telemetry.rs
Normal file
29
src/stats/telemetry.rs
Normal file
@@ -0,0 +1,29 @@
|
|||||||
|
use crate::config::{MeTelemetryLevel, TelemetryConfig};
|
||||||
|
|
||||||
|
/// Runtime telemetry policy used by hot-path counters.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub struct TelemetryPolicy {
    /// Enables core counters.
    pub core_enabled: bool,
    /// Enables per-user counters.
    pub user_enabled: bool,
    /// Verbosity level for ME telemetry.
    pub me_level: MeTelemetryLevel,
}
|
||||||
|
|
||||||
|
impl Default for TelemetryPolicy {
|
||||||
|
fn default() -> Self {
|
||||||
|
Self {
|
||||||
|
core_enabled: true,
|
||||||
|
user_enabled: true,
|
||||||
|
me_level: MeTelemetryLevel::Normal,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl TelemetryPolicy {
|
||||||
|
pub fn from_config(cfg: &TelemetryConfig) -> Self {
|
||||||
|
Self {
|
||||||
|
core_enabled: cfg.core_enabled,
|
||||||
|
user_enabled: cfg.user_enabled,
|
||||||
|
me_level: cfg.me_level,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
@@ -3,6 +3,8 @@
|
|||||||
//! This module provides a thread-safe pool of BytesMut buffers
|
//! This module provides a thread-safe pool of BytesMut buffers
|
||||||
//! that can be reused across connections to reduce allocation pressure.
|
//! that can be reused across connections to reduce allocation pressure.
|
||||||
|
|
||||||
|
#![allow(dead_code)]
|
||||||
|
|
||||||
use bytes::BytesMut;
|
use bytes::BytesMut;
|
||||||
use crossbeam_queue::ArrayQueue;
|
use crossbeam_queue::ArrayQueue;
|
||||||
use std::ops::{Deref, DerefMut};
|
use std::ops::{Deref, DerefMut};
|
||||||
|
|||||||
@@ -18,6 +18,8 @@
|
|||||||
//! is either written to upstream or stored in our pending buffer
|
//! is either written to upstream or stored in our pending buffer
|
||||||
//! - when upstream is pending -> ciphertext is buffered/bounded and backpressure is applied
|
//! - when upstream is pending -> ciphertext is buffered/bounded and backpressure is applied
|
||||||
//!
|
//!
|
||||||
|
|
||||||
|
#![allow(dead_code)]
|
||||||
//! =======================
|
//! =======================
|
||||||
//! Writer state machine
|
//! Writer state machine
|
||||||
//! =======================
|
//! =======================
|
||||||
@@ -55,7 +57,7 @@ use std::io::{self, ErrorKind, Result};
|
|||||||
use std::pin::Pin;
|
use std::pin::Pin;
|
||||||
use std::task::{Context, Poll};
|
use std::task::{Context, Poll};
|
||||||
use tokio::io::{AsyncRead, AsyncWrite, ReadBuf};
|
use tokio::io::{AsyncRead, AsyncWrite, ReadBuf};
|
||||||
use tracing::{debug, trace, warn};
|
use tracing::{debug, trace};
|
||||||
|
|
||||||
use crate::crypto::AesCtr;
|
use crate::crypto::AesCtr;
|
||||||
use super::state::{StreamState, YieldBuffer};
|
use super::state::{StreamState, YieldBuffer};
|
||||||
@@ -151,9 +153,9 @@ impl<R> CryptoReader<R> {
|
|||||||
fn take_poison_error(&mut self) -> io::Error {
|
fn take_poison_error(&mut self) -> io::Error {
|
||||||
match &mut self.state {
|
match &mut self.state {
|
||||||
CryptoReaderState::Poisoned { error } => error.take().unwrap_or_else(|| {
|
CryptoReaderState::Poisoned { error } => error.take().unwrap_or_else(|| {
|
||||||
io::Error::new(ErrorKind::Other, "stream previously poisoned")
|
io::Error::other("stream previously poisoned")
|
||||||
}),
|
}),
|
||||||
_ => io::Error::new(ErrorKind::Other, "stream not poisoned"),
|
_ => io::Error::other("stream not poisoned"),
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -166,6 +168,7 @@ impl<R: AsyncRead + Unpin> AsyncRead for CryptoReader<R> {
|
|||||||
) -> Poll<Result<()>> {
|
) -> Poll<Result<()>> {
|
||||||
let this = self.get_mut();
|
let this = self.get_mut();
|
||||||
|
|
||||||
|
#[allow(clippy::never_loop)]
|
||||||
loop {
|
loop {
|
||||||
match &mut this.state {
|
match &mut this.state {
|
||||||
CryptoReaderState::Poisoned { .. } => {
|
CryptoReaderState::Poisoned { .. } => {
|
||||||
@@ -333,22 +336,35 @@ impl PendingCiphertext {
|
|||||||
}
|
}
|
||||||
|
|
||||||
fn remaining_capacity(&self) -> usize {
|
fn remaining_capacity(&self) -> usize {
|
||||||
self.max_len.saturating_sub(self.buf.len())
|
self.max_len.saturating_sub(self.pending_len())
|
||||||
|
}
|
||||||
|
|
||||||
|
fn compact_consumed_prefix(&mut self) {
|
||||||
|
if self.pos == 0 {
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
if self.pos >= self.buf.len() {
|
||||||
|
self.buf.clear();
|
||||||
|
self.pos = 0;
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
let _ = self.buf.split_to(self.pos);
|
||||||
|
self.pos = 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
fn advance(&mut self, n: usize) {
|
fn advance(&mut self, n: usize) {
|
||||||
self.pos = (self.pos + n).min(self.buf.len());
|
self.pos = (self.pos + n).min(self.buf.len());
|
||||||
|
|
||||||
if self.pos == self.buf.len() {
|
if self.pos == self.buf.len() {
|
||||||
self.buf.clear();
|
self.compact_consumed_prefix();
|
||||||
self.pos = 0;
|
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
|
|
||||||
// Compact when a large prefix was consumed.
|
// Compact when a large prefix was consumed.
|
||||||
if self.pos >= 16 * 1024 {
|
if self.pos >= 16 * 1024 {
|
||||||
let _ = self.buf.split_to(self.pos);
|
self.compact_consumed_prefix();
|
||||||
self.pos = 0;
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -376,6 +392,11 @@ impl PendingCiphertext {
|
|||||||
));
|
));
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Reclaim consumed prefix when physical storage is the only limiter.
|
||||||
|
if self.pos > 0 && self.buf.len() + plaintext.len() > self.max_len {
|
||||||
|
self.compact_consumed_prefix();
|
||||||
|
}
|
||||||
|
|
||||||
let start = self.buf.len();
|
let start = self.buf.len();
|
||||||
self.buf.reserve(plaintext.len());
|
self.buf.reserve(plaintext.len());
|
||||||
self.buf.extend_from_slice(plaintext);
|
self.buf.extend_from_slice(plaintext);
|
||||||
@@ -483,14 +504,14 @@ impl<W> CryptoWriter<W> {
|
|||||||
fn take_poison_error(&mut self) -> io::Error {
|
fn take_poison_error(&mut self) -> io::Error {
|
||||||
match &mut self.state {
|
match &mut self.state {
|
||||||
CryptoWriterState::Poisoned { error } => error.take().unwrap_or_else(|| {
|
CryptoWriterState::Poisoned { error } => error.take().unwrap_or_else(|| {
|
||||||
io::Error::new(ErrorKind::Other, "stream previously poisoned")
|
io::Error::other("stream previously poisoned")
|
||||||
}),
|
}),
|
||||||
_ => io::Error::new(ErrorKind::Other, "stream not poisoned"),
|
_ => io::Error::other("stream not poisoned"),
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Ensure we are in Flushing state and return mutable pending buffer.
|
/// Ensure we are in Flushing state and return mutable pending buffer.
|
||||||
fn ensure_pending<'a>(state: &'a mut CryptoWriterState, max_pending: usize) -> &'a mut PendingCiphertext {
|
fn ensure_pending(state: &mut CryptoWriterState, max_pending: usize) -> &mut PendingCiphertext {
|
||||||
if matches!(state, CryptoWriterState::Idle) {
|
if matches!(state, CryptoWriterState::Idle) {
|
||||||
*state = CryptoWriterState::Flushing {
|
*state = CryptoWriterState::Flushing {
|
||||||
pending: PendingCiphertext::new(max_pending),
|
pending: PendingCiphertext::new(max_pending),
|
||||||
@@ -774,3 +795,70 @@ impl<S: AsyncWrite + Unpin> AsyncWrite for PassthroughStream<S> {
|
|||||||
Pin::new(&mut self.inner).poll_shutdown(cx)
|
Pin::new(&mut self.inner).poll_shutdown(cx)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
#[cfg(test)]
|
||||||
|
mod tests {
|
||||||
|
use super::*;
|
||||||
|
|
||||||
|
fn test_ctr() -> AesCtr {
|
||||||
|
AesCtr::new(&[0x11; 32], 0x0102_0304_0506_0708_1112_1314_1516_1718)
|
||||||
|
}
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn pending_capacity_reclaims_after_partial_advance_without_compaction_threshold() {
|
||||||
|
let mut pending = PendingCiphertext::new(1024);
|
||||||
|
let mut ctr = test_ctr();
|
||||||
|
let payload = vec![0x41; 900];
|
||||||
|
pending.push_encrypted(&mut ctr, &payload).unwrap();
|
||||||
|
|
||||||
|
// Keep position below compaction threshold to validate logical-capacity accounting.
|
||||||
|
pending.advance(800);
|
||||||
|
assert_eq!(pending.pending_len(), 100);
|
||||||
|
assert_eq!(pending.remaining_capacity(), 924);
|
||||||
|
}
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn push_encrypted_respects_pending_limit() {
|
||||||
|
let mut pending = PendingCiphertext::new(64);
|
||||||
|
let mut ctr = test_ctr();
|
||||||
|
|
||||||
|
pending.push_encrypted(&mut ctr, &[0x10; 64]).unwrap();
|
||||||
|
let err = pending.push_encrypted(&mut ctr, &[0x20]).unwrap_err();
|
||||||
|
assert_eq!(err.kind(), ErrorKind::WouldBlock);
|
||||||
|
}
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn push_encrypted_compacts_prefix_when_physical_buffer_would_overflow() {
|
||||||
|
let mut pending = PendingCiphertext::new(64);
|
||||||
|
let mut ctr = test_ctr();
|
||||||
|
|
||||||
|
pending.push_encrypted(&mut ctr, &[0x22; 60]).unwrap();
|
||||||
|
pending.advance(30);
|
||||||
|
pending.push_encrypted(&mut ctr, &[0x33; 30]).unwrap();
|
||||||
|
|
||||||
|
assert_eq!(pending.pending_len(), 60);
|
||||||
|
assert!(pending.buf.len() <= 64);
|
||||||
|
}
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn pending_ciphertext_preserves_stream_order_across_drain_and_append() {
|
||||||
|
let mut pending = PendingCiphertext::new(128);
|
||||||
|
let mut ctr = test_ctr();
|
||||||
|
|
||||||
|
let first = vec![0xA1; 80];
|
||||||
|
let second = vec![0xB2; 40];
|
||||||
|
|
||||||
|
pending.push_encrypted(&mut ctr, &first).unwrap();
|
||||||
|
pending.advance(50);
|
||||||
|
pending.push_encrypted(&mut ctr, &second).unwrap();
|
||||||
|
|
||||||
|
let mut baseline_ctr = test_ctr();
|
||||||
|
let mut baseline_plain = Vec::with_capacity(first.len() + second.len());
|
||||||
|
baseline_plain.extend_from_slice(&first);
|
||||||
|
baseline_plain.extend_from_slice(&second);
|
||||||
|
baseline_ctr.apply(&mut baseline_plain);
|
||||||
|
|
||||||
|
let expected = &baseline_plain[50..];
|
||||||
|
assert_eq!(pending.pending_slice(), expected);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|||||||
@@ -3,6 +3,8 @@
|
|||||||
//! This module defines the common types and traits used by all
|
//! This module defines the common types and traits used by all
|
||||||
//! frame encoding/decoding implementations.
|
//! frame encoding/decoding implementations.
|
||||||
|
|
||||||
|
#![allow(dead_code)]
|
||||||
|
|
||||||
use bytes::{Bytes, BytesMut};
|
use bytes::{Bytes, BytesMut};
|
||||||
use std::io::Result;
|
use std::io::Result;
|
||||||
use std::sync::Arc;
|
use std::sync::Arc;
|
||||||
|
|||||||
@@ -3,6 +3,8 @@
|
|||||||
//! This module provides Encoder/Decoder implementations compatible
|
//! This module provides Encoder/Decoder implementations compatible
|
||||||
//! with tokio-util's Framed wrapper for easy async frame I/O.
|
//! with tokio-util's Framed wrapper for easy async frame I/O.
|
||||||
|
|
||||||
|
#![allow(dead_code)]
|
||||||
|
|
||||||
use bytes::{Bytes, BytesMut, BufMut};
|
use bytes::{Bytes, BytesMut, BufMut};
|
||||||
use std::io::{self, Error, ErrorKind};
|
use std::io::{self, Error, ErrorKind};
|
||||||
use std::sync::Arc;
|
use std::sync::Arc;
|
||||||
@@ -137,7 +139,7 @@ fn encode_abridged(frame: &Frame, dst: &mut BytesMut) -> io::Result<()> {
|
|||||||
let data = &frame.data;
|
let data = &frame.data;
|
||||||
|
|
||||||
// Validate alignment
|
// Validate alignment
|
||||||
if data.len() % 4 != 0 {
|
if !data.len().is_multiple_of(4) {
|
||||||
return Err(Error::new(
|
return Err(Error::new(
|
||||||
ErrorKind::InvalidInput,
|
ErrorKind::InvalidInput,
|
||||||
format!("abridged frame must be 4-byte aligned, got {} bytes", data.len())
|
format!("abridged frame must be 4-byte aligned, got {} bytes", data.len())
|
||||||
|
|||||||
@@ -1,6 +1,8 @@
|
|||||||
//! MTProto frame stream wrappers
|
//! MTProto frame stream wrappers
|
||||||
|
|
||||||
use bytes::{Bytes, BytesMut};
|
#![allow(dead_code)]
|
||||||
|
|
||||||
|
use bytes::Bytes;
|
||||||
use std::io::{Error, ErrorKind, Result};
|
use std::io::{Error, ErrorKind, Result};
|
||||||
use tokio::io::{AsyncRead, AsyncWrite, AsyncReadExt, AsyncWriteExt};
|
use tokio::io::{AsyncRead, AsyncWrite, AsyncReadExt, AsyncWriteExt};
|
||||||
use crate::protocol::constants::*;
|
use crate::protocol::constants::*;
|
||||||
@@ -76,7 +78,7 @@ impl<W> AbridgedFrameWriter<W> {
|
|||||||
impl<W: AsyncWrite + Unpin> AbridgedFrameWriter<W> {
|
impl<W: AsyncWrite + Unpin> AbridgedFrameWriter<W> {
|
||||||
/// Write a frame
|
/// Write a frame
|
||||||
pub async fn write_frame(&mut self, data: &[u8], meta: &FrameMeta) -> Result<()> {
|
pub async fn write_frame(&mut self, data: &[u8], meta: &FrameMeta) -> Result<()> {
|
||||||
if data.len() % 4 != 0 {
|
if !data.len().is_multiple_of(4) {
|
||||||
return Err(Error::new(
|
return Err(Error::new(
|
||||||
ErrorKind::InvalidInput,
|
ErrorKind::InvalidInput,
|
||||||
format!("Abridged frame must be aligned to 4 bytes, got {}", data.len()),
|
format!("Abridged frame must be aligned to 4 bytes, got {}", data.len()),
|
||||||
@@ -329,7 +331,7 @@ impl<R: AsyncRead + Unpin> MtprotoFrameReader<R> {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Validate length
|
// Validate length
|
||||||
if len < MIN_MSG_LEN || len > MAX_MSG_LEN || len % PADDING_FILLER.len() != 0 {
|
if !(MIN_MSG_LEN..=MAX_MSG_LEN).contains(&len) || !len.is_multiple_of(PADDING_FILLER.len()) {
|
||||||
return Err(Error::new(
|
return Err(Error::new(
|
||||||
ErrorKind::InvalidData,
|
ErrorKind::InvalidData,
|
||||||
format!("Invalid message length: {}", len),
|
format!("Invalid message length: {}", len),
|
||||||
|
|||||||
@@ -12,28 +12,34 @@ pub mod frame_codec;
|
|||||||
pub mod frame_stream;
|
pub mod frame_stream;
|
||||||
|
|
||||||
// Re-export state machine types
|
// Re-export state machine types
|
||||||
|
#[allow(unused_imports)]
|
||||||
pub use state::{
|
pub use state::{
|
||||||
StreamState, Transition, PollResult,
|
StreamState, Transition, PollResult,
|
||||||
ReadBuffer, WriteBuffer, HeaderBuffer, YieldBuffer,
|
ReadBuffer, WriteBuffer, HeaderBuffer, YieldBuffer,
|
||||||
};
|
};
|
||||||
|
|
||||||
// Re-export buffer pool
|
// Re-export buffer pool
|
||||||
|
#[allow(unused_imports)]
|
||||||
pub use buffer_pool::{BufferPool, PooledBuffer, PoolStats};
|
pub use buffer_pool::{BufferPool, PooledBuffer, PoolStats};
|
||||||
|
|
||||||
// Re-export stream implementations
|
// Re-export stream implementations
|
||||||
|
#[allow(unused_imports)]
|
||||||
pub use crypto_stream::{CryptoReader, CryptoWriter, PassthroughStream};
|
pub use crypto_stream::{CryptoReader, CryptoWriter, PassthroughStream};
|
||||||
pub use tls_stream::{FakeTlsReader, FakeTlsWriter};
|
pub use tls_stream::{FakeTlsReader, FakeTlsWriter};
|
||||||
|
|
||||||
// Re-export frame types
|
// Re-export frame types
|
||||||
|
#[allow(unused_imports)]
|
||||||
pub use frame::{Frame, FrameMeta, FrameCodec as FrameCodecTrait, create_codec};
|
pub use frame::{Frame, FrameMeta, FrameCodec as FrameCodecTrait, create_codec};
|
||||||
|
|
||||||
// Re-export tokio-util compatible codecs
|
// Re-export tokio-util compatible codecs
|
||||||
|
#[allow(unused_imports)]
|
||||||
pub use frame_codec::{
|
pub use frame_codec::{
|
||||||
FrameCodec,
|
FrameCodec,
|
||||||
AbridgedCodec, IntermediateCodec, SecureCodec,
|
AbridgedCodec, IntermediateCodec, SecureCodec,
|
||||||
};
|
};
|
||||||
|
|
||||||
// Legacy re-exports for compatibility
|
// Legacy re-exports for compatibility
|
||||||
|
#[allow(unused_imports)]
|
||||||
pub use frame_stream::{
|
pub use frame_stream::{
|
||||||
AbridgedFrameReader, AbridgedFrameWriter,
|
AbridgedFrameReader, AbridgedFrameWriter,
|
||||||
IntermediateFrameReader, IntermediateFrameWriter,
|
IntermediateFrameReader, IntermediateFrameWriter,
|
||||||
|
|||||||
@@ -3,6 +3,8 @@
|
|||||||
//! This module provides core types and traits for implementing
|
//! This module provides core types and traits for implementing
|
||||||
//! stateful async streams with proper partial read/write handling.
|
//! stateful async streams with proper partial read/write handling.
|
||||||
|
|
||||||
|
#![allow(dead_code)]
|
||||||
|
|
||||||
use bytes::{Bytes, BytesMut};
|
use bytes::{Bytes, BytesMut};
|
||||||
use std::io;
|
use std::io;
|
||||||
|
|
||||||
|
|||||||
@@ -18,6 +18,8 @@
|
|||||||
//! - Explicit state machines for all async operations
|
//! - Explicit state machines for all async operations
|
||||||
//! - Never lose data on partial reads
|
//! - Never lose data on partial reads
|
||||||
//! - Atomic TLS record formation for writes
|
//! - Atomic TLS record formation for writes
|
||||||
|
|
||||||
|
#![allow(dead_code)]
|
||||||
//! - Proper handling of all TLS record types
|
//! - Proper handling of all TLS record types
|
||||||
//!
|
//!
|
||||||
//! Important nuance (Telegram FakeTLS):
|
//! Important nuance (Telegram FakeTLS):
|
||||||
@@ -133,7 +135,7 @@ impl TlsRecordHeader {
|
|||||||
}
|
}
|
||||||
|
|
||||||
/// Build header bytes
|
/// Build header bytes
|
||||||
fn to_bytes(&self) -> [u8; 5] {
|
fn to_bytes(self) -> [u8; 5] {
|
||||||
[
|
[
|
||||||
self.record_type,
|
self.record_type,
|
||||||
self.version[0],
|
self.version[0],
|
||||||
@@ -258,9 +260,9 @@ impl<R> FakeTlsReader<R> {
|
|||||||
fn take_poison_error(&mut self) -> io::Error {
|
fn take_poison_error(&mut self) -> io::Error {
|
||||||
match &mut self.state {
|
match &mut self.state {
|
||||||
TlsReaderState::Poisoned { error } => error.take().unwrap_or_else(|| {
|
TlsReaderState::Poisoned { error } => error.take().unwrap_or_else(|| {
|
||||||
io::Error::new(ErrorKind::Other, "stream previously poisoned")
|
io::Error::other("stream previously poisoned")
|
||||||
}),
|
}),
|
||||||
_ => io::Error::new(ErrorKind::Other, "stream not poisoned"),
|
_ => io::Error::other("stream not poisoned"),
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -295,7 +297,7 @@ impl<R: AsyncRead + Unpin> AsyncRead for FakeTlsReader<R> {
|
|||||||
TlsReaderState::Poisoned { error } => {
|
TlsReaderState::Poisoned { error } => {
|
||||||
this.state = TlsReaderState::Poisoned { error: None };
|
this.state = TlsReaderState::Poisoned { error: None };
|
||||||
let err = error.unwrap_or_else(|| {
|
let err = error.unwrap_or_else(|| {
|
||||||
io::Error::new(ErrorKind::Other, "stream previously poisoned")
|
io::Error::other("stream previously poisoned")
|
||||||
});
|
});
|
||||||
return Poll::Ready(Err(err));
|
return Poll::Ready(Err(err));
|
||||||
}
|
}
|
||||||
@@ -614,9 +616,9 @@ impl<W> FakeTlsWriter<W> {
|
|||||||
fn take_poison_error(&mut self) -> io::Error {
|
fn take_poison_error(&mut self) -> io::Error {
|
||||||
match &mut self.state {
|
match &mut self.state {
|
||||||
TlsWriterState::Poisoned { error } => error.take().unwrap_or_else(|| {
|
TlsWriterState::Poisoned { error } => error.take().unwrap_or_else(|| {
|
||||||
io::Error::new(ErrorKind::Other, "stream previously poisoned")
|
io::Error::other("stream previously poisoned")
|
||||||
}),
|
}),
|
||||||
_ => io::Error::new(ErrorKind::Other, "stream not poisoned"),
|
_ => io::Error::other("stream not poisoned"),
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -680,7 +682,7 @@ impl<W: AsyncWrite + Unpin> AsyncWrite for FakeTlsWriter<W> {
|
|||||||
TlsWriterState::Poisoned { error } => {
|
TlsWriterState::Poisoned { error } => {
|
||||||
this.state = TlsWriterState::Poisoned { error: None };
|
this.state = TlsWriterState::Poisoned { error: None };
|
||||||
let err = error.unwrap_or_else(|| {
|
let err = error.unwrap_or_else(|| {
|
||||||
Error::new(ErrorKind::Other, "stream previously poisoned")
|
Error::other("stream previously poisoned")
|
||||||
});
|
});
|
||||||
return Poll::Ready(Err(err));
|
return Poll::Ready(Err(err));
|
||||||
}
|
}
|
||||||
@@ -769,7 +771,7 @@ impl<W: AsyncWrite + Unpin> AsyncWrite for FakeTlsWriter<W> {
|
|||||||
TlsWriterState::Poisoned { error } => {
|
TlsWriterState::Poisoned { error } => {
|
||||||
this.state = TlsWriterState::Poisoned { error: None };
|
this.state = TlsWriterState::Poisoned { error: None };
|
||||||
let err = error.unwrap_or_else(|| {
|
let err = error.unwrap_or_else(|| {
|
||||||
Error::new(ErrorKind::Other, "stream previously poisoned")
|
Error::other("stream previously poisoned")
|
||||||
});
|
});
|
||||||
return Poll::Ready(Err(err));
|
return Poll::Ready(Err(err));
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -1,5 +1,7 @@
|
|||||||
//! Stream traits and common types
|
//! Stream traits and common types
|
||||||
|
|
||||||
|
#![allow(dead_code)]
|
||||||
|
|
||||||
use bytes::Bytes;
|
use bytes::Bytes;
|
||||||
use std::io::Result;
|
use std::io::Result;
|
||||||
use std::pin::Pin;
|
use std::pin::Pin;
|
||||||
|
|||||||
@@ -19,6 +19,7 @@ pub struct TlsFrontCache {
|
|||||||
disk_path: PathBuf,
|
disk_path: PathBuf,
|
||||||
}
|
}
|
||||||
|
|
||||||
|
#[allow(dead_code)]
|
||||||
impl TlsFrontCache {
|
impl TlsFrontCache {
|
||||||
pub fn new(domains: &[String], default_len: usize, disk_path: impl AsRef<Path>) -> Self {
|
pub fn new(domains: &[String], default_len: usize, disk_path: impl AsRef<Path>) -> Self {
|
||||||
let default_template = ParsedServerHello {
|
let default_template = ParsedServerHello {
|
||||||
@@ -114,8 +115,9 @@ impl TlsFrontCache {
|
|||||||
if !name.ends_with(".json") {
|
if !name.ends_with(".json") {
|
||||||
continue;
|
continue;
|
||||||
}
|
}
|
||||||
if let Ok(data) = tokio::fs::read(entry.path()).await {
|
if let Ok(data) = tokio::fs::read(entry.path()).await
|
||||||
if let Ok(mut cached) = serde_json::from_slice::<CachedTlsData>(&data) {
|
&& let Ok(mut cached) = serde_json::from_slice::<CachedTlsData>(&data)
|
||||||
|
{
|
||||||
if cached.domain.is_empty()
|
if cached.domain.is_empty()
|
||||||
|| cached.domain.len() > 255
|
|| cached.domain.len() > 255
|
||||||
|| !cached.domain.chars().all(|c| c.is_ascii_alphanumeric() || c == '.' || c == '-')
|
|| !cached.domain.chars().all(|c| c.is_ascii_alphanumeric() || c == '.' || c == '-')
|
||||||
@@ -124,18 +126,18 @@ impl TlsFrontCache {
|
|||||||
continue;
|
continue;
|
||||||
}
|
}
|
||||||
// fetched_at is skipped during deserialization; approximate with file mtime if available.
|
// fetched_at is skipped during deserialization; approximate with file mtime if available.
|
||||||
if let Ok(meta) = entry.metadata().await {
|
if let Ok(meta) = entry.metadata().await
|
||||||
if let Ok(modified) = meta.modified() {
|
&& let Ok(modified) = meta.modified()
|
||||||
|
{
|
||||||
cached.fetched_at = modified;
|
cached.fetched_at = modified;
|
||||||
}
|
}
|
||||||
}
|
|
||||||
// Drop entries older than 72h
|
// Drop entries older than 72h
|
||||||
if let Ok(age) = cached.fetched_at.elapsed() {
|
if let Ok(age) = cached.fetched_at.elapsed()
|
||||||
if age > Duration::from_secs(72 * 3600) {
|
&& age > Duration::from_secs(72 * 3600)
|
||||||
|
{
|
||||||
warn!(domain = %cached.domain, "Skipping stale TLS cache entry (>72h)");
|
warn!(domain = %cached.domain, "Skipping stale TLS cache entry (>72h)");
|
||||||
continue;
|
continue;
|
||||||
}
|
}
|
||||||
}
|
|
||||||
let domain = cached.domain.clone();
|
let domain = cached.domain.clone();
|
||||||
self.set(&domain, cached).await;
|
self.set(&domain, cached).await;
|
||||||
loaded += 1;
|
loaded += 1;
|
||||||
@@ -143,7 +145,6 @@ impl TlsFrontCache {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
|
||||||
if loaded > 0 {
|
if loaded > 0 {
|
||||||
info!(count = loaded, "Loaded TLS cache entries from disk");
|
info!(count = loaded, "Loaded TLS cache entries from disk");
|
||||||
}
|
}
|
||||||
@@ -173,7 +174,7 @@ impl TlsFrontCache {
|
|||||||
tokio::spawn(async move {
|
tokio::spawn(async move {
|
||||||
loop {
|
loop {
|
||||||
for domain in &domains {
|
for domain in &domains {
|
||||||
fetcher(domain.clone()).await;
|
let _ = fetcher(domain.clone()).await;
|
||||||
}
|
}
|
||||||
sleep(interval).await;
|
sleep(interval).await;
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -12,7 +12,7 @@ fn jitter_and_clamp_sizes(sizes: &[usize], rng: &SecureRandom) -> Vec<usize> {
|
|||||||
sizes
|
sizes
|
||||||
.iter()
|
.iter()
|
||||||
.map(|&size| {
|
.map(|&size| {
|
||||||
let base = size.max(MIN_APP_DATA).min(MAX_APP_DATA);
|
let base = size.clamp(MIN_APP_DATA, MAX_APP_DATA);
|
||||||
let jitter_range = ((base as f64) * 0.03).round() as i64;
|
let jitter_range = ((base as f64) * 0.03).round() as i64;
|
||||||
if jitter_range == 0 {
|
if jitter_range == 0 {
|
||||||
return base;
|
return base;
|
||||||
@@ -50,7 +50,7 @@ fn ensure_payload_capacity(mut sizes: Vec<usize>, payload_len: usize) -> Vec<usi
|
|||||||
|
|
||||||
while body_total < payload_len {
|
while body_total < payload_len {
|
||||||
let remaining = payload_len - body_total;
|
let remaining = payload_len - body_total;
|
||||||
let chunk = (remaining + 17).min(MAX_APP_DATA).max(MIN_APP_DATA);
|
let chunk = (remaining + 17).clamp(MIN_APP_DATA, MAX_APP_DATA);
|
||||||
sizes.push(chunk);
|
sizes.push(chunk);
|
||||||
body_total += chunk.saturating_sub(17);
|
body_total += chunk.saturating_sub(17);
|
||||||
}
|
}
|
||||||
@@ -189,7 +189,7 @@ pub fn build_emulated_server_hello(
|
|||||||
.as_ref()
|
.as_ref()
|
||||||
.map(|payload| payload.certificate_message.as_slice())
|
.map(|payload| payload.certificate_message.as_slice())
|
||||||
.filter(|payload| !payload.is_empty())
|
.filter(|payload| !payload.is_empty())
|
||||||
.or_else(|| compact_payload.as_deref())
|
.or(compact_payload.as_deref())
|
||||||
} else {
|
} else {
|
||||||
compact_payload.as_deref()
|
compact_payload.as_deref()
|
||||||
};
|
};
|
||||||
@@ -223,8 +223,7 @@ pub fn build_emulated_server_hello(
|
|||||||
} else {
|
} else {
|
||||||
rec.extend_from_slice(&rng.bytes(size));
|
rec.extend_from_slice(&rng.bytes(size));
|
||||||
}
|
}
|
||||||
} else {
|
} else if size > 17 {
|
||||||
if size > 17 {
|
|
||||||
let body_len = size - 17;
|
let body_len = size - 17;
|
||||||
rec.extend_from_slice(&rng.bytes(body_len));
|
rec.extend_from_slice(&rng.bytes(body_len));
|
||||||
rec.push(0x16); // inner content type marker (handshake)
|
rec.push(0x16); // inner content type marker (handshake)
|
||||||
@@ -232,7 +231,6 @@ pub fn build_emulated_server_hello(
|
|||||||
} else {
|
} else {
|
||||||
rec.extend_from_slice(&rng.bytes(size));
|
rec.extend_from_slice(&rng.bytes(size));
|
||||||
}
|
}
|
||||||
}
|
|
||||||
app_data.extend_from_slice(&rec);
|
app_data.extend_from_slice(&rec);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
@@ -2,8 +2,10 @@ use std::sync::Arc;
|
|||||||
use std::time::Duration;
|
use std::time::Duration;
|
||||||
|
|
||||||
use anyhow::{Result, anyhow};
|
use anyhow::{Result, anyhow};
|
||||||
use tokio::io::{AsyncReadExt, AsyncWriteExt};
|
use tokio::io::{AsyncRead, AsyncReadExt, AsyncWrite, AsyncWriteExt};
|
||||||
use tokio::net::TcpStream;
|
use tokio::net::TcpStream;
|
||||||
|
#[cfg(unix)]
|
||||||
|
use tokio::net::UnixStream;
|
||||||
use tokio::time::timeout;
|
use tokio::time::timeout;
|
||||||
use tokio_rustls::client::TlsStream;
|
use tokio_rustls::client::TlsStream;
|
||||||
use tokio_rustls::TlsConnector;
|
use tokio_rustls::TlsConnector;
|
||||||
@@ -18,7 +20,9 @@ use x509_parser::prelude::FromDer;
|
|||||||
use x509_parser::certificate::X509Certificate;
|
use x509_parser::certificate::X509Certificate;
|
||||||
|
|
||||||
use crate::crypto::SecureRandom;
|
use crate::crypto::SecureRandom;
|
||||||
|
use crate::network::dns_overrides::resolve_socket_addr;
|
||||||
use crate::protocol::constants::{TLS_RECORD_APPLICATION, TLS_RECORD_HANDSHAKE};
|
use crate::protocol::constants::{TLS_RECORD_APPLICATION, TLS_RECORD_HANDSHAKE};
|
||||||
|
use crate::transport::proxy_protocol::{ProxyProtocolV1Builder, ProxyProtocolV2Builder};
|
||||||
use crate::tls_front::types::{
|
use crate::tls_front::types::{
|
||||||
ParsedCertificateInfo,
|
ParsedCertificateInfo,
|
||||||
ParsedServerHello,
|
ParsedServerHello,
|
||||||
@@ -210,7 +214,10 @@ fn gen_key_share(rng: &SecureRandom) -> [u8; 32] {
|
|||||||
key
|
key
|
||||||
}
|
}
|
||||||
|
|
||||||
async fn read_tls_record(stream: &mut TcpStream) -> Result<(u8, Vec<u8>)> {
|
async fn read_tls_record<S>(stream: &mut S) -> Result<(u8, Vec<u8>)>
|
||||||
|
where
|
||||||
|
S: AsyncRead + Unpin,
|
||||||
|
{
|
||||||
let mut header = [0u8; 5];
|
let mut header = [0u8; 5];
|
||||||
stream.read_exact(&mut header).await?;
|
stream.read_exact(&mut header).await?;
|
||||||
let len = u16::from_be_bytes([header[3], header[4]]) as usize;
|
let len = u16::from_be_bytes([header[3], header[4]]) as usize;
|
||||||
@@ -332,6 +339,55 @@ fn u24_bytes(value: usize) -> Option<[u8; 3]> {
|
|||||||
])
|
])
|
||||||
}
|
}
|
||||||
|
|
||||||
|
async fn connect_with_dns_override(
|
||||||
|
host: &str,
|
||||||
|
port: u16,
|
||||||
|
connect_timeout: Duration,
|
||||||
|
) -> Result<TcpStream> {
|
||||||
|
if let Some(addr) = resolve_socket_addr(host, port) {
|
||||||
|
return Ok(timeout(connect_timeout, TcpStream::connect(addr)).await??);
|
||||||
|
}
|
||||||
|
Ok(timeout(connect_timeout, TcpStream::connect((host, port))).await??)
|
||||||
|
}
|
||||||
|
|
||||||
|
async fn connect_tcp_with_upstream(
|
||||||
|
host: &str,
|
||||||
|
port: u16,
|
||||||
|
connect_timeout: Duration,
|
||||||
|
upstream: Option<std::sync::Arc<crate::transport::UpstreamManager>>,
|
||||||
|
) -> Result<TcpStream> {
|
||||||
|
if let Some(manager) = upstream {
|
||||||
|
if let Some(addr) = resolve_socket_addr(host, port) {
|
||||||
|
match manager.connect(addr, None, None).await {
|
||||||
|
Ok(stream) => return Ok(stream),
|
||||||
|
Err(e) => {
|
||||||
|
warn!(
|
||||||
|
host = %host,
|
||||||
|
port = port,
|
||||||
|
error = %e,
|
||||||
|
"Upstream connect failed, using direct connect"
|
||||||
|
);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
} else if let Ok(mut addrs) = tokio::net::lookup_host((host, port)).await {
|
||||||
|
if let Some(addr) = addrs.find(|a| a.is_ipv4()) {
|
||||||
|
match manager.connect(addr, None, None).await {
|
||||||
|
Ok(stream) => return Ok(stream),
|
||||||
|
Err(e) => {
|
||||||
|
warn!(
|
||||||
|
host = %host,
|
||||||
|
port = port,
|
||||||
|
error = %e,
|
||||||
|
"Upstream connect failed, using direct connect"
|
||||||
|
);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
connect_with_dns_override(host, port, connect_timeout).await
|
||||||
|
}
|
||||||
|
|
||||||
fn encode_tls13_certificate_message(cert_chain_der: &[Vec<u8>]) -> Option<Vec<u8>> {
|
fn encode_tls13_certificate_message(cert_chain_der: &[Vec<u8>]) -> Option<Vec<u8>> {
|
||||||
if cert_chain_der.is_empty() {
|
if cert_chain_der.is_empty() {
|
||||||
return None;
|
return None;
|
||||||
@@ -361,18 +417,25 @@ fn encode_tls13_certificate_message(cert_chain_der: &[Vec<u8>]) -> Option<Vec<u8
|
|||||||
Some(message)
|
Some(message)
|
||||||
}
|
}
|
||||||
|
|
||||||
async fn fetch_via_raw_tls(
|
async fn fetch_via_raw_tls_stream<S>(
|
||||||
host: &str,
|
mut stream: S,
|
||||||
port: u16,
|
|
||||||
sni: &str,
|
sni: &str,
|
||||||
connect_timeout: Duration,
|
connect_timeout: Duration,
|
||||||
) -> Result<TlsFetchResult> {
|
proxy_protocol: u8,
|
||||||
let addr = format!("{host}:{port}");
|
) -> Result<TlsFetchResult>
|
||||||
let mut stream = timeout(connect_timeout, TcpStream::connect(addr)).await??;
|
where
|
||||||
|
S: AsyncRead + AsyncWrite + Unpin,
|
||||||
|
{
|
||||||
let rng = SecureRandom::new();
|
let rng = SecureRandom::new();
|
||||||
let client_hello = build_client_hello(sni, &rng);
|
let client_hello = build_client_hello(sni, &rng);
|
||||||
timeout(connect_timeout, async {
|
timeout(connect_timeout, async {
|
||||||
|
if proxy_protocol > 0 {
|
||||||
|
let header = match proxy_protocol {
|
||||||
|
2 => ProxyProtocolV2Builder::new().build(),
|
||||||
|
_ => ProxyProtocolV1Builder::new().build(),
|
||||||
|
};
|
||||||
|
stream.write_all(&header).await?;
|
||||||
|
}
|
||||||
stream.write_all(&client_hello).await?;
|
stream.write_all(&client_hello).await?;
|
||||||
stream.flush().await?;
|
stream.flush().await?;
|
||||||
Ok::<(), std::io::Error>(())
|
Ok::<(), std::io::Error>(())
|
||||||
@@ -384,7 +447,7 @@ async fn fetch_via_raw_tls(
|
|||||||
for _ in 0..4 {
|
for _ in 0..4 {
|
||||||
match timeout(connect_timeout, read_tls_record(&mut stream)).await {
|
match timeout(connect_timeout, read_tls_record(&mut stream)).await {
|
||||||
Ok(Ok(rec)) => records.push(rec),
|
Ok(Ok(rec)) => records.push(rec),
|
||||||
Ok(Err(e)) => return Err(e.into()),
|
Ok(Err(e)) => return Err(e),
|
||||||
Err(_) => break,
|
Err(_) => break,
|
||||||
}
|
}
|
||||||
if records.len() >= 3 && records.iter().any(|(t, _)| *t == TLS_RECORD_APPLICATION) {
|
if records.len() >= 3 && records.iter().any(|(t, _)| *t == TLS_RECORD_APPLICATION) {
|
||||||
@@ -418,34 +481,69 @@ async fn fetch_via_raw_tls(
|
|||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
async fn fetch_via_rustls(
|
async fn fetch_via_raw_tls(
|
||||||
host: &str,
|
host: &str,
|
||||||
port: u16,
|
port: u16,
|
||||||
sni: &str,
|
sni: &str,
|
||||||
connect_timeout: Duration,
|
connect_timeout: Duration,
|
||||||
upstream: Option<std::sync::Arc<crate::transport::UpstreamManager>>,
|
upstream: Option<std::sync::Arc<crate::transport::UpstreamManager>>,
|
||||||
|
proxy_protocol: u8,
|
||||||
|
unix_sock: Option<&str>,
|
||||||
) -> Result<TlsFetchResult> {
|
) -> Result<TlsFetchResult> {
|
||||||
|
#[cfg(unix)]
|
||||||
|
if let Some(sock_path) = unix_sock {
|
||||||
|
match timeout(connect_timeout, UnixStream::connect(sock_path)).await {
|
||||||
|
Ok(Ok(stream)) => {
|
||||||
|
debug!(
|
||||||
|
sni = %sni,
|
||||||
|
sock = %sock_path,
|
||||||
|
"Raw TLS fetch using mask unix socket"
|
||||||
|
);
|
||||||
|
return fetch_via_raw_tls_stream(stream, sni, connect_timeout, proxy_protocol).await;
|
||||||
|
}
|
||||||
|
Ok(Err(e)) => {
|
||||||
|
warn!(
|
||||||
|
sni = %sni,
|
||||||
|
sock = %sock_path,
|
||||||
|
error = %e,
|
||||||
|
"Raw TLS unix socket connect failed, falling back to TCP"
|
||||||
|
);
|
||||||
|
}
|
||||||
|
Err(_) => {
|
||||||
|
warn!(
|
||||||
|
sni = %sni,
|
||||||
|
sock = %sock_path,
|
||||||
|
"Raw TLS unix socket connect timed out, falling back to TCP"
|
||||||
|
);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
#[cfg(not(unix))]
|
||||||
|
let _ = unix_sock;
|
||||||
|
|
||||||
|
let stream = connect_tcp_with_upstream(host, port, connect_timeout, upstream).await?;
|
||||||
|
fetch_via_raw_tls_stream(stream, sni, connect_timeout, proxy_protocol).await
|
||||||
|
}
|
||||||
|
|
||||||
|
async fn fetch_via_rustls_stream<S>(
|
||||||
|
mut stream: S,
|
||||||
|
host: &str,
|
||||||
|
sni: &str,
|
||||||
|
proxy_protocol: u8,
|
||||||
|
) -> Result<TlsFetchResult>
|
||||||
|
where
|
||||||
|
S: AsyncRead + AsyncWrite + Unpin,
|
||||||
|
{
|
||||||
// rustls handshake path for certificate and basic negotiated metadata.
|
// rustls handshake path for certificate and basic negotiated metadata.
|
||||||
let stream = if let Some(manager) = upstream {
|
if proxy_protocol > 0 {
|
||||||
// Resolve host to SocketAddr
|
let header = match proxy_protocol {
|
||||||
if let Ok(mut addrs) = tokio::net::lookup_host((host, port)).await {
|
2 => ProxyProtocolV2Builder::new().build(),
|
||||||
if let Some(addr) = addrs.find(|a| a.is_ipv4()) {
|
_ => ProxyProtocolV1Builder::new().build(),
|
||||||
match manager.connect(addr, None, None).await {
|
|
||||||
Ok(s) => s,
|
|
||||||
Err(e) => {
|
|
||||||
warn!(sni = %sni, error = %e, "Upstream connect failed, using direct connect");
|
|
||||||
timeout(connect_timeout, TcpStream::connect((host, port))).await??
|
|
||||||
}
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
timeout(connect_timeout, TcpStream::connect((host, port))).await??
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
timeout(connect_timeout, TcpStream::connect((host, port))).await??
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
timeout(connect_timeout, TcpStream::connect((host, port))).await??
|
|
||||||
};
|
};
|
||||||
|
stream.write_all(&header).await?;
|
||||||
|
stream.flush().await?;
|
||||||
|
}
|
||||||
|
|
||||||
let config = build_client_config();
|
let config = build_client_config();
|
||||||
let connector = TlsConnector::from(config);
|
let connector = TlsConnector::from(config);
|
||||||
@@ -454,7 +552,7 @@ async fn fetch_via_rustls(
|
|||||||
.or_else(|_| ServerName::try_from(host.to_owned()))
|
.or_else(|_| ServerName::try_from(host.to_owned()))
|
||||||
.map_err(|_| RustlsError::General("invalid SNI".into()))?;
|
.map_err(|_| RustlsError::General("invalid SNI".into()))?;
|
||||||
|
|
||||||
let tls_stream: TlsStream<TcpStream> = connector.connect(server_name, stream).await?;
|
let tls_stream: TlsStream<S> = connector.connect(server_name, stream).await?;
|
||||||
|
|
||||||
// Extract negotiated parameters and certificates
|
// Extract negotiated parameters and certificates
|
||||||
let (_io, session) = tls_stream.get_ref();
|
let (_io, session) = tls_stream.get_ref();
|
||||||
@@ -515,6 +613,51 @@ async fn fetch_via_rustls(
|
|||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
|
async fn fetch_via_rustls(
|
||||||
|
host: &str,
|
||||||
|
port: u16,
|
||||||
|
sni: &str,
|
||||||
|
connect_timeout: Duration,
|
||||||
|
upstream: Option<std::sync::Arc<crate::transport::UpstreamManager>>,
|
||||||
|
proxy_protocol: u8,
|
||||||
|
unix_sock: Option<&str>,
|
||||||
|
) -> Result<TlsFetchResult> {
|
||||||
|
#[cfg(unix)]
|
||||||
|
if let Some(sock_path) = unix_sock {
|
||||||
|
match timeout(connect_timeout, UnixStream::connect(sock_path)).await {
|
||||||
|
Ok(Ok(stream)) => {
|
||||||
|
debug!(
|
||||||
|
sni = %sni,
|
||||||
|
sock = %sock_path,
|
||||||
|
"Rustls fetch using mask unix socket"
|
||||||
|
);
|
||||||
|
return fetch_via_rustls_stream(stream, host, sni, proxy_protocol).await;
|
||||||
|
}
|
||||||
|
Ok(Err(e)) => {
|
||||||
|
warn!(
|
||||||
|
sni = %sni,
|
||||||
|
sock = %sock_path,
|
||||||
|
error = %e,
|
||||||
|
"Rustls unix socket connect failed, falling back to TCP"
|
||||||
|
);
|
||||||
|
}
|
||||||
|
Err(_) => {
|
||||||
|
warn!(
|
||||||
|
sni = %sni,
|
||||||
|
sock = %sock_path,
|
||||||
|
"Rustls unix socket connect timed out, falling back to TCP"
|
||||||
|
);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
#[cfg(not(unix))]
|
||||||
|
let _ = unix_sock;
|
||||||
|
|
||||||
|
let stream = connect_tcp_with_upstream(host, port, connect_timeout, upstream).await?;
|
||||||
|
fetch_via_rustls_stream(stream, host, sni, proxy_protocol).await
|
||||||
|
}
|
||||||
|
|
||||||
/// Fetch real TLS metadata for the given SNI.
|
/// Fetch real TLS metadata for the given SNI.
|
||||||
///
|
///
|
||||||
/// Strategy:
|
/// Strategy:
|
||||||
@@ -527,8 +670,20 @@ pub async fn fetch_real_tls(
|
|||||||
sni: &str,
|
sni: &str,
|
||||||
connect_timeout: Duration,
|
connect_timeout: Duration,
|
||||||
upstream: Option<std::sync::Arc<crate::transport::UpstreamManager>>,
|
upstream: Option<std::sync::Arc<crate::transport::UpstreamManager>>,
|
||||||
|
proxy_protocol: u8,
|
||||||
|
unix_sock: Option<&str>,
|
||||||
) -> Result<TlsFetchResult> {
|
) -> Result<TlsFetchResult> {
|
||||||
let raw_result = match fetch_via_raw_tls(host, port, sni, connect_timeout).await {
|
let raw_result = match fetch_via_raw_tls(
|
||||||
|
host,
|
||||||
|
port,
|
||||||
|
sni,
|
||||||
|
connect_timeout,
|
||||||
|
upstream.clone(),
|
||||||
|
proxy_protocol,
|
||||||
|
unix_sock,
|
||||||
|
)
|
||||||
|
.await
|
||||||
|
{
|
||||||
Ok(res) => Some(res),
|
Ok(res) => Some(res),
|
||||||
Err(e) => {
|
Err(e) => {
|
||||||
warn!(sni = %sni, error = %e, "Raw TLS fetch failed");
|
warn!(sni = %sni, error = %e, "Raw TLS fetch failed");
|
||||||
@@ -536,7 +691,17 @@ pub async fn fetch_real_tls(
|
|||||||
}
|
}
|
||||||
};
|
};
|
||||||
|
|
||||||
match fetch_via_rustls(host, port, sni, connect_timeout, upstream).await {
|
match fetch_via_rustls(
|
||||||
|
host,
|
||||||
|
port,
|
||||||
|
sni,
|
||||||
|
connect_timeout,
|
||||||
|
upstream,
|
||||||
|
proxy_protocol,
|
||||||
|
unix_sock,
|
||||||
|
)
|
||||||
|
.await
|
||||||
|
{
|
||||||
Ok(rustls_result) => {
|
Ok(rustls_result) => {
|
||||||
if let Some(mut raw) = raw_result {
|
if let Some(mut raw) = raw_result {
|
||||||
raw.cert_info = rustls_result.cert_info;
|
raw.cert_info = rustls_result.cert_info;
|
||||||
|
|||||||
@@ -4,4 +4,5 @@ pub mod fetcher;
|
|||||||
pub mod emulator;
|
pub mod emulator;
|
||||||
|
|
||||||
pub use cache::TlsFrontCache;
|
pub use cache::TlsFrontCache;
|
||||||
|
#[allow(unused_imports)]
|
||||||
pub use types::{CachedTlsData, TlsFetchResult};
|
pub use types::{CachedTlsData, TlsFetchResult};
|
||||||
|
|||||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user