Compare commits

...

78 Commits

Author SHA1 Message Date
Alexey
0040e9b6da Merge pull request #442 from telemt/bump
Update Cargo.toml
2026-03-16 21:25:44 +03:00
Alexey
2c10560795 Update Cargo.toml 2026-03-16 21:25:14 +03:00
Alexey
5eff38eb82 Merge pull request #441 from kavore/feat/configurable-max-connections
feat: configurable max_connections limit
2026-03-16 21:17:21 +03:00
kavore
b6206a6dfe feat: make max_connections configurable via [server] section
The concurrent connection limit was hardcoded to 10,000.
Add server.max_connections config option (default: 10000, 0 = unlimited).
2026-03-16 20:40:10 +03:00
Alexey
4d8a5ca174 Merge pull request #436 from Dimasssss/patch-1
Update QUICK_START_GUIDE
2026-03-16 13:49:31 +03:00
Dimasssss
0ae67db492 Update QUICK_START_GUIDE.en.md 2026-03-16 13:40:50 +03:00
Dimasssss
c4f77814ee Update QUICK_START_GUIDE.ru.md 2026-03-16 13:40:20 +03:00
Alexey
92972ab6bf Merge pull request #433 from Linaro1985/feat_data_path
add support for data path option
2026-03-16 10:54:35 +03:00
Maxim Anisimov
c351e08c43 add support for data path option
This commit adds support for configuring the data path via a
configuration file or command-line option. This may be useful
on systems without systemd, such as OpenWrt or Alpine Linux.

Signed-off-by: Maxim Anisimov <maxim.anisimov.ua@gmail.com>
2026-03-16 10:01:59 +03:00
Alexey
e29855c8c6 Merge pull request #432 from telemt/readme
Update README.md
2026-03-15 23:00:35 +03:00
Alexey
3634fbd7e8 Update README.md 2026-03-15 23:00:17 +03:00
Alexey
bb29797bfb Merge pull request #429 from Dimasssss/patch-1
Update FAQ.ru.md
2026-03-15 22:44:22 +03:00
Dimasssss
3d5af3d248 Update FAQ.en.md 2026-03-15 19:09:02 +03:00
Dimasssss
2d7df3da6c Update FAQ.ru.md 2026-03-15 19:06:56 +03:00
Alexey
4abc0e5134 ME Draining Writers threshold + Inherited per-user unique IP limit: merge pull request #426 from telemt/flow
ME Draining Writers threshold + Inherited per-user unique IP limit
2026-03-15 15:00:20 +03:00
Alexey
4028579068 Inherited per-user unique IP limit 2026-03-15 12:43:31 +03:00
Alexey
58f26ba8a7 Configurable ME draining writer overflow threshold 2026-03-15 12:13:46 +03:00
Alexey
2be3e4ab7f Merge pull request #423 from telemt/bump
Update Cargo.toml
2026-03-15 00:33:09 +03:00
Alexey
3d43ff6e57 Update Cargo.toml 2026-03-15 00:32:57 +03:00
Alexey
1294da586f ME Writer Rebinding - Lifecycle and Consistency fixes: merge pull request #422 from telemt/flow
ME Writer Rebinding - Lifecycle and Consistency fixes
2026-03-15 00:30:57 +03:00
Alexey
ac0698b772 ME Writer Rebinding - Lifecycle and Consistency fixes 2026-03-15 00:17:54 +03:00
Alexey
dda31b3d2f New Hot-Reload method + TLS-F New Methods + TLS-F/TCP-S Docs: merge pull request #420 from telemt/flow
New Hot-Reload method + TLS-F New Methods + TLS-F/TCP-S Docs
2026-03-14 20:45:47 +03:00
Alexey
7d5e1cb9e8 Rename TLS-F-TCP-s.ru.md to TLS-F-TCP-S.ru.md 2026-03-14 20:42:21 +03:00
Alexey
56e38e8d00 Update TLS-F-TCP-s.ru.md 2026-03-14 20:41:14 +03:00
Alexey
4677b43c6e TLS-F New Methods 2026-03-14 20:38:24 +03:00
Alexey
4ddbb97908 Create TLS-F-TCP-s.ru.md 2026-03-14 20:29:12 +03:00
Alexey
8b0b47145d New Hot-Reload method 2026-03-14 18:54:05 +03:00
Alexey
f7e3ddcdb6 Update LICENSE 2026-03-14 16:02:40 +03:00
Alexey
af5cff3304 Merge pull request #417 from telemt/licensing-md
Update LICENSING.md
2026-03-14 15:59:35 +03:00
Alexey
cb9144bdb3 Update LICENSING.md 2026-03-14 15:59:21 +03:00
Alexey
fa82634faf Merge pull request #416 from telemt/license-1
Update LICENSE
2026-03-14 15:57:31 +03:00
Alexey
37b1a0289e Update LICENSE 2026-03-14 15:56:31 +03:00
Alexey
9be33bcf93 Merge pull request #414 from telemt/license
Update LICENSE
2026-03-14 15:27:59 +03:00
Alexey
bc9f691284 Merge branch 'license' of https://github.com/telemt/telemt into license 2026-03-14 15:23:43 +03:00
Alexey
58e5605f39 Telemt PL 3 на русском языке 2026-03-14 15:23:41 +03:00
Alexey
75a654c766 TELEMT-Lizenz 3 auf Deutsch 2026-03-14 15:23:24 +03:00
Alexey
2b058f7df7 Create LICENSE.en.md 2026-03-14 15:11:12 +03:00
Alexey
01af2999bb Update LICENSE 2026-03-14 15:10:46 +03:00
Alexey
c12d27f08a Middle-End docs 2026-03-14 15:10:07 +03:00
Alexey
5e3408e80b Update LICENSE 2026-03-14 15:08:14 +03:00
Alexey
052110618d Merge pull request #413 from telemt/no-config-full
Delete config.full.toml
2026-03-14 14:55:57 +03:00
Alexey
47b8f0f656 Delete config.full.toml 2026-03-14 14:55:48 +03:00
Alexey
67b2e25e39 Merge pull request #396 from 13werwolf13/main
systemd contrib
2026-03-14 14:54:27 +03:00
Alexey
9a08b541ed License:: merge pull request #412 from telemt/license
License
2026-03-14 14:48:06 +03:00
Alexey
04379b4374 Merge branch 'main' into license 2026-03-14 14:47:51 +03:00
Alexey
5cfb05b1f4 Update LICENSING.md 2026-03-14 14:47:21 +03:00
Alexey
aa68ce531e Update LICENSE 2026-03-14 14:42:36 +03:00
Alexey
d4ce304a37 Update LICENSE 2026-03-14 14:40:10 +03:00
Alexey
8a579d9bda Update LICENSE 2026-03-14 14:38:51 +03:00
Alexey
70cc6f22aa Update LICENSE 2026-03-14 14:32:41 +03:00
Alexey
1674ba36b2 Update LICENSE 2026-03-14 14:31:57 +03:00
Alexey
0c1a5c24d5 Update LICENSE 2026-03-14 14:27:45 +03:00
Alexey
5df08300e2 Merge pull request #411 from telemt/license-1
Update LICENSE
2026-03-14 14:08:22 +03:00
Alexey
543a87e166 Update LICENSE 2026-03-14 14:08:08 +03:00
Alexey
519c8d276b Merge pull request #410 from telemt/license
Update LICENSING.md
2026-03-14 14:03:39 +03:00
Alexey
4dc733d3e3 Create LICENSE 2026-03-14 14:03:29 +03:00
Alexey
4506f38bfb Update LICENSING.md 2026-03-14 14:02:12 +03:00
Alexey
b9a33c14bb Merge pull request #409 from telemt/bump
Update Cargo.toml
2026-03-14 13:24:33 +03:00
Alexey
50caeb1803 Update Cargo.toml 2026-03-14 13:24:16 +03:00
Alexey
e57a93880b Src-IP in ME Routing + more strict bind_addresses + ME Gate fixes: merge pull request #408 from telemt/flow
Src-IP in ME Routing + more strict bind_addresses + ME Gate fixes
2026-03-14 13:22:09 +03:00
Alexey
dbfc43395e Merge pull request #407 from farton1983/patch-1
Update QUICK_START_GUIDE.ru.md
2026-03-14 13:11:28 +03:00
farton1983
89923dbaa2 Update QUICK_START_GUIDE.ru.md 2026-03-14 11:07:12 +03:00
Alexey
780fafa604 Src-IP in ME Routing + more strict bind_addresses 2026-03-14 02:20:51 +03:00
Alexey
a15f74a6f9 Configured middle_proxy_nat_ip for ME Gate on strartup 2026-03-13 16:52:24 +03:00
Alexey
690635d904 Merge pull request #404 from telemt/readme
Update README.md
2026-03-12 23:57:51 +03:00
Alexey
d1372c5c1b Update README.md 2026-03-12 23:56:59 +03:00
Дмитрий Марков
5073248911 systemd contrib, add sysuser & tmpfiles configs, fix service 2026-03-12 12:47:03 +05:00
Дмитрий Марков
ae72e6f356 systemd contrib, add sysuser & tmpfiles configs, fix service 2026-03-12 12:26:23 +05:00
Alexey
b8da986fd5 ReRoute + Bnd-checks in API + Per-upstream Runtime Selftest + BSD-Support: merge pull request #394 from telemt/flow
ReRoute + Bnd-checks in API + Per-upstream Runtime Selftest + BSD-Support
2026-03-11 23:34:45 +03:00
Alexey
dd270258bf Merge pull request #393 from DavidOsipov/feature/openbsd-support
feat(platform): add OpenBSD support and low-RAM build optimizations
2026-03-11 23:29:51 +03:00
David Osipov
40dc6a39c1 fix(socket): validate ack_timeout_secs and check setsockopt rc 2026-03-11 21:10:58 +04:00
David Osipov
8b5cbb7b4b Add Rust coding conventions and self-explanatory commenting guidelines; update dependencies and version in Cargo files; enhance OpenBSD support in installation and documentation; improve TCP socket configuration and testing 2026-03-11 20:49:51 +04:00
Alexey
0e476c71a5 Merge pull request #385 from Shulyaka/Shulyaka-patch-2
Document running as unprivileged user in QUICK_START_GUIDE
2026-03-11 11:59:31 +03:00
Alexey
be24b47300 Per-upstream Runtime Selftest 2026-03-10 01:25:28 +03:00
Alexey
8cd719da3f Bnd-block in API fixes 2026-03-10 01:16:21 +03:00
Alexey
959d385015 ReRoute state in API 2026-03-10 00:59:25 +03:00
Denis Shulyaka
a09b597fab Fix the ru translation also 2026-03-09 19:39:55 +03:00
Denis Shulyaka
c920dc6381 Fix config path and update service creation steps
Updated paths and instructions in the quick start guide for Telemt configuration and service setup.
2026-03-09 19:38:55 +03:00
61 changed files with 3631 additions and 1051 deletions

View File

@@ -0,0 +1,135 @@
---
description: 'Rust programming language coding conventions and best practices'
applyTo: '**/*.rs'
---
# Rust Coding Conventions and Best Practices
Follow idiomatic Rust practices and community standards when writing Rust code.
These instructions are based on [The Rust Book](https://doc.rust-lang.org/book/), [Rust API Guidelines](https://rust-lang.github.io/api-guidelines/), [RFC 430 naming conventions](https://github.com/rust-lang/rfcs/blob/master/text/0430-finalizing-naming-conventions.md), and the broader Rust community at [users.rust-lang.org](https://users.rust-lang.org).
## General Instructions
- Always prioritize readability, safety, and maintainability.
- Use strong typing and leverage Rust's ownership system for memory safety.
- Break down complex functions into smaller, more manageable functions.
- For algorithm-related code, include explanations of the approach used.
- Write code with good maintainability practices, including comments on why certain design decisions were made.
- Handle errors gracefully using `Result<T, E>` and provide meaningful error messages.
- For external dependencies, mention their usage and purpose in documentation.
- Use consistent naming conventions following [RFC 430](https://github.com/rust-lang/rfcs/blob/master/text/0430-finalizing-naming-conventions.md).
- Write idiomatic, safe, and efficient Rust code that follows the borrow checker's rules.
- Ensure code compiles without warnings.
## Patterns to Follow
- Use modules (`mod`) and public interfaces (`pub`) to encapsulate logic.
- Handle errors properly using `?`, `match`, or `if let`.
- Use `serde` for serialization and `thiserror` or `anyhow` for custom errors.
- Implement traits to abstract services or external dependencies.
- Structure async code using `async/await` and `tokio` or `async-std`.
- Prefer enums over flags and states for type safety.
- Use builders for complex object creation.
- Split binary and library code (`main.rs` vs `lib.rs`) for testability and reuse.
- Use `rayon` for data parallelism and CPU-bound tasks.
- Use iterators instead of index-based loops as they're often faster and safer.
- Use `&str` instead of `String` for function parameters when you don't need ownership.
- Prefer borrowing and zero-copy operations to avoid unnecessary allocations.
### Ownership, Borrowing, and Lifetimes
- Prefer borrowing (`&T`) over cloning unless ownership transfer is necessary.
- Use `&mut T` when you need to modify borrowed data.
- Explicitly annotate lifetimes when the compiler cannot infer them.
- Use `Rc<T>` for single-threaded reference counting and `Arc<T>` for thread-safe reference counting.
- Use `RefCell<T>` for interior mutability in single-threaded contexts and `Mutex<T>` or `RwLock<T>` for multi-threaded contexts.
## Patterns to Avoid
- Don't use `unwrap()` or `expect()` unless absolutely necessary—prefer proper error handling.
- Avoid panics in library code—return `Result` instead.
- Don't rely on global mutable state—use dependency injection or thread-safe containers.
- Avoid deeply nested logic—refactor with functions or combinators.
- Don't ignore warnings—treat them as errors during CI.
- Avoid `unsafe` unless required and fully documented.
- Don't overuse `clone()`, use borrowing instead of cloning unless ownership transfer is needed.
- Avoid premature `collect()`, keep iterators lazy until you actually need the collection.
- Avoid unnecessary allocations—prefer borrowing and zero-copy operations.
## Code Style and Formatting
- Follow the Rust Style Guide and use `rustfmt` for automatic formatting.
- Keep lines under 100 characters when possible.
- Place function and struct documentation immediately before the item using `///`.
- Use `cargo clippy` to catch common mistakes and enforce best practices.
## Error Handling
- Use `Result<T, E>` for recoverable errors and `panic!` only for unrecoverable errors.
- Prefer `?` operator over `unwrap()` or `expect()` for error propagation.
- Create custom error types using `thiserror` or implement `std::error::Error`.
- Use `Option<T>` for values that may or may not exist.
- Provide meaningful error messages and context.
- Error types should be meaningful and well-behaved (implement standard traits).
- Validate function arguments and return appropriate errors for invalid input.
## API Design Guidelines
### Common Traits Implementation
Eagerly implement common traits where appropriate:
- `Copy`, `Clone`, `Eq`, `PartialEq`, `Ord`, `PartialOrd`, `Hash`, `Debug`, `Display`, `Default`
- Use standard conversion traits: `From`, `AsRef`, `AsMut`
- Collections should implement `FromIterator` and `Extend`
- Note: `Send` and `Sync` are auto-implemented by the compiler when safe; avoid manual implementation unless using `unsafe` code
### Type Safety and Predictability
- Use newtypes to provide static distinctions
- Arguments should convey meaning through types; prefer specific types over generic `bool` parameters
- Use `Option<T>` appropriately for truly optional values
- Functions with a clear receiver should be methods
- Only smart pointers should implement `Deref` and `DerefMut`
### Future Proofing
- Use sealed traits to protect against downstream implementations
- Structs should have private fields
- Functions should validate their arguments
- All public types must implement `Debug`
## Testing and Documentation
- Write comprehensive unit tests using `#[cfg(test)]` modules and `#[test]` annotations.
- Use test modules alongside the code they test (`mod tests { ... }`).
- Write integration tests in `tests/` directory with descriptive filenames.
- Write clear and concise comments for each function, struct, enum, and complex logic.
- Ensure functions have descriptive names and include comprehensive documentation.
- Document all public APIs with rustdoc (`///` comments) following the [API Guidelines](https://rust-lang.github.io/api-guidelines/).
- Use `#[doc(hidden)]` to hide implementation details from public documentation.
- Document error conditions, panic scenarios, and safety considerations.
- Examples should use `?` operator, not `unwrap()` or deprecated `try!` macro.
## Project Organization
- Use semantic versioning in `Cargo.toml`.
- Include comprehensive metadata: `description`, `license`, `repository`, `keywords`, `categories`.
- Use feature flags for optional functionality.
- Organize code into modules using `mod.rs` or named files.
- Keep `main.rs` or `lib.rs` minimal - move logic to modules.
## Quality Checklist
Before publishing or reviewing Rust code, ensure:
### Core Requirements
- [ ] **Naming**: Follows RFC 430 naming conventions
- [ ] **Traits**: Implements `Debug`, `Clone`, `PartialEq` where appropriate
- [ ] **Error Handling**: Uses `Result<T, E>` and provides meaningful error types
- [ ] **Documentation**: All public items have rustdoc comments with examples
- [ ] **Testing**: Comprehensive test coverage including edge cases
### Safety and Quality
- [ ] **Safety**: No unnecessary `unsafe` code, proper error handling
- [ ] **Performance**: Efficient use of iterators, minimal allocations
- [ ] **API Design**: Functions are predictable, flexible, and type-safe
- [ ] **Future Proofing**: Private fields in structs, sealed traits where appropriate
- [ ] **Tooling**: Code passes `cargo fmt`, `cargo clippy`, and `cargo test`

View File

@@ -0,0 +1,162 @@
---
description: 'Guidelines for GitHub Copilot to write comments to achieve self-explanatory code with less comments. Examples are in JavaScript but it should work on any language that has comments.'
applyTo: '**'
---
# Self-explanatory Code Commenting Instructions
## Core Principle
**Write code that speaks for itself. Comment only when necessary to explain WHY, not WHAT.**
We do not need comments most of the time.
## Commenting Guidelines
### ❌ AVOID These Comment Types
**Obvious Comments**
```javascript
// Bad: States the obvious
let counter = 0; // Initialize counter to zero
counter++; // Increment counter by one
```
**Redundant Comments**
```javascript
// Bad: Comment repeats the code
function getUserName() {
return user.name; // Return the user's name
}
```
**Outdated Comments**
```javascript
// Bad: Comment doesn't match the code
// Calculate tax at 5% rate
const tax = price * 0.08; // Actually 8%
```
### ✅ WRITE These Comment Types
**Complex Business Logic**
```javascript
// Good: Explains WHY this specific calculation
// Apply progressive tax brackets: 10% up to 10k, 20% above
const tax = calculateProgressiveTax(income, [0.10, 0.20], [10000]);
```
**Non-obvious Algorithms**
```javascript
// Good: Explains the algorithm choice
// Using Floyd-Warshall for all-pairs shortest paths
// because we need distances between all nodes
for (let k = 0; k < vertices; k++) {
for (let i = 0; i < vertices; i++) {
for (let j = 0; j < vertices; j++) {
// ... implementation
}
}
}
```
**Regex Patterns**
```javascript
// Good: Explains what the regex matches
// Match email format: username@domain.extension
const emailPattern = /^[a-zA-Z0-9._%+-]+@[a-zA-Z0-9.-]+\.[a-zA-Z]{2,}$/;
```
**API Constraints or Gotchas**
```javascript
// Good: Explains external constraint
// GitHub API rate limit: 5000 requests/hour for authenticated users
await rateLimiter.wait();
const response = await fetch(githubApiUrl);
```
## Decision Framework
Before writing a comment, ask:
1. **Is the code self-explanatory?** → No comment needed
2. **Would a better variable/function name eliminate the need?** → Refactor instead
3. **Does this explain WHY, not WHAT?** → Good comment
4. **Will this help future maintainers?** → Good comment
## Special Cases for Comments
### Public APIs
```javascript
/**
* Calculate compound interest using the standard formula.
*
* @param {number} principal - Initial amount invested
* @param {number} rate - Annual interest rate (as decimal, e.g., 0.05 for 5%)
* @param {number} time - Time period in years
* @param {number} compoundFrequency - How many times per year interest compounds (default: 1)
* @returns {number} Final amount after compound interest
*/
function calculateCompoundInterest(principal, rate, time, compoundFrequency = 1) {
// ... implementation
}
```
### Configuration and Constants
```javascript
// Good: Explains the source or reasoning
const MAX_RETRIES = 3; // Based on network reliability studies
const API_TIMEOUT = 5000; // AWS Lambda timeout is 15s, leaving buffer
```
### Annotations
```javascript
// TODO: Replace with proper user authentication after security review
// FIXME: Memory leak in production - investigate connection pooling
// HACK: Workaround for bug in library v2.1.0 - remove after upgrade
// NOTE: This implementation assumes UTC timezone for all calculations
// WARNING: This function modifies the original array instead of creating a copy
// PERF: Consider caching this result if called frequently in hot path
// SECURITY: Validate input to prevent SQL injection before using in query
// BUG: Edge case failure when array is empty - needs investigation
// REFACTOR: Extract this logic into separate utility function for reusability
// DEPRECATED: Use newApiFunction() instead - this will be removed in v3.0
```
## Anti-Patterns to Avoid
### Dead Code Comments
```javascript
// Bad: Don't comment out code
// const oldFunction = () => { ... };
const newFunction = () => { ... };
```
### Changelog Comments
```javascript
// Bad: Don't maintain history in comments
// Modified by John on 2023-01-15
// Fixed bug reported by Sarah on 2023-02-03
function processData() {
// ... implementation
}
```
### Divider Comments
```javascript
// Bad: Don't use decorative comments
//=====================================
// UTILITY FUNCTIONS
//=====================================
```
## Quality Checklist
Before committing, ensure your comments:
- [ ] Explain WHY, not WHAT
- [ ] Are grammatically correct and clear
- [ ] Will remain accurate as code evolves
- [ ] Add genuine value to code understanding
- [ ] Are placed appropriately (above the code they describe)
- [ ] Use proper spelling and professional language
## Summary
Remember: **The best comment is the one you don't need to write because the code is self-documenting.**

2
Cargo.lock generated
View File

@@ -2087,7 +2087,7 @@ dependencies = [
[[package]]
name = "telemt"
version = "3.1.3"
version = "3.3.15"
dependencies = [
"aes",
"anyhow",

View File

@@ -1,6 +1,6 @@
[package]
name = "telemt"
version = "3.3.15"
version = "3.3.19"
edition = "2024"
[dependencies]
@@ -73,3 +73,6 @@ futures = "0.3"
[[bench]]
name = "crypto_bench"
harness = false
[profile.release]
lto = "thin"

165
LICENSE Normal file
View File

@@ -0,0 +1,165 @@
###### TELEMT Public License 3 ######
##### Copyright (c) 2026 Telemt #####
Permission is hereby granted, free of charge, to any person obtaining a copy
of this Software and associated documentation files (the "Software"),
to use, reproduce, modify, prepare derivative works of, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to permit
persons to whom the Software is furnished to do so, provided that all
copyright notices, license terms, and conditions set forth in this License
are preserved and complied with.
### Official Translations
The canonical version of this License is the English version.
Official translations are provided for informational purposes only
and for convenience, and do not have legal force. In case of any
discrepancy, the English version of this License shall prevail.
Available versions:
- English in Markdown: docs/LICENSE/LICENSE.md
- German: docs/LICENSE/LICENSE.de.md
- Russian: docs/LICENSE/LICENSE.ru.md
### License Versioning Policy
This License is version 3 of the TELEMT Public License.
Each version of the Software is licensed under the License that
accompanies its corresponding source code distribution.
Future versions of the Software may be distributed under a different
version of the TELEMT Public License or under a different license,
as determined by the Telemt maintainers.
Any such change of license applies only to the versions of the
Software distributed with the new license and SHALL NOT retroactively
affect any previously released versions of the Software.
Recipients of the Software are granted rights only under the License
provided with the version of the Software they received.
Redistributions of the Software, including Modified Versions, MUST
preserve the copyright notices, license text, and conditions of this
License for all portions of the Software derived from Telemt.
Additional terms or licenses may be applied to modifications or
additional code added by a redistributor, provided that such terms
do not restrict or alter the rights granted under this License for
the original Telemt Software.
Nothing in this section limits the rights granted under this License
for versions of the Software already released.
### Definitions
For the purposes of this License:
- "Software" means the Telemt software, including source code, documentation,
and any associated files distributed under this License.
- "Contributor" means any person or entity that submits code, patches,
documentation, or other contributions to the Software that are accepted
into the Software by the maintainers.
- "Contribution" means any work of authorship intentionally submitted
to the Software for inclusion in the Software.
- "Modified Version" means any version of the Software that has been
changed, adapted, extended, or otherwise modified from the original
Software.
- "Maintainers" means the individuals or entities responsible for
the official Telemt project and its releases.
#### 1 Attribution
Redistributions of the Software, in source or binary form, MUST RETAIN the
above copyright notice, this license text, and any existing attribution
notices.
#### 2 Modification Notice
If you modify the Software, you MUST clearly state that the Software has been
modified and include a brief description of the changes made.
Modified versions MUST NOT be presented as the original Telemt.
#### 3 Trademark and Branding
This license DOES NOT grant permission to use the name "Telemt",
the Telemt logo, or any Telemt trademarks or branding.
Redistributed or modified versions of the Software MAY NOT use the Telemt
name in a way that suggests endorsement or official origin without explicit
permission from the Telemt maintainers.
Use of the name "Telemt" to describe a modified version of the Software
is permitted only if the modified version is clearly identified as a
modified or unofficial version.
Any distribution that could reasonably confuse users into believing that
the software is an official Telemt release is prohibited.
#### 4 Binary Distribution Transparency
If you distribute compiled binaries of the Software,
you are ENCOURAGED to provide access to the corresponding
source code and build instructions where reasonably possible.
This helps preserve transparency and allows recipients to verify the
integrity and reproducibility of distributed builds.
#### 5 Patent Grant and Defensive Termination Clause
Each contributor grants you a perpetual, worldwide, non-exclusive,
no-charge, royalty-free, irrevocable patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Software.
This patent license applies only to those patent claims necessarily
infringed by the contributor's contribution alone or by combination of
their contribution with the Software.
If you initiate or participate in any patent litigation, including
cross-claims or counterclaims, alleging that the Software or any
contribution incorporated within the Software constitutes patent
infringement, then **all rights granted to you under this license shall
terminate immediately** as of the date such litigation is filed.
Additionally, if you initiate legal action alleging that the
Software itself infringes your patent or other intellectual
property rights, then all rights granted to you under this
license SHALL TERMINATE automatically.
#### 6 Contributions
Unless you explicitly state otherwise, any Contribution intentionally
submitted for inclusion in the Software shall be licensed under the terms
of this License.
By submitting a Contribution, you grant the Telemt maintainers and all
recipients of the Software the rights described in this License with
respect to that Contribution.
#### 7 Network Use Attribution
If the Software is used to provide a publicly accessible network service,
the operator of such service SHOULD provide attribution to Telemt in at least
one of the following locations:
- service documentation
- service description
- an "About" or similar informational page
- other user-visible materials reasonably associated with the service
Such attribution MUST NOT imply endorsement by the Telemt project or its
maintainers.
#### 8 Disclaimer of Warranty and Severability Clause
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
USE OR OTHER DEALINGS IN THE SOFTWARE.
IF ANY PROVISION OF THIS LICENSE IS HELD TO BE INVALID OR UNENFORCEABLE,
SUCH PROVISION SHALL BE INTERPRETED TO REFLECT THE ORIGINAL INTENT
OF THE PARTIES AS CLOSELY AS POSSIBLE, AND THE REMAINING PROVISIONS
SHALL REMAIN IN FULL FORCE AND EFFECT.

View File

@@ -1,17 +1,12 @@
# LICENSING
## Licenses for Versions
| Version | License |
|---------|---------------|
| 1.0 | NO LICENSE |
| 1.1 | NO LICENSE |
| 1.2 | NO LICENSE |
| 2.0 | NO LICENSE |
| 3.0 | TELEMT UL 1 |
| Version ≥ | Version ≤ | License |
|-----------|-----------|---------------|
| 1.0 | 3.3.17 | NO LICENSE |
| 3.3.18 | 3.4.0 | TELEMT PL 3 |
### License Types
- **NO LICENSE** = ***ALL RIGHTS RESERVED***
- **TELEMT UL1** - work in progress license for source code of `telemt`, which encourages:
- fair use,
- contributions,
- distribution,
- but prohibits NOT mentioning the authors
- **TELEMT PL** - special Telemt Public License based on Apache License 2 principles
## [Telemt Public License 3](https://github.com/telemt/telemt/blob/main/LICENSE)

View File

@@ -19,22 +19,9 @@
### 🇷🇺 RU
#### Релиз 3.3.5 LTS - 6 марта
#### Релиз 3.3.15 Semistable
6 марта мы выпустили Telemt **3.3.5**
Это [3.3.5 - первая LTS-версия telemt](https://github.com/telemt/telemt/releases/tag/3.3.5)!
В ней используется:
- новый алгоритм ME NoWait для непревзойдённо быстрого восстановления пула
- Adaptive Floor, поддерживающий количество ME Writer на оптимальном уровне
- модель усовершенствованного доступа к KDF Fingerprint на RwLock
- строгая привязка Middle-End к DC-ID с предсказуемым алгоритмом деградации и самовосстановления
Telemt Control API V1 в 3.3.5 включает:
- несколько режимов работы в зависимости от доступных ресурсов
- снапшот-модель для живых метрик без вмешательства в hot-path
- минималистичный набор запросов для управления пользователями
[3.3.15](https://github.com/telemt/telemt/releases/tag/3.3.15) по итогам работы в продакшн признан одним из самых стабильных и рекомендуется к использованию, когда cutting-edge фичи некритичны!
Будем рады вашему фидбеку и предложениям по улучшению — особенно в части **API**, **статистики**, **UX**
@@ -53,22 +40,9 @@ Telemt Control API V1 в 3.3.5 включает:
### 🇬🇧 EN
#### Release 3.3.5 LTS - March 6
#### Release 3.3.15 Semistable
On March 6, we released Telemt **3.3.5**
This is [3.3.5 - the first LTS release of telemt](https://github.com/telemt/telemt/releases/tag/3.3.5)
It introduces:
- the new ME NoWait algorithm for exceptionally fast pool recovery
- Adaptive Floor, which maintains the number of ME Writers at an optimal level
- an improved KDF Fingerprint access model based on RwLock
- strict binding of Middle-End instances to DC-ID with a predictable degradation and self-recovery algorithm
Telemt Control API V1 in version 3.3.5 includes:
- multiple operating modes depending on available resources
- a snapshot-based model for live metrics without interfering with the hot path
- a minimalistic request set for user management
[3.3.15](https://github.com/telemt/telemt/releases/tag/3.3.15) is, based on the results of its work in production, recognized as one of the most stable releases and is recommended for use when cutting-edge features are not critical!
We are looking forward to your feedback and improvement proposals — especially regarding **API**, **statistics**, **UX**
@@ -264,6 +238,11 @@ git clone https://github.com/telemt/telemt
cd telemt
# Starting Release Build
cargo build --release
# Low-RAM devices (1 GB, e.g. NanoPi Neo3 / Raspberry Pi Zero 2):
# release profile uses lto = "thin" to reduce peak linker memory.
# If your custom toolchain overrides profiles, avoid enabling fat LTO.
# Move to /bin
mv ./target/release/telemt /bin
# Make executable
@@ -272,6 +251,12 @@ chmod +x /bin/telemt
telemt config.toml
```
### OpenBSD
- Build and service setup guide: [OpenBSD Guide (EN)](docs/OPENBSD.en.md)
- Example rc.d script: [contrib/openbsd/telemt.rcd](contrib/openbsd/telemt.rcd)
- Status: OpenBSD sandbox hardening with `pledge(2)` and `unveil(2)` is not implemented yet.
## Why Rust?
- Long-running reliability and idempotent behavior
- Rust's deterministic resource management - RAII

View File

@@ -1,697 +0,0 @@
# ==============================================================================
#
# TELEMT — Advanced Rust-based Telegram MTProto Proxy
# Full Configuration Reference
#
# This file is both a working config and a complete documentation.
# Every parameter is explained. Read it top to bottom before deploying.
#
# Quick Start:
# 1. Set [server].port to your desired port (443 recommended)
# 2. Generate a secret: openssl rand -hex 16
# 3. Put it in [access.users] under a name you choose
# 4. Set [censorship].tls_domain to a popular unblocked HTTPS site
# 5. Set your public IP in [general].middle_proxy_nat_ip
# and [general.links].public_host
# 6. Set announce IP in [[server.listeners]]
# 7. Run Telemt. It prints a tg:// link. Send it to your users.
#
# Modes of Operation:
# Direct Mode (use_middle_proxy = false)
# Connects straight to Telegram DCs via TCP. Simple, fast, low overhead.
# No ad_tag support. No CDN DC support (203, etc).
#
# Middle-Proxy Mode (use_middle_proxy = true)
# Connects to Telegram Middle-End servers via RPC protocol.
# Required for ad_tag monetization and CDN support.
# Requires proxy_secret_path and a valid public IP.
#
# ==============================================================================
# ==============================================================================
# LEGACY TOP-LEVEL FIELDS
# ==============================================================================
# Deprecated. Use [general.links].show instead.
# Accepts "*" for all users, or an array like ["alice", "bob"].
show_link = ["0"]
# Fallback Datacenter index (1-5) when a client requests an unknown DC ID.
# DC 2 is Amsterdam (Europe), closest for most CIS users.
# default_dc = 2
# ==============================================================================
# GENERAL SETTINGS
# ==============================================================================
[general]
# ------------------------------------------------------------------------------
# Core Protocol
# ------------------------------------------------------------------------------
# Coalesce the MTProto handshake and first data payload into a single TCP packet.
# Significantly reduces connection latency. No reason to disable.
fast_mode = true
# How the proxy connects to Telegram servers.
# false = Direct TCP to Telegram DCs (simple, low overhead)
# true = Middle-End RPC protocol (required for ad_tag and CDN DCs)
use_middle_proxy = true
# 32-char hex Ad-Tag from @MTProxybot for sponsored channel injection.
# Only works when use_middle_proxy = true.
# Obtain yours: message @MTProxybot on Telegram, register your proxy.
# ad_tag = "00000000000000000000000000000000"
# ------------------------------------------------------------------------------
# Middle-End Authentication
# ------------------------------------------------------------------------------
# Path to the Telegram infrastructure AES key file.
# Auto-downloaded from https://core.telegram.org/getProxySecret on first run.
# This key authenticates your proxy with Middle-End servers.
proxy_secret_path = "proxy-secret"
# ------------------------------------------------------------------------------
# Public IP Configuration (Critical for Middle-Proxy Mode)
# ------------------------------------------------------------------------------
# Your server's PUBLIC IPv4 address.
# Middle-End servers need this for the cryptographic Key Derivation Function.
# If your server has a direct public IP, set it here.
# If behind NAT (AWS, Docker, etc.), this MUST be your external IP.
# If omitted, Telemt uses STUN to auto-detect (see middle_proxy_nat_probe).
# middle_proxy_nat_ip = "203.0.113.10"
# Auto-detect public IP via STUN servers defined in [network].
# Set to false if you hardcoded middle_proxy_nat_ip above.
# Set to true if you want automatic detection.
middle_proxy_nat_probe = true
# ------------------------------------------------------------------------------
# Middle-End Connection Pool
# ------------------------------------------------------------------------------
# Number of persistent multiplexed RPC connections to ME servers.
# All client traffic is routed through these "fat pipes".
# 8 handles thousands of concurrent users comfortably.
middle_proxy_pool_size = 8
# Legacy field. Connections kept initialized but idle as warm standby.
middle_proxy_warm_standby = 16
# ------------------------------------------------------------------------------
# Middle-End Keepalive
# Telegram ME servers aggressively kill idle TCP connections.
# These settings send periodic RPC_PING frames to keep pipes alive.
# ------------------------------------------------------------------------------
me_keepalive_enabled = true
# Base interval between pings in seconds.
me_keepalive_interval_secs = 25
# Random jitter added to interval to prevent all connections pinging simultaneously.
me_keepalive_jitter_secs = 5
# Randomize ping payload bytes to prevent DPI from fingerprinting ping patterns.
me_keepalive_payload_random = true
# ------------------------------------------------------------------------------
# Client-Side Limits
# ------------------------------------------------------------------------------
# Max buffered ciphertext per client (bytes) when upstream is slow.
# Acts as backpressure to prevent memory exhaustion. 256KB is safe.
crypto_pending_buffer = 262144
# Maximum single MTProto frame size from client. 16MB is protocol standard.
max_client_frame = 16777216
# ------------------------------------------------------------------------------
# Crypto Desynchronization Logging
# Desync errors usually mean DPI/GFW is tampering with connections.
# ------------------------------------------------------------------------------
# true = full forensics (trace ID, IP hash, hex dumps) for EVERY desync event
# false = deduplicated logging, one entry per time window (prevents log spam)
# Set true if you are actively debugging DPI interference.
desync_all_full = true
# ------------------------------------------------------------------------------
# Beobachten — Built-in Honeypot / Active Probe Tracker
# Tracks IPs that fail handshakes or behave like TLS scanners.
# Output file can be fed into fail2ban or iptables for auto-blocking.
# ------------------------------------------------------------------------------
beobachten = true
# How long (minutes) to remember a suspicious IP before expiring it.
beobachten_minutes = 30
# How often (seconds) to flush tracker state to disk.
beobachten_flush_secs = 15
# File path for the tracker output.
beobachten_file = "cache/beobachten.txt"
# ------------------------------------------------------------------------------
# Hardswap — Zero-Downtime ME Pool Rotation
# When Telegram updates ME server IPs, Hardswap creates a completely new pool,
# waits until it is fully ready, migrates traffic, then kills the old pool.
# Users experience zero interruption.
# ------------------------------------------------------------------------------
hardswap = true
# ------------------------------------------------------------------------------
# ME Pool Warmup Staggering
# When creating a new pool, connections are opened one by one with delays
# to avoid a burst of SYN packets that could trigger ISP flood protection.
# ------------------------------------------------------------------------------
me_warmup_stagger_enabled = true
# Delay between each connection creation (milliseconds).
me_warmup_step_delay_ms = 500
# Random jitter added to the delay (milliseconds).
me_warmup_step_jitter_ms = 300
# ------------------------------------------------------------------------------
# ME Reconnect Backoff
# If an ME server drops the connection, Telemt retries with this strategy.
# ------------------------------------------------------------------------------
# Max simultaneous reconnect attempts per DC.
me_reconnect_max_concurrent_per_dc = 8
# Exponential backoff base (milliseconds).
me_reconnect_backoff_base_ms = 500
# Backoff ceiling (milliseconds). Will never wait longer than this.
me_reconnect_backoff_cap_ms = 30000
# Number of instant retries before switching to exponential backoff.
me_reconnect_fast_retry_count = 12
# ------------------------------------------------------------------------------
# NAT Mismatch Behavior
# If STUN-detected IP differs from local interface IP (you are behind NAT).
# false = abort ME mode (safe default)
# true = force ME mode anyway (use if you know your NAT setup is correct)
# ------------------------------------------------------------------------------
stun_iface_mismatch_ignore = false
# ------------------------------------------------------------------------------
# Logging
# ------------------------------------------------------------------------------
# File to log unknown DC requests (DC IDs outside standard 1-5).
unknown_dc_log_path = "unknown-dc.txt"
# Verbosity: "debug" | "verbose" | "normal" | "silent"
log_level = "normal"
# Disable ANSI color codes in log output (useful for file logging).
disable_colors = false
# ------------------------------------------------------------------------------
# FakeTLS Record Sizing
# Buffer small MTProto packets into larger TLS records to mimic real HTTPS.
# Real HTTPS servers send records close to MTU size (~1400 bytes).
# A stream of tiny TLS records is a strong DPI signal.
# Set to 0 to disable. Set to 1400 for realistic HTTPS emulation.
# ------------------------------------------------------------------------------
fast_mode_min_tls_record = 1400
# ------------------------------------------------------------------------------
# Periodic Updates
# ------------------------------------------------------------------------------
# How often (seconds) to re-fetch ME server lists and proxy secrets
# from core.telegram.org. Keeps your proxy in sync with Telegram infrastructure.
update_every = 300
# How often (seconds) to force a Hardswap even if the ME map is unchanged.
# Shorter intervals mean shorter-lived TCP flows, harder for DPI to profile.
me_reinit_every_secs = 600
# ------------------------------------------------------------------------------
# Hardswap Warmup Tuning
# Fine-grained control over how the new pool is warmed up before traffic switch.
# ------------------------------------------------------------------------------
me_hardswap_warmup_delay_min_ms = 1000
me_hardswap_warmup_delay_max_ms = 2000
me_hardswap_warmup_extra_passes = 3
me_hardswap_warmup_pass_backoff_base_ms = 500
# ------------------------------------------------------------------------------
# Config Update Debouncing
# Telegram sometimes pushes transient/broken configs. Debouncing requires
# N consecutive identical fetches before applying a change.
# ------------------------------------------------------------------------------
# ME server list must be identical for this many fetches before applying.
me_config_stable_snapshots = 2
# Minimum seconds between config applications.
me_config_apply_cooldown_secs = 300
# Proxy secret must be identical for this many fetches before applying.
proxy_secret_stable_snapshots = 2
# ------------------------------------------------------------------------------
# Proxy Secret Rotation
# ------------------------------------------------------------------------------
# Apply newly downloaded secrets at runtime without restart.
proxy_secret_rotate_runtime = true
# Maximum acceptable secret length (bytes). Rejects abnormally large secrets.
proxy_secret_len_max = 256
# ------------------------------------------------------------------------------
# Hardswap Drain Settings
# Controls graceful shutdown of old ME connections during pool rotation.
# ------------------------------------------------------------------------------
# Seconds to keep old connections alive for in-flight data before force-closing.
me_pool_drain_ttl_secs = 90
# Minimum ratio of healthy connections in new pool before draining old pool.
# 0.8 = at least 80% of new pool must be ready.
me_pool_min_fresh_ratio = 0.8
# Maximum seconds to wait for drain to complete before force-killing.
me_reinit_drain_timeout_secs = 120
# ------------------------------------------------------------------------------
# NTP Clock Check
# MTProto uses timestamps. Clock drift > 30 seconds breaks handshakes.
# Telemt checks on startup and warns if out of sync.
# ------------------------------------------------------------------------------
ntp_check = true
ntp_servers = ["pool.ntp.org"]
# ------------------------------------------------------------------------------
# Auto-Degradation
# If ME servers become completely unreachable (ISP blocking),
# automatically fall back to Direct Mode so users stay connected.
# ------------------------------------------------------------------------------
auto_degradation_enabled = true
# Number of DC groups that must be unreachable before triggering fallback.
degradation_min_unavailable_dc_groups = 2
# ==============================================================================
# ALLOWED CLIENT PROTOCOLS
# Only enable what you need. In censored regions, TLS-only is safest.
# ==============================================================================
[general.modes]
# Classic MTProto. Unobfuscated length prefixes. Trivially detected by DPI.
# No reason to enable unless you have ancient clients.
classic = false
# Obfuscated MTProto with randomized padding. Better than classic, but
# still detectable by statistical analysis of packet sizes.
secure = false
# FakeTLS (ee-secrets). Wraps MTProto in TLS 1.3 framing.
# To DPI, it looks like a normal HTTPS connection.
# This should be the ONLY enabled mode in censored environments.
tls = true
# ==============================================================================
# STARTUP LINK GENERATION
# Controls what tg:// invite links are printed to console on startup.
# ==============================================================================
[general.links]
# Which users to generate links for.
# "*" = all users, or an array like ["alice", "bob"].
show = "*"
# IP or domain to embed in the tg:// link.
# If omitted, Telemt uses STUN to auto-detect.
# Set this to your server's public IP or domain for reliable links.
# public_host = "proxy.example.com"
# Port to embed in the tg:// link.
# If omitted, uses [server].port.
# public_port = 443
# ==============================================================================
# NETWORK & IP RESOLUTION
# ==============================================================================
[network]
# Enable IPv4 for outbound connections to Telegram.
ipv4 = true
# Enable IPv6 for outbound connections to Telegram.
ipv6 = false
# Prefer IPv4 (4) or IPv6 (6) when both are available.
prefer = 4
# Experimental: use both IPv4 and IPv6 ME servers simultaneously.
# May improve reliability but doubles connection count.
multipath = false
# STUN servers for external IP discovery.
# Used for Middle-Proxy KDF (if nat_probe=true) and link generation.
stun_servers = [
"stun.l.google.com:5349",
"stun1.l.google.com:3478",
"stun.gmx.net:3478",
"stun.l.google.com:19302"
]
# If UDP STUN is blocked, attempt TCP-based STUN as fallback.
stun_tcp_fallback = true
# If all STUN fails, use HTTP APIs to discover public IP.
http_ip_detect_urls = [
"https://ifconfig.me/ip",
"https://api.ipify.org"
]
# Cache discovered public IP to this file to survive restarts.
cache_public_ip_path = "cache/public_ip.txt"
# ==============================================================================
# SERVER BINDING & METRICS
# ==============================================================================
[server]
# TCP port to listen on.
# 443 is recommended (looks like normal HTTPS traffic).
port = 443
# IPv4 bind address. "0.0.0.0" = all interfaces.
listen_addr_ipv4 = "0.0.0.0"
# IPv6 bind address. "::" = all interfaces.
listen_addr_ipv6 = "::"
# Unix socket listener (for reverse proxy setups with Nginx/HAProxy).
# listen_unix_sock = "/var/run/telemt.sock"
# listen_unix_sock_perm = "0660"
# Enable PROXY protocol header parsing.
# Set true ONLY if Telemt is behind HAProxy/Nginx that injects PROXY headers.
# If enabled without a proxy in front, clients will fail to connect.
proxy_protocol = false
# Prometheus metrics HTTP endpoint port.
# Uncomment to enable. Access at http://your-server:9090/metrics
# metrics_port = 9090
# IP ranges allowed to access the metrics endpoint.
metrics_whitelist = [
"127.0.0.1/32",
"::1/128"
]
# ------------------------------------------------------------------------------
# Listener Overrides
# Define explicit listeners with specific bind IPs and announce IPs.
# The announce IP is what gets embedded in tg:// links and sent to ME servers.
# You MUST set announce to your server's public IP for ME mode to work.
# ------------------------------------------------------------------------------
# [[server.listeners]]
# ip = "0.0.0.0"
# announce = "203.0.113.10"
# reuse_allow = false
# ==============================================================================
# TIMEOUTS (seconds unless noted)
# ==============================================================================
[timeouts]
# Maximum time for client to complete FakeTLS + MTProto handshake.
client_handshake = 15
# Maximum time to establish TCP connection to upstream Telegram DC.
tg_connect = 10
# TCP keepalive interval for client connections.
client_keepalive = 60
# Maximum client inactivity before dropping the connection.
client_ack = 300
# Instant retry count for a single ME endpoint before giving up on it.
me_one_retry = 3
# Timeout (milliseconds) for a single ME endpoint connection attempt.
me_one_timeout_ms = 1500
# ==============================================================================
# ANTI-CENSORSHIP / FAKETLS / MASKING
# This is where Telemt becomes invisible to Deep Packet Inspection.
# ==============================================================================
[censorship]
# ------------------------------------------------------------------------------
# TLS Domain Fronting
# The SNI (Server Name Indication) your proxy presents to connecting clients.
# Must be a popular, unblocked HTTPS website in your target country.
# DPI sees traffic to this domain. Choose carefully.
# Good choices: major CDNs, banks, government sites, search engines.
# Bad choices: obscure sites, already-blocked domains.
# ------------------------------------------------------------------------------
tls_domain = "www.google.com"
# ------------------------------------------------------------------------------
# Active Probe Masking
# When someone connects but fails the MTProto handshake (wrong secret),
# they might be an ISP active prober testing if this is a proxy.
#
# mask = false: drop the connection (prober knows something is here)
# mask = true: transparently proxy them to mask_host (prober sees a real website)
#
# With mask enabled, your server is indistinguishable from a real web server
# to anyone who doesn't have the correct secret.
# ------------------------------------------------------------------------------
mask = true
# The real web server to forward failed handshakes to.
# If omitted, defaults to tls_domain.
# mask_host = "www.google.com"
# Port on the mask host to connect to.
mask_port = 443
# Inject PROXY protocol header when forwarding to mask host.
# 0 = disabled, 1 = v1, 2 = v2. Leave disabled unless mask_host expects it.
# mask_proxy_protocol = 0
# ------------------------------------------------------------------------------
# TLS Certificate Emulation
# ------------------------------------------------------------------------------
# Size (bytes) of the locally generated fake TLS certificate.
# Only used when tls_emulation is disabled.
fake_cert_len = 2048
# KILLER FEATURE: Real-Time TLS Emulation.
# Telemt connects to tls_domain, fetches its actual TLS 1.3 certificate chain,
# and exactly replicates the byte sizes of ServerHello and Certificate records.
# Defeats DPI that uses TLS record length heuristics to detect proxies.
# Strongly recommended in censored environments.
tls_emulation = true
# Directory to cache fetched TLS certificates.
tls_front_dir = "tlsfront"
# ------------------------------------------------------------------------------
# ServerHello Timing
# Real web servers take 30-150ms to respond to ClientHello due to network
# latency and crypto processing. A proxy responding in <1ms is suspicious.
# These settings add realistic delay to mimic genuine server behavior.
# ------------------------------------------------------------------------------
# Minimum delay before sending ServerHello (milliseconds).
server_hello_delay_min_ms = 50
# Maximum delay before sending ServerHello (milliseconds).
server_hello_delay_max_ms = 150
# ------------------------------------------------------------------------------
# TLS Session Tickets
# Real TLS 1.3 servers send 1-2 NewSessionTicket messages after handshake.
# A server that sends zero tickets is anomalous and may trigger DPI flags.
# Set this to match your tls_domain's behavior (usually 2).
# ------------------------------------------------------------------------------
# tls_new_session_tickets = 0
# ------------------------------------------------------------------------------
# Full Certificate Frequency
# When tls_emulation is enabled, this controls how often (per client IP)
# to send the complete emulated certificate chain.
#
# > 0: Subsequent connections within TTL seconds get a smaller cached version.
# Saves bandwidth but creates a detectable size difference between
# first and repeat connections.
#
# = 0: Every connection gets the full certificate. More bandwidth but
# perfectly consistent behavior, no anomalies for DPI to detect.
# ------------------------------------------------------------------------------
tls_full_cert_ttl_secs = 0
# ------------------------------------------------------------------------------
# ALPN Enforcement
# Ensure ServerHello responds with the exact ALPN protocol the client requested.
# Mismatched ALPN (e.g., client asks h2, server says http/1.1) is a DPI red flag.
# ------------------------------------------------------------------------------
alpn_enforce = true
# ==============================================================================
# ACCESS CONTROL & USERS
# ==============================================================================
[access]
# ------------------------------------------------------------------------------
# Replay Attack Protection
# DPI can record a legitimate user's handshake and replay it later to probe
# whether the server is a proxy. Telemt remembers recent handshake nonces
# and rejects duplicates.
# ------------------------------------------------------------------------------
# Number of nonce slots in the replay detection buffer.
replay_check_len = 65536
# How long (seconds) to remember nonces before expiring them.
replay_window_secs = 1800
# Allow clients with incorrect system clocks to connect.
# false = reject clients with significant time skew (more secure)
# true = accept anyone regardless of clock (more permissive)
ignore_time_skew = false
# ------------------------------------------------------------------------------
# User Secrets
# Each user needs a unique 32-character hex string as their secret.
# Generate with: openssl rand -hex 16
#
# This secret is embedded in the tg:// link. Anyone with it can connect.
# Format: username = "hex_secret"
# ------------------------------------------------------------------------------
[access.users]
# alice = "0123456789abcdef0123456789abcdef"
# bob = "fedcba9876543210fedcba9876543210"
# ------------------------------------------------------------------------------
# Per-User Connection Limits
# Limits concurrent TCP connections per user to prevent secret sharing.
# Uncomment and set for each user as needed.
# ------------------------------------------------------------------------------
[access.user_max_tcp_conns]
# alice = 100
# bob = 50
# ------------------------------------------------------------------------------
# Per-User Expiration Dates
# Automatically revoke access after the specified date (ISO 8601 format).
# ------------------------------------------------------------------------------
[access.user_expirations]
# alice = "2025-12-31T23:59:59Z"
# bob = "2026-06-15T00:00:00Z"
# ------------------------------------------------------------------------------
# Per-User Data Quotas
# Maximum total bytes transferred per user. Connection refused after limit.
# ------------------------------------------------------------------------------
[access.user_data_quota]
# alice = 107374182400
# bob = 53687091200
# ------------------------------------------------------------------------------
# Per-User Unique IP Limits
# Maximum number of different IP addresses that can use this secret
# at the same time. Highly effective against secret leaking/sharing.
# Set to 1 for single-device, 2-3 for phone+desktop, etc.
# ------------------------------------------------------------------------------
[access.user_max_unique_ips]
# alice = 3
# bob = 2
# ==============================================================================
# UPSTREAM ROUTING
# Controls how Telemt connects to Telegram servers (or ME servers).
# If omitted entirely, uses the OS default route.
# ==============================================================================
# ------------------------------------------------------------------------------
# Direct upstream: use the server's own network interface.
# You can optionally bind to a specific interface or local IP.
# ------------------------------------------------------------------------------
# [[upstreams]]
# type = "direct"
# interface = "eth0"
# bind_addresses = ["192.0.2.10"]
# weight = 1
# enabled = true
# scopes = "*"
# ------------------------------------------------------------------------------
# SOCKS5 upstream: route Telegram traffic through a SOCKS5 proxy.
# Useful if your server's IP is blocked from reaching Telegram DCs.
# ------------------------------------------------------------------------------
# [[upstreams]]
# type = "socks5"
# address = "198.51.100.30:1080"
# username = "proxy-user"
# password = "proxy-pass"
# weight = 1
# enabled = true
# ==============================================================================
# DATACENTER OVERRIDES
# Force specific DC IDs to route to specific IP:Port combinations.
# DC 203 (CDN) is auto-injected by Telemt if not specified here.
# ==============================================================================
# [dc_overrides]
# "201" = "149.154.175.50:443"
# "202" = ["149.154.167.51:443", "149.154.175.100:443"]

View File

@@ -0,0 +1,16 @@
#!/bin/ksh
# /etc/rc.d/telemt
#
# rc.d(8) script for Telemt MTProxy daemon.
# Tokio runtime does not daemonize itself, so rc_bg=YES is used.

# Path to the daemon binary; rc.subr builds the start command from this.
daemon="/usr/local/bin/telemt"
# Run as the dedicated unprivileged account (see sysusers/useradd setup).
daemon_user="_telemt"
# Flags passed to the daemon: the path to the config file is the sole
# positional argument telemt expects.
daemon_flags="/etc/telemt/config.toml"

# Pull in the standard OpenBSD rc.d framework (defines rc_cmd and friends).
. /etc/rc.d/rc.subr

# Daemon stays in the foreground, so let rc.subr background it.
rc_bg=YES
# Telemt has no reload handler; disable "rcctl reload telemt".
rc_reload=NO

# Dispatch start/stop/restart/check per the requested action.
rc_cmd $1

View File

@@ -0,0 +1,3 @@
u telemt - "telemt user" /var/lib/telemt -
g telemt - -
m telemt telemt

View File

@@ -0,0 +1,21 @@
[Unit]
Description=Telemt
Wants=network-online.target
After=multi-user.target network.target network-online.target
[Service]
Type=simple
User=telemt
Group=telemt
WorkingDirectory=/var/lib/telemt
ExecStart=/usr/bin/telemt /etc/telemt/telemt.toml
Restart=on-failure
RestartSec=10
LimitNOFILE=65536
AmbientCapabilities=CAP_NET_BIND_SERVICE
CapabilityBoundingSet=CAP_NET_BIND_SERVICE
NoNewPrivileges=true
[Install]
WantedBy=multi-user.target

View File

@@ -0,0 +1 @@
d /var/lib/telemt 700 telemt telemt

View File

@@ -55,7 +55,10 @@ user2 = "00000000000000000000000000000002"
user3 = "00000000000000000000000000000003"
```
4. Save the config. Ctrl+S -> Ctrl+X. You don't need to restart telemt.
5. Get the links via `journalctl -u telemt -n -g "links" --no-pager -o cat | tac`
5. Get the links via
```bash
curl -s http://127.0.0.1:9091/v1/users | jq
```
## How to view metrics

View File

@@ -55,7 +55,10 @@ user2 = "00000000000000000000000000000002"
user3 = "00000000000000000000000000000003"
```
4. Сохранить конфиг. Ctrl+S -> Ctrl+X. Перезапускать telemt не нужно.
5. Получить ссылки через `journalctl -u telemt -n -g "links" --no-pager -o cat | tac`
5. Получить ссылки через
```bash
curl -s http://127.0.0.1:9091/v1/users | jq
```
## Как посмотреть метрики

View File

@@ -0,0 +1,92 @@
# Öffentliche TELEMT-Lizenz 3
***Alle Rechte vorbehalten (c) 2026 Telemt***
Hiermit wird jeder Person, die eine Kopie dieser Software und der dazugehörigen Dokumentation (nachfolgend "Software") erhält, unentgeltlich die Erlaubnis erteilt, die Software ohne Einschränkungen zu nutzen, einschließlich des Rechts, die Software zu verwenden, zu vervielfältigen, zu ändern, abgeleitete Werke zu erstellen, zu verbinden, zu veröffentlichen, zu verbreiten, zu unterlizenzieren und/oder Kopien der Software zu verkaufen sowie diese Rechte auch denjenigen einzuräumen, denen die Software zur Verfügung gestellt wird, vorausgesetzt, dass sämtliche Urheberrechtshinweise sowie die Bedingungen und Bestimmungen dieser Lizenz eingehalten werden.
### Begriffsbestimmungen
Für die Zwecke dieser Lizenz gelten die folgenden Definitionen:
**"Software" (Software)** — die Telemt-Software einschließlich Quellcode, Dokumentation und sämtlicher zugehöriger Dateien, die unter den Bedingungen dieser Lizenz verbreitet werden.
**"Contributor" (Contributor)** — jede natürliche oder juristische Person, die Code, Patches, Dokumentation oder andere Materialien eingereicht hat, die von den Maintainers des Projekts angenommen und in die Software aufgenommen wurden.
**"Beitrag" (Contribution)** — jedes urheberrechtlich geschützte Werk, das bewusst zur Aufnahme in die Software eingereicht wurde.
**"Modifizierte Version" (Modified Version)** — jede Version der Software, die gegenüber der ursprünglichen Software geändert, angepasst, erweitert oder anderweitig modifiziert wurde.
**"Maintainers" (Maintainers)** — natürliche oder juristische Personen, die für das offizielle Telemt-Projekt und dessen offizielle Veröffentlichungen verantwortlich sind.
### 1 Urheberrechtshinweis (Attribution)
Bei der Weitergabe der Software, sowohl in Form des Quellcodes als auch in binärer Form, MÜSSEN folgende Elemente erhalten bleiben:
- der oben genannte Urheberrechtshinweis;
- der vollständige Text dieser Lizenz;
- sämtliche bestehenden Hinweise auf Urheberschaft.
### 2 Hinweis auf Modifikationen
Wenn Änderungen an der Software vorgenommen werden, MUSS die Person, die diese Änderungen vorgenommen hat, eindeutig darauf hinweisen, dass die Software modifiziert wurde, und eine kurze Beschreibung der vorgenommenen Änderungen beifügen.
Modifizierte Versionen der Software DÜRFEN NICHT als die originale Version von Telemt dargestellt werden.
### 3 Marken und Bezeichnungen
Diese Lizenz GEWÄHRT KEINE Rechte zur Nutzung der Bezeichnung **"Telemt"**, des Telemt-Logos oder sonstiger Marken, Kennzeichen oder Branding-Elemente von Telemt.
Weiterverbreitete oder modifizierte Versionen der Software DÜRFEN die Bezeichnung Telemt nicht in einer Weise verwenden, die bei Nutzern den Eindruck eines offiziellen Ursprungs oder einer Billigung durch das Telemt-Projekt erwecken könnte, sofern hierfür keine ausdrückliche Genehmigung der Maintainers vorliegt.
Die Verwendung der Bezeichnung **Telemt** zur Beschreibung einer modifizierten Version der Software ist nur zulässig, wenn diese Version eindeutig als modifiziert oder inoffiziell gekennzeichnet ist.
Jegliche Verbreitung, die Nutzer vernünftigerweise darüber täuschen könnte, dass es sich um eine offizielle Veröffentlichung von Telemt handelt, ist untersagt.
### 4 Transparenz bei der Verbreitung von Binärversionen
Im Falle der Verbreitung kompilierter Binärversionen der Software wird der Verbreiter HIERMIT ERMUTIGT (encouraged), soweit dies vernünftigerweise möglich ist, Zugang zum entsprechenden Quellcode sowie zu den Build-Anweisungen bereitzustellen.
Diese Praxis trägt zur Transparenz bei und ermöglicht es Empfängern, die Integrität und Reproduzierbarkeit der verbreiteten Builds zu überprüfen.
## 5 Gewährung einer Patentlizenz und Beendigung von Rechten
Jeder Contributor gewährt den Empfängern der Software eine unbefristete, weltweite, nicht-exklusive, unentgeltliche, lizenzgebührenfreie und unwiderrufliche Patentlizenz für:
- die Herstellung,
- die Beauftragung der Herstellung,
- die Nutzung,
- das Anbieten zum Verkauf,
- den Verkauf,
- den Import,
- sowie jede sonstige Verbreitung der Software.
Diese Patentlizenz erstreckt sich ausschließlich auf solche Patentansprüche, die notwendigerweise durch den jeweiligen Beitrag des Contributors allein oder in Kombination mit der Software verletzt würden.
Leitet eine Person ein Patentverfahren ein oder beteiligt sich daran, einschließlich Gegenklagen oder Kreuzklagen, mit der Behauptung, dass die Software oder ein darin enthaltener Beitrag ein Patent verletzt, **erlöschen sämtliche durch diese Lizenz gewährten Rechte für diese Person unmittelbar mit Einreichung der Klage**.
Darüber hinaus erlöschen alle durch diese Lizenz gewährten Rechte **automatisch**, wenn eine Person ein gerichtliches Verfahren einleitet, in dem behauptet wird, dass die Software selbst ein Patent oder andere Rechte des geistigen Eigentums verletzt.
### 6 Beteiligung und Beiträge zur Entwicklung
Sofern ein Contributor nicht ausdrücklich etwas anderes erklärt, gilt jeder Beitrag, der bewusst zur Aufnahme in die Software eingereicht wird, als unter den Bedingungen dieser Lizenz lizenziert.
Durch die Einreichung eines Beitrags gewährt der Contributor den Maintainers des Telemt-Projekts sowie allen Empfängern der Software die in dieser Lizenz beschriebenen Rechte in Bezug auf diesen Beitrag.
### 7 Urheberhinweis bei Netzwerk- und Servicenutzung
Wird die Software zur Bereitstellung eines öffentlich zugänglichen Netzwerkdienstes verwendet, MUSS der Betreiber dieses Dienstes einen Hinweis auf die Urheberschaft von Telemt an mindestens einer der folgenden Stellen anbringen:
* in der Servicedokumentation;
* in der Dienstbeschreibung;
* auf einer Seite "Über" oder einer vergleichbaren Informationsseite;
* in anderen für Nutzer zugänglichen Materialien, die in angemessenem Zusammenhang mit dem Dienst stehen.
Ein solcher Hinweis DARF NICHT den Eindruck erwecken, dass der Dienst vom Telemt-Projekt oder dessen Maintainers unterstützt oder offiziell gebilligt wird.
### 8 Haftungsausschluss und salvatorische Klausel
DIE SOFTWARE WIRD "WIE BESEHEN" BEREITGESTELLT, OHNE JEGLICHE AUSDRÜCKLICHE ODER STILLSCHWEIGENDE GEWÄHRLEISTUNG, EINSCHLIESSLICH, ABER NICHT BESCHRÄNKT AUF GEWÄHRLEISTUNGEN DER MARKTGÄNGIGKEIT, DER EIGNUNG FÜR EINEN BESTIMMTEN ZWECK UND DER NICHTVERLETZUNG VON RECHTEN.
IN KEINEM FALL HAFTEN DIE AUTOREN ODER RECHTEINHABER FÜR IRGENDWELCHE ANSPRÜCHE, SCHÄDEN ODER SONSTIGE HAFTUNG, DIE AUS VERTRAG, UNERLAUBTER HANDLUNG ODER AUF ANDERE WEISE AUS DER SOFTWARE ODER DER NUTZUNG DER SOFTWARE ENTSTEHEN.
SOLLTE EINE BESTIMMUNG DIESER LIZENZ ALS UNWIRKSAM ODER NICHT DURCHSETZBAR ANGESEHEN WERDEN, IST DIESE BESTIMMUNG SO AUSZULEGEN, DASS SIE DEM URSPRÜNGLICHEN WILLEN DER PARTEIEN MÖGLICHST NAHEKOMMT; DIE ÜBRIGEN BESTIMMUNGEN BLEIBEN DAVON UNBERÜHRT UND IN VOLLER WIRKUNG.

143
docs/LICENSE/LICENSE.en.md Normal file
View File

@@ -0,0 +1,143 @@
###### TELEMT Public License 3 ######
##### Copyright (c) 2026 Telemt #####
Permission is hereby granted, free of charge, to any person obtaining a copy
of this Software and associated documentation files (the "Software"),
to use, reproduce, modify, prepare derivative works of, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to permit
persons to whom the Software is furnished to do so, provided that all
copyright notices, license terms, and conditions set forth in this License
are preserved and complied with.
### Official Translations
The canonical version of this License is the English version.
Official translations are provided for informational purposes only
and for convenience, and do not have legal force. In case of any
discrepancy, the English version of this License shall prevail.
Available versions:
- English in Markdown: docs/LICENSE/LICENSE.md
- German: docs/LICENSE/LICENSE.de.md
- Russian: docs/LICENSE/LICENSE.ru.md
### Definitions
For the purposes of this License:
"Software" means the Telemt software, including source code, documentation,
and any associated files distributed under this License.
"Contributor" means any person or entity that submits code, patches,
documentation, or other contributions to the Software that are accepted
into the Software by the maintainers.
"Contribution" means any work of authorship intentionally submitted
to the Software for inclusion in the Software.
"Modified Version" means any version of the Software that has been
changed, adapted, extended, or otherwise modified from the original
Software.
"Maintainers" means the individuals or entities responsible for
the official Telemt project and its releases.
#### 1 Attribution
Redistributions of the Software, in source or binary form, MUST RETAIN the
above copyright notice, this license text, and any existing attribution
notices.
#### 2 Modification Notice
If you modify the Software, you MUST clearly state that the Software has been
modified and include a brief description of the changes made.
Modified versions MUST NOT be presented as the original Telemt.
#### 3 Trademark and Branding
This license DOES NOT grant permission to use the name "Telemt",
the Telemt logo, or any Telemt trademarks or branding.
Redistributed or modified versions of the Software MAY NOT use the Telemt
name in a way that suggests endorsement or official origin without explicit
permission from the Telemt maintainers.
Use of the name "Telemt" to describe a modified version of the Software
is permitted only if the modified version is clearly identified as a
modified or unofficial version.
Any distribution that could reasonably confuse users into believing that
the software is an official Telemt release is prohibited.
#### 4 Binary Distribution Transparency
If you distribute compiled binaries of the Software,
you are ENCOURAGED to provide access to the corresponding
source code and build instructions where reasonably possible.
This helps preserve transparency and allows recipients to verify the
integrity and reproducibility of distributed builds.
#### 5 Patent Grant and Defensive Termination Clause
Each contributor grants you a perpetual, worldwide, non-exclusive,
no-charge, royalty-free, irrevocable patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Software.
This patent license applies only to those patent claims necessarily
infringed by the contributor's contribution alone or by combination of
their contribution with the Software.
If you initiate or participate in any patent litigation, including
cross-claims or counterclaims, alleging that the Software or any
contribution incorporated within the Software constitutes patent
infringement, then **all rights granted to you under this license shall
terminate immediately** as of the date such litigation is filed.
Additionally, if you initiate legal action alleging that the
Software itself infringes your patent or other intellectual
property rights, then all rights granted to you under this
license SHALL TERMINATE automatically.
#### 6 Contributions
Unless you explicitly state otherwise, any Contribution intentionally
submitted for inclusion in the Software shall be licensed under the terms
of this License.
By submitting a Contribution, you grant the Telemt maintainers and all
recipients of the Software the rights described in this License with
respect to that Contribution.
#### 7 Network Use Attribution
If the Software is used to provide a publicly accessible network service,
the operator of such service MUST provide attribution to Telemt in at least
one of the following locations:
- service documentation
- service description
- an "About" or similar informational page
- other user-visible materials reasonably associated with the service
Such attribution MUST NOT imply endorsement by the Telemt project or its
maintainers.
#### 8 Disclaimer of Warranty and Severability Clause
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
USE OR OTHER DEALINGS IN THE SOFTWARE.
IF ANY PROVISION OF THIS LICENSE IS HELD TO BE INVALID OR UNENFORCEABLE,
SUCH PROVISION SHALL BE INTERPRETED TO REFLECT THE ORIGINAL INTENT
OF THE PARTIES AS CLOSELY AS POSSIBLE, AND THE REMAINING PROVISIONS
SHALL REMAIN IN FULL FORCE AND EFFECT.

View File

@@ -0,0 +1,90 @@
# Публичная лицензия TELEMT 3
***Все права защищены (c) 2026 Telemt***
Настоящим любому лицу, получившему копию данного программного обеспечения и сопутствующей документации (далее — "Программное обеспечение"), безвозмездно предоставляется разрешение использовать Программное обеспечение без ограничений, включая право использовать, воспроизводить, изменять, создавать производные произведения, объединять, публиковать, распространять, сублицензировать и (или) продавать копии Программного обеспечения, а также предоставлять такие права лицам, которым предоставляется Программное обеспечение, при условии соблюдения всех уведомлений об авторских правах, условий и положений настоящей Лицензии.
### Определения
Для целей настоящей Лицензии применяются следующие определения:
**"Программное обеспечение" (Software)** — программное обеспечение Telemt, включая исходный код, документацию и любые связанные файлы, распространяемые на условиях настоящей Лицензии.
**"Контрибьютор" (Contributor)** — любое физическое или юридическое лицо, направившее код, исправления (патчи), документацию или иные материалы, которые были приняты мейнтейнерами проекта и включены в состав Программного обеспечения.
**"Вклад" (Contribution)** — любое произведение авторского права, намеренно представленное для включения в состав Программного обеспечения.
**"Модифицированная версия" (Modified Version)** — любая версия Программного обеспечения, которая была изменена, адаптирована, расширена или иным образом модифицирована по сравнению с исходным Программным обеспечением.
**"Мейнтейнеры" (Maintainers)** — физические или юридические лица, ответственные за официальный проект Telemt и его официальные релизы.
### 1 Указание авторства
При распространении Программного обеспечения, как в форме исходного кода, так и в бинарной форме, ДОЛЖНЫ СОХРАНЯТЬСЯ:
- указанное выше уведомление об авторских правах;
- текст настоящей Лицензии;
- любые существующие уведомления об авторстве.
### 2 Уведомление о модификации
В случае внесения изменений в Программное обеспечение лицо, осуществившее такие изменения, ОБЯЗАНО явно указать, что Программное обеспечение было модифицировано, а также включить краткое описание внесённых изменений.
Модифицированные версии Программного обеспечения НЕ ДОЛЖНЫ представляться как оригинальная версия Telemt.
### 3 Товарные знаки и обозначения
Настоящая Лицензия НЕ ПРЕДОСТАВЛЯЕТ права использовать наименование **"Telemt"**, логотип Telemt, а также любые товарные знаки, фирменные обозначения или элементы бренда Telemt.
Распространяемые или модифицированные версии Программного обеспечения НЕ ДОЛЖНЫ использовать наименование Telemt таким образом, который может создавать у пользователей впечатление официального происхождения либо одобрения со стороны проекта Telemt без явного разрешения мейнтейнеров проекта.
Использование наименования **Telemt** для описания модифицированной версии Программного обеспечения допускается только при условии, что такая версия ясно обозначена как модифицированная или неофициальная.
Запрещается любое распространение, которое может разумно вводить пользователей в заблуждение относительно того, что программное обеспечение является официальным релизом Telemt.
### 4 Прозрачность распространения бинарных версий
В случае распространения скомпилированных бинарных версий Программного обеспечения распространитель НАСТОЯЩИМ ПОБУЖДАЕТСЯ предоставлять доступ к соответствующему исходному коду и инструкциям по сборке, если это разумно возможно.
Такая практика способствует прозрачности распространения и позволяет получателям проверять целостность и воспроизводимость распространяемых сборок.
### 5 Предоставление патентной лицензии и прекращение прав
Каждый контрибьютор предоставляет получателям Программного обеспечения бессрочную, всемирную, неисключительную, безвозмездную, не требующую выплаты роялти и безотзывную патентную лицензию на:
- изготовление,
- поручение изготовления,
- использование,
- предложение к продаже,
- продажу,
- импорт,
- и иное распространение Программного обеспечения.
Такая патентная лицензия распространяется исключительно на те патентные требования, которые неизбежно нарушаются соответствующим вкладом контрибьютора как таковым либо его сочетанием с Программным обеспечением.
Если лицо инициирует либо участвует в каком-либо судебном разбирательстве по патентному спору, включая встречные или перекрёстные иски, утверждая, что Программное обеспечение либо любой вклад, включённый в него, нарушает патент, **все права, предоставленные такому лицу настоящей Лицензией, немедленно прекращаются** с даты подачи соответствующего иска.
Кроме того, если лицо инициирует судебное разбирательство, утверждая, что само Программное обеспечение нарушает его патентные либо иные права интеллектуальной собственности, все права, предоставленные настоящей Лицензией, **автоматически прекращаются**.
### 6 Участие и вклад в разработку
Если контрибьютор явно не указал иное, любой Вклад, намеренно представленный для включения в Программное обеспечение, считается лицензированным на условиях настоящей Лицензии.
Путём предоставления Вклада контрибьютор предоставляет мейнтейнерам проекта Telemt и всем получателям Программного обеспечения права, предусмотренные настоящей Лицензией, в отношении такого Вклада.
### 7 Указание авторства при сетевом и сервисном использовании
В случае использования Программного обеспечения для предоставления публично доступного сетевого сервиса оператор такого сервиса ОБЯЗАН обеспечить указание авторства Telemt как минимум в одном из следующих мест:
- документация сервиса;
- описание сервиса;
- страница "О программе" или аналогичная информационная страница;
- иные материалы, доступные пользователям и разумно связанные с данным сервисом.
Такое указание авторства НЕ ДОЛЖНО создавать впечатление одобрения или официальной поддержки со стороны проекта Telemt либо его мейнтейнеров.
### 8 Отказ от гарантий и делимость положений
ПРОГРАММНОЕ ОБЕСПЕЧЕНИЕ ПРЕДОСТАВЛЯЕТСЯ "КАК ЕСТЬ", БЕЗ КАКИХ-ЛИБО ГАРАНТИЙ, ЯВНЫХ ИЛИ ПОДРАЗУМЕВАЕМЫХ, ВКЛЮЧАЯ, НО НЕ ОГРАНИЧИВАЯСЬ ГАРАНТИЯМИ КОММЕРЧЕСКОЙ ПРИГОДНОСТИ, ПРИГОДНОСТИ ДЛЯ КОНКРЕТНОЙ ЦЕЛИ И НЕНАРУШЕНИЯ ПРАВ.
НИ ПРИ КАКИХ ОБСТОЯТЕЛЬСТВАХ АВТОРЫ ИЛИ ПРАВООБЛАДАТЕЛИ НЕ НЕСУТ ОТВЕТСТВЕННОСТИ ПО КАКИМ-ЛИБО ТРЕБОВАНИЯМ, УБЫТКАМ ИЛИ ИНОЙ ОТВЕТСТВЕННОСТИ, ВОЗНИКАЮЩЕЙ В РЕЗУЛЬТАТЕ ДОГОВОРА, ДЕЛИКТА ИЛИ ИНЫМ ОБРАЗОМ, СВЯЗАННЫМ С ПРОГРАММНЫМ ОБЕСПЕЧЕНИЕМ ИЛИ ЕГО ИСПОЛЬЗОВАНИЕМ.
В СЛУЧАЕ ЕСЛИ КАКОЕ-ЛИБО ПОЛОЖЕНИЕ НАСТОЯЩЕЙ ЛИЦЕНЗИИ ПРИЗНАЁТСЯ НЕДЕЙСТВИТЕЛЬНЫМ ИЛИ НЕПРИМЕНИМЫМ, ТАКОЕ ПОЛОЖЕНИЕ ПОДЛЕЖИТ ТОЛКОВАНИЮ МАКСИМАЛЬНО БЛИЗКО К ИСХОДНОМУ НАМЕРЕНИЮ СТОРОН, ПРИ ЭТОМ ОСТАЛЬНЫЕ ПОЛОЖЕНИЯ СОХРАНЯЮТ ПОЛНУЮ ЮРИДИЧЕСКУЮ СИЛУ.

132
docs/OPENBSD.en.md Normal file
View File

@@ -0,0 +1,132 @@
# Telemt on OpenBSD (Build, Run, and rc.d)
This guide covers a practical OpenBSD deployment flow for Telemt:
- build from source,
- install binary and config,
- run as an rc.d daemon,
- verify basic runtime behavior.
## 1. Prerequisites
Install required packages:
```sh
doas pkg_add rust git
```
Notes:
- Telemt release installer (`install.sh`) is Linux-only.
- On OpenBSD, use source build with `cargo`.
## 2. Build from source
```sh
git clone https://github.com/telemt/telemt
cd telemt
cargo build --release
./target/release/telemt --version
```
For low-RAM systems, this repository already uses `lto = "thin"` in release profile.
## 3. Install binary and config
```sh
doas install -d -m 0755 /usr/local/bin
doas install -m 0755 ./target/release/telemt /usr/local/bin/telemt
doas install -d -m 0750 /etc/telemt
doas install -m 0640 ./config.toml /etc/telemt/config.toml
```
## 4. Create runtime user
```sh
doas useradd -L daemon -s /sbin/nologin -d /var/empty _telemt
```
If `_telemt` already exists, continue.
## 5. Install rc.d service
Install the provided script:
```sh
doas install -m 0555 ./contrib/openbsd/telemt.rcd /etc/rc.d/telemt
```
Enable and start:
```sh
doas rcctl enable telemt
# Optional: send daemon output to syslog
#doas rcctl set telemt logger daemon.info
doas rcctl start telemt
```
Service controls:
```sh
doas rcctl check telemt
doas rcctl restart telemt
doas rcctl stop telemt
```
## 6. Resource limits (recommended)
OpenBSD rc.d can apply limits via login class. Add class `telemt` and assign it to `_telemt`.
Example class entry:
```text
telemt:\
:openfiles-cur=8192:openfiles-max=16384:\
:datasize-cur=768M:datasize-max=1024M:\
:coredumpsize=0:\
:tc=daemon:
```
These values are conservative defaults for small and medium deployments.
Increase `openfiles-*` only if logs show descriptor exhaustion under load.
Then rebuild database and assign class:
```sh
doas cap_mkdb /etc/login.conf
#doas usermod -L telemt _telemt
```
Uncomment `usermod` if you want this class bound to the Telemt user.
## 7. Functional smoke test
1. Validate service state:
```sh
doas rcctl check telemt
```
2. Check listener is present (replace 443 if needed):
```sh
netstat -n -f inet -p tcp | grep LISTEN | grep '\.443'
```
3. Verify process user:
```sh
ps -o user,pid,command -ax | grep telemt | grep -v grep
```
4. If startup fails, debug in foreground:
```sh
RUST_LOG=debug /usr/local/bin/telemt /etc/telemt/config.toml
```
## 8. OpenBSD-specific caveats
- OpenBSD does not support per-socket keepalive retries/interval tuning in the same way as Linux.
- Telemt source already uses target-aware cfg gates for keepalive setup.
- Use rc.d/rcctl, not systemd.

View File

@@ -48,11 +48,16 @@ Save the obtained result somewhere. You will need it later!
---
**1. Place your config to /etc/telemt.toml**
**1. Place your config to /etc/telemt/telemt.toml**
Create config directory:
```bash
mkdir /etc/telemt
```
Open nano
```bash
nano /etc/telemt.toml
nano /etc/telemt/telemt.toml
```
paste your config
@@ -67,6 +72,9 @@ classic = false
secure = false
tls = true
[server]
port = 443
[server.api]
enabled = true
# listen = "127.0.0.1:9091"
@@ -90,7 +98,14 @@ then Ctrl+S -> Ctrl+X to save
---
**2. Create service on /etc/systemd/system/telemt.service**
**2. Create telemt user**
```bash
useradd -d /opt/telemt -m -r -U telemt
chown -R telemt:telemt /etc/telemt
```
**3. Create service on /etc/systemd/system/telemt.service**
Open nano
```bash
@@ -101,28 +116,38 @@ paste this Systemd Module
```bash
[Unit]
Description=Telemt
After=network.target
After=network-online.target
Wants=network-online.target
[Service]
Type=simple
WorkingDirectory=/bin
ExecStart=/bin/telemt /etc/telemt.toml
User=telemt
Group=telemt
WorkingDirectory=/opt/telemt
ExecStart=/bin/telemt /etc/telemt/telemt.toml
Restart=on-failure
LimitNOFILE=65536
AmbientCapabilities=CAP_NET_BIND_SERVICE
CapabilityBoundingSet=CAP_NET_BIND_SERVICE
NoNewPrivileges=true
[Install]
WantedBy=multi-user.target
```
then Ctrl+S -> Ctrl+X to save
reload systemd units
```bash
systemctl daemon-reload
```
**3.** To start it, enter the command `systemctl start telemt`
**4.** To start it, enter the command `systemctl start telemt`
**4.** To get status information, enter `systemctl status telemt`
**5.** To get status information, enter `systemctl status telemt`
**5.** For automatic startup at system boot, enter `systemctl enable telemt`
**6.** For automatic startup at system boot, enter `systemctl enable telemt`
**6.** To get the link(s), enter
**7.** To get the link(s), enter
```bash
curl -s http://127.0.0.1:9091/v1/users | jq
```

View File

@@ -48,11 +48,16 @@ python3 -c 'import os; print(os.urandom(16).hex())'
---
**1. Поместите свою конфигурацию в файл /etc/telemt.toml**
**1. Поместите свою конфигурацию в файл /etc/telemt/telemt.toml**
Создаём директорию для конфига:
```bash
mkdir /etc/telemt
```
Открываем nano
```bash
nano /etc/telemt.toml
nano /etc/telemt/telemt.toml
```
Вставьте свою конфигурацию
@@ -67,6 +72,9 @@ classic = false
secure = false
tls = true
[server]
port = 443
[server.api]
enabled = true
# listen = "127.0.0.1:9091"
@@ -90,7 +98,14 @@ hello = "00000000000000000000000000000000"
---
**2. Создайте службу в /etc/systemd/system/telemt.service**
**2. Создайте пользователя для telemt**
```bash
useradd -d /opt/telemt -m -r -U telemt
chown -R telemt:telemt /etc/telemt
```
**3. Создайте службу в /etc/systemd/system/telemt.service**
Открываем nano
```bash
@@ -101,35 +116,45 @@ nano /etc/systemd/system/telemt.service
```bash
[Unit]
Description=Telemt
After=network.target
After=network-online.target
Wants=network-online.target
[Service]
Type=simple
WorkingDirectory=/bin
ExecStart=/bin/telemt /etc/telemt.toml
User=telemt
Group=telemt
WorkingDirectory=/opt/telemt
ExecStart=/bin/telemt /etc/telemt/telemt.toml
Restart=on-failure
LimitNOFILE=65536
AmbientCapabilities=CAP_NET_BIND_SERVICE
CapabilityBoundingSet=CAP_NET_BIND_SERVICE
NoNewPrivileges=true
[Install]
WantedBy=multi-user.target
```
Затем нажмите Ctrl+S -> Ctrl+X, чтобы сохранить
перезагрузите конфигурацию systemd
```bash
systemctl daemon-reload
```
**3.** Для запуска введите команду `systemctl start telemt`
**4.** Для запуска введите команду `systemctl start telemt`
**4.** Для получения информации о статусе введите `systemctl status telemt`
**5.** Для получения информации о статусе введите `systemctl status telemt`
**5.** Для автоматического запуска при запуске системы введите `systemctl enable telemt`
**6.** Для автоматического запуска при запуске системы введите `systemctl enable telemt`
**6.** Для получения ссылки/ссылок введите
**7.** Для получения ссылки/ссылок введите
```bash
curl -s http://127.0.0.1:9091/v1/users | jq
```
> Одной ссылкой может пользоваться сколько угодно человек.
> [!WARNING]
> Рабочую ссылку может выдать только команда из 6 пункта. Не пытайтесь делать ее самостоятельно или копировать откуда-либо если вы не уверены в том, что делаете!
> Рабочую ссылку может выдать только команда из 7 пункта. Не пытайтесь делать ее самостоятельно или копировать откуда-либо если вы не уверены в том, что делаете!
---

View File

@@ -0,0 +1,274 @@
# TLS-F и TCP-S в Telemt
## Общая архитектура
**Telemt** - это прежде всего реализация **MTProxy**, через которую проходит payload Telegram
Подсистема **TLS-Fronting / TCP-Splitting** служит **маскировочным транспортным слоем**, задача которого - сделать MTProxy-соединение внешне похожим на обычное TLS-подключение к легитимному сайту
Таким образом:
- **MTProxy** - основной функциональный слой Telemt для обработки Telegram-трафика
- **TLS-Fronting / TCP-Splitting** - подсистема маскировки транспорта
С точки зрения сети Telemt ведёт себя как **TLS-сервер**, но фактически:
- валидные MTProxy-клиенты остаются внутри контура Telemt
- любые другие TLS-клиенты проксируются на обычный HTTPS-сервер-заглушку
# Базовый сценарий / Best-practice
Предположим, у вас есть домен:
```
umweltschutz.de
```
### 1 DNS
Вы создаёте A-запись:
```
umweltschutz.de -> A-запись 198.18.88.88
```
где `198.18.88.88` - IP вашего сервера с telemt
### 2 TLS-домен
В конфигурации Telemt:
```
tls_domain = umweltschutz.de
```
Этот домен используется клиентом как SNI в ClientHello
### 3 Сервер-заглушка
Вы поднимаете обычный HTTPS-сервер, например **nginx**, с сертификатом для этого домена.
Он может работать:
- на том же сервере
- на другом сервере
- на другом порту
В конфигурации Telemt:
```
mask_host = 127.0.0.1
mask_port = 8443
```
где `127.0.0.1` - IP сервера-заглушки, а 8443 - порт, который он слушает
Этот сервер нужен **для обработки любых non-MTProxy запросов**
### 4 Работа Telemt
После запуска Telemt действует следующим образом:
1) принимает входящее TCP-соединение
2) анализирует TLS-ClientHello
3) пытается определить, является ли соединение валидным **MTProxy FakeTLS**
Далее работают два варианта логики:
---
# Сценарий 1 - MTProxy клиент с валидным ключом
Если клиент предъявил **валидный MTProxy-ключ**:
- соединение **остаётся внутри Telemt**
- TLS используется только как **транспортная маскировка**
- далее запускается обычная логика **MTProxy**
Для внешнего наблюдателя это выглядит как:
```
TLS connection -> umweltschutz.de
```
Хотя внутри передаётся **MTProto-трафик Telegram**
# Сценарий 2 - обычный TLS-клиент - crawler / scanner / browser
Если Telemt не обнаруживает валидный MTProxy-ключ:
соединение **переключается в режим TCP-Splitting / TCP-Splicing**.
В этом режиме Telemt:
1. открывает новое TCP-соединение к
```
mask_host:mask_port
```
2. начинает **проксировать TCP-трафик**
Важно:
* клиентский TLS-запрос **НЕ модифицируется**
* **ClientHello передаётся "как есть", без изменений**
* **SNI остаётся неизменным**
* Telemt **не завершает TLS-рукопожатие**, а только перенаправляет его на более низком уровне сетевого стека - L4
Таким образом upstream-сервер получает **оригинальное TLS-соединение клиента**:
- если это nginx-заглушка, он просто отдаёт обычный сайт
- для внешнего наблюдателя это выглядит как обычный HTTPS-сервер
# TCP-S / TCP-Splitting / TCP-Splicing
Ключевые свойства механизма:
**Telemt работает как TCP-переключатель:**
1) принимает соединение
2) определяет тип клиента
3) либо:
- обрабатывает MTProxy внутри
- либо проксирует TCP-поток
При проксировании:
- Telemt **разрешает `mask_host` в IP**
- устанавливает TCP-соединение
- начинает **bidirectional TCP relay**
При этом:
- TLS-рукопожатие происходит **между клиентом и `mask_host`**
- Telemt выступает только **на уровне L4 - как TCP-релей**, такой же как HAProxy в TCP-режиме
# Использование чужого домена
Можно использовать и внешний сайт.
Например:
```
tls_domain = github.com
mask_host = github.com
mask_port = 443
```
или
```
mask_host = 140.82.121.4
```
В этом случае:
- цензор видит **TLS-подключение к github.com**
- обычные клиенты/краулер действительно получают **настоящий GitHub**
Telemt просто **проксирует TCP-соединение на GitHub**
# Что видит анализатор трафика?
Для DPI это выглядит так:
```
client -> TLS -> github.com
```
или
```
client -> TLS -> umweltschutz.de
```
TLS-handshake выглядит валидным, SNI соответствует домену, сертификат корректный - от целевого `mask_host:mask_port`
# Что видит сканер / краулер?
Если сканер попытается подключиться:
```
openssl s_client -connect 198.18.88.88:443 -servername umweltschutz.de
```
он получит **обычный HTTPS-сайт-заглушку**
Потому что:
- он не предъявил MTProxy-ключ
- Telemt отправил соединение на `mask_host:mask_port`, на котором находится nginx
# Какую проблему решает TLS-Fronting / TCP-Splitting?
Эта архитектура решает сразу несколько проблем обхода цензуры.
## 1 Закрытие плоскости MTProxy от активного сканирования
Многие цензоры:
- сканируют IP-адреса
- проверяют известные сигнатуры прокси
Telemt отвечает на такие проверки **обычным HTTPS-сайтом**, поэтому прокси невозможно обнаружить простым сканированием
---
## 2 Маскировка трафика под легитимный TLS
Для DPI-систем соединение выглядит как:
```
обычный TLS-трафик к популярному домену
```
Это делает блокировку значительно сложнее и непредсказуемее
---
## 3 Устойчивость к протокольному анализу
MTProxy трафик проходит **внутри TLS-like-потока**, поэтому:
- не видны характерные сигнатуры MTProto
- соединение выглядит как обычный HTTPS
---
## 4 Правдоподобное поведение сервера
Даже если краулер:
- подключится сам
- выполнит TLS-handshake
- попытается получить HTTP-ответ
он увидит **реальный сайт**, а не telemt
Это устраняет один из главных признаков для антифрод-краулеров мобильных операторов
# Схема
```text
Client
│ TCP
V
Telemt
├── valid MTProxy key
│ │
│ V
│ MTProxy logic
└── обычный TLS клиент
V
TCP-Splitting
V
mask_host:mask_port
```

View File

@@ -19,6 +19,15 @@ need_cmd() {
command -v "$1" >/dev/null 2>&1 || die "required command not found: $1"
}
# Normalize `uname -s` into the lowercase OS tag used for release artifacts.
detect_os() {
    os="$(uname -s)"
    if [ "$os" = "Linux" ]; then
        printf 'linux\n'
    elif [ "$os" = "OpenBSD" ]; then
        printf 'openbsd\n'
    else
        printf '%s\n' "$os"
    fi
}
detect_arch() {
arch="$(uname -m)"
case "$arch" in
@@ -68,6 +77,19 @@ need_cmd grep
need_cmd install
ARCH="$(detect_arch)"
OS="$(detect_os)"
# install.sh ships Linux artifacts only; anything else aborts, with a
# targeted hint for OpenBSD users (build from source instead).
case "$OS" in
    linux)
        ;;
    openbsd)
        die "install.sh installs only Linux release artifacts. On OpenBSD, build from source (see docs/OPENBSD.en.md)."
        ;;
    *)
        die "unsupported operating system for install.sh: $OS"
        ;;
esac
LIBC="$(detect_libc)"
case "$VERSION" in

View File

@@ -16,6 +16,7 @@ use tracing::{debug, info, warn};
use crate::config::ProxyConfig;
use crate::ip_tracker::UserIpTracker;
use crate::proxy::route_mode::RouteRuntimeController;
use crate::startup::StartupTracker;
use crate::stats::Stats;
use crate::transport::middle_proxy::MePool;
@@ -84,6 +85,7 @@ pub(super) struct ApiShared {
pub(super) request_id: Arc<AtomicU64>,
pub(super) runtime_state: Arc<ApiRuntimeState>,
pub(super) startup_tracker: Arc<StartupTracker>,
pub(super) route_runtime: Arc<RouteRuntimeController>,
}
impl ApiShared {
@@ -101,6 +103,7 @@ pub async fn serve(
stats: Arc<Stats>,
ip_tracker: Arc<UserIpTracker>,
me_pool: Arc<RwLock<Option<Arc<MePool>>>>,
route_runtime: Arc<RouteRuntimeController>,
upstream_manager: Arc<UpstreamManager>,
config_rx: watch::Receiver<Arc<ProxyConfig>>,
admission_rx: watch::Receiver<bool>,
@@ -147,6 +150,7 @@ pub async fn serve(
request_id: Arc::new(AtomicU64::new(1)),
runtime_state: runtime_state.clone(),
startup_tracker,
route_runtime,
});
spawn_runtime_watchers(
@@ -338,7 +342,7 @@ async fn handle(
}
("GET", "/v1/runtime/me-selftest") => {
let revision = current_revision(&shared.config_path).await?;
let data = build_runtime_me_selftest_data(shared.as_ref()).await;
let data = build_runtime_me_selftest_data(shared.as_ref(), cfg.as_ref()).await;
Ok(success_response(StatusCode::OK, data, revision))
}
("GET", "/v1/runtime/connections/summary") => {

View File

@@ -236,6 +236,8 @@ pub(super) struct MeWritersSummary {
pub(super) required_writers: usize,
pub(super) alive_writers: usize,
pub(super) coverage_pct: f64,
pub(super) fresh_alive_writers: usize,
pub(super) fresh_coverage_pct: f64,
}
#[derive(Serialize, Clone)]
@@ -250,6 +252,12 @@ pub(super) struct MeWriterStatus {
pub(super) bound_clients: usize,
pub(super) idle_for_secs: Option<u64>,
pub(super) rtt_ema_ms: Option<f64>,
pub(super) matches_active_generation: bool,
pub(super) in_desired_map: bool,
pub(super) allow_drain_fallback: bool,
pub(super) drain_started_at_epoch_secs: Option<u64>,
pub(super) drain_deadline_epoch_secs: Option<u64>,
pub(super) drain_over_ttl: bool,
}
#[derive(Serialize, Clone)]
@@ -276,6 +284,8 @@ pub(super) struct DcStatus {
pub(super) floor_capped: bool,
pub(super) alive_writers: usize,
pub(super) coverage_pct: f64,
pub(super) fresh_alive_writers: usize,
pub(super) fresh_coverage_pct: f64,
pub(super) rtt_ms: Option<f64>,
pub(super) load: usize,
}

View File

@@ -1,11 +1,14 @@
use std::net::IpAddr;
use std::collections::HashMap;
use std::sync::{Mutex, OnceLock};
use std::time::{SystemTime, UNIX_EPOCH};
use serde::Serialize;
use crate::config::{ProxyConfig, UpstreamType};
use crate::network::probe::{detect_interface_ipv4, detect_interface_ipv6, is_bogon};
use crate::transport::middle_proxy::{bnd_snapshot, timeskew_snapshot};
use crate::transport::middle_proxy::{bnd_snapshot, timeskew_snapshot, upstream_bnd_snapshots};
use crate::transport::UpstreamRouteKind;
use super::ApiShared;
@@ -65,13 +68,26 @@ pub(super) struct RuntimeMeSelftestBndData {
pub(super) last_seen_age_secs: Option<u64>,
}
#[derive(Serialize)]
pub(super) struct RuntimeMeSelftestUpstreamData {
pub(super) upstream_id: usize,
pub(super) route_kind: &'static str,
pub(super) address: String,
#[serde(skip_serializing_if = "Option::is_none")]
pub(super) bnd: Option<RuntimeMeSelftestBndData>,
#[serde(skip_serializing_if = "Option::is_none")]
pub(super) ip: Option<String>,
}
#[derive(Serialize)]
pub(super) struct RuntimeMeSelftestPayload {
pub(super) kdf: RuntimeMeSelftestKdfData,
pub(super) timeskew: RuntimeMeSelftestTimeskewData,
pub(super) ip: RuntimeMeSelftestIpData,
pub(super) pid: RuntimeMeSelftestPidData,
pub(super) bnd: RuntimeMeSelftestBndData,
pub(super) bnd: Option<RuntimeMeSelftestBndData>,
#[serde(skip_serializing_if = "Option::is_none")]
pub(super) upstreams: Option<Vec<RuntimeMeSelftestUpstreamData>>,
}
#[derive(Serialize)]
@@ -98,7 +114,10 @@ fn kdf_ewma_state() -> &'static Mutex<KdfEwmaState> {
KDF_EWMA_STATE.get_or_init(|| Mutex::new(KdfEwmaState::default()))
}
pub(super) async fn build_runtime_me_selftest_data(shared: &ApiShared) -> RuntimeMeSelftestData {
pub(super) async fn build_runtime_me_selftest_data(
shared: &ApiShared,
cfg: &ProxyConfig,
) -> RuntimeMeSelftestData {
let now_epoch_secs = now_epoch_secs();
if shared.me_pool.read().await.is_none() {
return RuntimeMeSelftestData {
@@ -139,7 +158,26 @@ pub(super) async fn build_runtime_me_selftest_data(shared: &ApiShared) -> Runtim
let pid = std::process::id();
let pid_state = if pid == 1 { "one" } else { "non-one" };
let bnd = bnd_snapshot();
let has_socks_upstreams = cfg.upstreams.iter().any(|upstream| {
upstream.enabled
&& matches!(
upstream.upstream_type,
UpstreamType::Socks4 { .. } | UpstreamType::Socks5 { .. }
)
});
let bnd = if has_socks_upstreams {
let snapshot = bnd_snapshot();
Some(RuntimeMeSelftestBndData {
addr_state: snapshot.addr_status,
port_state: snapshot.port_status,
last_addr: snapshot.last_addr.map(|value| value.to_string()),
last_seen_age_secs: snapshot.last_seen_age_secs,
})
} else {
None
};
let upstreams = build_upstream_selftest_data(shared);
RuntimeMeSelftestData {
enabled: true,
@@ -168,16 +206,41 @@ pub(super) async fn build_runtime_me_selftest_data(shared: &ApiShared) -> Runtim
pid,
state: pid_state,
},
bnd: RuntimeMeSelftestBndData {
addr_state: bnd.addr_status,
port_state: bnd.port_status,
last_addr: bnd.last_addr.map(|value| value.to_string()),
last_seen_age_secs: bnd.last_seen_age_secs,
},
bnd,
upstreams,
}),
}
}
/// Build per-upstream rows for the runtime ME self-test payload.
///
/// Returns `None` when the upstream manager yields no snapshot
/// (`try_api_snapshot` returned `None`) or when at most one upstream is
/// configured, in which case no per-upstream breakdown is produced.
fn build_upstream_selftest_data(shared: &ApiShared) -> Option<Vec<RuntimeMeSelftestUpstreamData>> {
    let snapshot = shared.upstream_manager.try_api_snapshot()?;
    if snapshot.summary.configured_total <= 1 {
        return None;
    }
    // Index BND snapshots by upstream id for O(1) pairing below; `remove`
    // transfers ownership so `last_ip` can be moved out for the `ip` field.
    let mut upstream_bnd_by_id: HashMap<usize, _> = upstream_bnd_snapshots()
        .into_iter()
        .map(|entry| (entry.upstream_id, entry))
        .collect();
    let mut rows = Vec::with_capacity(snapshot.upstreams.len());
    for upstream in snapshot.upstreams {
        let upstream_bnd = upstream_bnd_by_id.remove(&upstream.upstream_id);
        rows.push(RuntimeMeSelftestUpstreamData {
            upstream_id: upstream.upstream_id,
            route_kind: map_route_kind(upstream.route_kind),
            address: upstream.address,
            // Upstreams without a BND snapshot serialize with `bnd`/`ip`
            // omitted (fields are `skip_serializing_if = "Option::is_none"`).
            bnd: upstream_bnd.as_ref().map(|entry| RuntimeMeSelftestBndData {
                addr_state: entry.addr_status,
                port_state: entry.port_status,
                last_addr: entry.last_addr.map(|value| value.to_string()),
                last_seen_age_secs: entry.last_seen_age_secs,
            }),
            ip: upstream_bnd.and_then(|entry| entry.last_ip.map(|value| value.to_string())),
        });
    }
    Some(rows)
}
fn update_kdf_ewma(now_epoch_secs: u64, total_errors: u64) -> f64 {
let Ok(mut guard) = kdf_ewma_state().lock() else {
return 0.0;
@@ -216,6 +279,14 @@ fn classify_ip(ip: IpAddr) -> &'static str {
"good"
}
/// Map an internal upstream route kind onto its stable API string label.
fn map_route_kind(value: UpstreamRouteKind) -> &'static str {
    match value {
        UpstreamRouteKind::Direct => "direct",
        UpstreamRouteKind::Socks4 => "socks4",
        UpstreamRouteKind::Socks5 => "socks5",
    }
}
/// Round `value` to three decimal places (half-away-from-zero, via `f64::round`).
fn round3(value: f64) -> f64 {
    let scaled = value * 1000.0;
    scaled.round() / 1000.0
}

View File

@@ -314,6 +314,8 @@ async fn get_minimal_payload_cached(
required_writers: status.required_writers,
alive_writers: status.alive_writers,
coverage_pct: status.coverage_pct,
fresh_alive_writers: status.fresh_alive_writers,
fresh_coverage_pct: status.fresh_coverage_pct,
},
writers: status
.writers
@@ -329,6 +331,12 @@ async fn get_minimal_payload_cached(
bound_clients: entry.bound_clients,
idle_for_secs: entry.idle_for_secs,
rtt_ema_ms: entry.rtt_ema_ms,
matches_active_generation: entry.matches_active_generation,
in_desired_map: entry.in_desired_map,
allow_drain_fallback: entry.allow_drain_fallback,
drain_started_at_epoch_secs: entry.drain_started_at_epoch_secs,
drain_deadline_epoch_secs: entry.drain_deadline_epoch_secs,
drain_over_ttl: entry.drain_over_ttl,
})
.collect(),
};
@@ -363,6 +371,8 @@ async fn get_minimal_payload_cached(
floor_capped: entry.floor_capped,
alive_writers: entry.alive_writers,
coverage_pct: entry.coverage_pct,
fresh_alive_writers: entry.fresh_alive_writers,
fresh_coverage_pct: entry.fresh_coverage_pct,
rtt_ms: entry.rtt_ms,
load: entry.load,
})
@@ -486,6 +496,8 @@ fn disabled_me_writers(now_epoch_secs: u64, reason: &'static str) -> MeWritersDa
required_writers: 0,
alive_writers: 0,
coverage_pct: 0.0,
fresh_alive_writers: 0,
fresh_coverage_pct: 0.0,
},
writers: Vec::new(),
}

View File

@@ -3,6 +3,7 @@ use std::sync::atomic::Ordering;
use serde::Serialize;
use crate::config::{MeFloorMode, MeWriterPickMode, ProxyConfig, UserMaxUniqueIpsMode};
use crate::proxy::route_mode::RelayRouteMode;
use super::ApiShared;
use super::runtime_init::build_runtime_startup_summary;
@@ -35,6 +36,10 @@ pub(super) struct RuntimeGatesData {
pub(super) me_runtime_ready: bool,
pub(super) me2dc_fallback_enabled: bool,
pub(super) use_middle_proxy: bool,
pub(super) route_mode: &'static str,
pub(super) reroute_active: bool,
#[serde(skip_serializing_if = "Option::is_none")]
pub(super) reroute_to_direct_at_epoch_secs: Option<u64>,
pub(super) startup_status: &'static str,
pub(super) startup_stage: String,
pub(super) startup_progress_pct: f64,
@@ -85,6 +90,7 @@ pub(super) struct EffectiveMiddleProxyLimits {
#[derive(Serialize)]
pub(super) struct EffectiveUserIpPolicyLimits {
pub(super) global_each: usize,
pub(super) mode: &'static str,
pub(super) window_secs: u64,
}
@@ -157,6 +163,16 @@ pub(super) async fn build_runtime_gates_data(
cfg: &ProxyConfig,
) -> RuntimeGatesData {
let startup_summary = build_runtime_startup_summary(shared).await;
let route_state = shared.route_runtime.snapshot();
let route_mode = route_state.mode.as_str();
let reroute_active = cfg.general.use_middle_proxy
&& cfg.general.me2dc_fallback
&& matches!(route_state.mode, RelayRouteMode::Direct);
let reroute_to_direct_at_epoch_secs = if reroute_active {
shared.route_runtime.direct_since_epoch_secs()
} else {
None
};
let me_runtime_ready = if !cfg.general.use_middle_proxy {
true
} else {
@@ -175,6 +191,9 @@ pub(super) async fn build_runtime_gates_data(
me_runtime_ready,
me2dc_fallback_enabled: cfg.general.me2dc_fallback,
use_middle_proxy: cfg.general.use_middle_proxy,
route_mode,
reroute_active,
reroute_to_direct_at_epoch_secs,
startup_status: startup_summary.status,
startup_stage: startup_summary.stage,
startup_progress_pct: startup_summary.progress_pct,
@@ -244,6 +263,7 @@ pub(super) fn build_limits_effective_data(cfg: &ProxyConfig) -> EffectiveLimitsD
me2dc_fallback: cfg.general.me2dc_fallback,
},
user_ip_policy: EffectiveUserIpPolicyLimits {
global_each: cfg.access.user_max_unique_ips_global_each,
mode: user_max_unique_ips_mode_label(cfg.access.user_max_unique_ips_mode),
window_secs: cfg.access.user_max_unique_ips_window_secs,
},

View File

@@ -386,7 +386,16 @@ pub(super) async fn users_from_config(
.get(&username)
.map(chrono::DateTime::<chrono::Utc>::to_rfc3339),
data_quota_bytes: cfg.access.user_data_quota.get(&username).copied(),
max_unique_ips: cfg.access.user_max_unique_ips.get(&username).copied(),
max_unique_ips: cfg
.access
.user_max_unique_ips
.get(&username)
.copied()
.filter(|limit| *limit > 0)
.or(
(cfg.access.user_max_unique_ips_global_each > 0)
.then_some(cfg.access.user_max_unique_ips_global_each),
),
current_connections: stats.get_user_curr_connects(&username),
active_unique_ips: active_ip_list.len(),
active_unique_ips_list: active_ip_list,

View File

@@ -147,6 +147,10 @@ pub(crate) fn default_proxy_protocol_header_timeout_ms() -> u64 {
500
}
/// Default cap on concurrent client connections for the `[server]`
/// section's `max_connections` option (0 is documented as unlimited).
pub(crate) fn default_server_max_connections() -> u32 {
    10_000
}
pub(crate) fn default_prefer_4() -> u8 {
4
}
@@ -584,6 +588,10 @@ pub(crate) fn default_me_pool_drain_ttl_secs() -> u64 {
90
}
/// Default `me_pool_drain_threshold`; hot-reloadable (mirrored in `HotFields`).
pub(crate) fn default_me_pool_drain_threshold() -> u64 {
    128
}
/// Default ME bind stale TTL; deliberately reuses the pool drain TTL default
/// so both timeouts stay in lockstep unless explicitly configured.
pub(crate) fn default_me_bind_stale_ttl_secs() -> u64 {
    default_me_pool_drain_ttl_secs()
}
@@ -635,6 +643,10 @@ pub(crate) fn default_user_max_unique_ips_window_secs() -> u64 {
DEFAULT_USER_MAX_UNIQUE_IPS_WINDOW_SECS
}
/// Default global per-user unique-IP limit. 0 disables the inherited global
/// limit; per-user entries in `user_max_unique_ips` still apply on their own.
pub(crate) fn default_user_max_unique_ips_global_each() -> usize {
    0
}
// Custom deserializer helpers
#[derive(Deserialize)]

View File

@@ -21,9 +21,11 @@
//! `network.*`, `use_middle_proxy`) are **not** applied; a warning is emitted.
//! Non-hot changes are never mixed into the runtime config snapshot.
use std::collections::BTreeSet;
use std::net::IpAddr;
use std::path::PathBuf;
use std::sync::Arc;
use std::path::{Path, PathBuf};
use std::sync::{Arc, RwLock as StdRwLock};
use std::time::Duration;
use notify::{EventKind, RecursiveMode, Watcher, recommended_watcher};
use tokio::sync::{mpsc, watch};
@@ -33,7 +35,10 @@ use crate::config::{
LogLevel, MeBindStaleMode, MeFloorMode, MeSocksKdfPolicy, MeTelemetryLevel,
MeWriterPickMode,
};
use super::load::ProxyConfig;
use super::load::{LoadedConfig, ProxyConfig};
const HOT_RELOAD_STABLE_SNAPSHOTS: u8 = 2;
const HOT_RELOAD_DEBOUNCE: Duration = Duration::from_millis(50);
// ── Hot fields ────────────────────────────────────────────────────────────────
@@ -50,6 +55,7 @@ pub struct HotFields {
pub me_reinit_coalesce_window_ms: u64,
pub hardswap: bool,
pub me_pool_drain_ttl_secs: u64,
pub me_pool_drain_threshold: u64,
pub me_pool_min_fresh_ratio: f32,
pub me_reinit_drain_timeout_secs: u64,
pub me_hardswap_warmup_delay_min_ms: u64,
@@ -113,6 +119,7 @@ pub struct HotFields {
pub user_expirations: std::collections::HashMap<String, chrono::DateTime<chrono::Utc>>,
pub user_data_quota: std::collections::HashMap<String, u64>,
pub user_max_unique_ips: std::collections::HashMap<String, usize>,
pub user_max_unique_ips_global_each: usize,
pub user_max_unique_ips_mode: crate::config::UserMaxUniqueIpsMode,
pub user_max_unique_ips_window_secs: u64,
}
@@ -130,6 +137,7 @@ impl HotFields {
me_reinit_coalesce_window_ms: cfg.general.me_reinit_coalesce_window_ms,
hardswap: cfg.general.hardswap,
me_pool_drain_ttl_secs: cfg.general.me_pool_drain_ttl_secs,
me_pool_drain_threshold: cfg.general.me_pool_drain_threshold,
me_pool_min_fresh_ratio: cfg.general.me_pool_min_fresh_ratio,
me_reinit_drain_timeout_secs: cfg.general.me_reinit_drain_timeout_secs,
me_hardswap_warmup_delay_min_ms: cfg.general.me_hardswap_warmup_delay_min_ms,
@@ -227,6 +235,7 @@ impl HotFields {
user_expirations: cfg.access.user_expirations.clone(),
user_data_quota: cfg.access.user_data_quota.clone(),
user_max_unique_ips: cfg.access.user_max_unique_ips.clone(),
user_max_unique_ips_global_each: cfg.access.user_max_unique_ips_global_each,
user_max_unique_ips_mode: cfg.access.user_max_unique_ips_mode,
user_max_unique_ips_window_secs: cfg.access.user_max_unique_ips_window_secs,
}
@@ -287,6 +296,149 @@ fn listeners_equal(
})
}
#[derive(Debug, Clone, Default, PartialEq, Eq)]
struct WatchManifest {
files: BTreeSet<PathBuf>,
dirs: BTreeSet<PathBuf>,
}
impl WatchManifest {
fn from_source_files(source_files: &[PathBuf]) -> Self {
let mut files = BTreeSet::new();
let mut dirs = BTreeSet::new();
for path in source_files {
let normalized = normalize_watch_path(path);
files.insert(normalized.clone());
if let Some(parent) = normalized.parent() {
dirs.insert(parent.to_path_buf());
}
}
Self { files, dirs }
}
fn matches_event_paths(&self, event_paths: &[PathBuf]) -> bool {
event_paths
.iter()
.map(|path| normalize_watch_path(path))
.any(|path| self.files.contains(&path))
}
}
/// Debounce-and-stability bookkeeping for hot config reloads.
///
/// A candidate snapshot hash must be seen several consecutive times before
/// it is applied; a different candidate resets the streak.
#[derive(Debug, Default)]
struct ReloadState {
    applied_snapshot_hash: Option<u64>,
    candidate_snapshot_hash: Option<u64>,
    candidate_hits: u8,
}

impl ReloadState {
    /// Start tracking, seeded with the hash already applied at startup (if any).
    fn new(applied_snapshot_hash: Option<u64>) -> Self {
        Self {
            applied_snapshot_hash,
            ..Self::default()
        }
    }

    /// True when `hash` is exactly the snapshot currently applied.
    fn is_applied(&self, hash: u64) -> bool {
        matches!(self.applied_snapshot_hash, Some(applied) if applied == hash)
    }

    /// Record one sighting of a candidate; returns the consecutive hit count.
    fn observe_candidate(&mut self, hash: u64) -> u8 {
        match self.candidate_snapshot_hash {
            // Same candidate again: extend the streak (saturating, not wrapping).
            Some(current) if current == hash => {
                self.candidate_hits = self.candidate_hits.saturating_add(1);
            }
            // New candidate: restart the streak at one.
            _ => {
                self.candidate_snapshot_hash = Some(hash);
                self.candidate_hits = 1;
            }
        }
        self.candidate_hits
    }

    /// Forget the current candidate entirely.
    fn reset_candidate(&mut self) {
        self.candidate_snapshot_hash = None;
        self.candidate_hits = 0;
    }

    /// Promote `hash` to applied and clear candidate tracking.
    fn mark_applied(&mut self, hash: u64) {
        self.applied_snapshot_hash = Some(hash);
        self.reset_candidate();
    }
}
/// Normalize a path for watch-set comparisons: canonicalize when possible,
/// otherwise fall back to an absolute form (cwd-joined for relative paths).
fn normalize_watch_path(path: &Path) -> PathBuf {
    if let Ok(canonical) = path.canonicalize() {
        return canonical;
    }
    if path.is_absolute() {
        return path.to_path_buf();
    }
    match std::env::current_dir() {
        Ok(cwd) => cwd.join(path),
        // Last resort: keep the relative path unchanged.
        Err(_) => path.to_path_buf(),
    }
}
fn sync_watch_paths<W: Watcher>(
watcher: &mut W,
current: &BTreeSet<PathBuf>,
next: &BTreeSet<PathBuf>,
recursive_mode: RecursiveMode,
kind: &str,
) {
for path in current.difference(next) {
if let Err(e) = watcher.unwatch(path) {
warn!(path = %path.display(), error = %e, "config watcher: failed to unwatch {kind}");
}
}
for path in next.difference(current) {
if let Err(e) = watcher.watch(path, recursive_mode) {
warn!(path = %path.display(), error = %e, "config watcher: failed to watch {kind}");
}
}
}
/// Swap the active watch set to `next_manifest`.
///
/// Directory watches (the notify/inotify watcher) and file watches (the poll
/// watcher) are reconciled separately; either watcher may be absent. The
/// shared manifest state is written last, so the event callbacks keep
/// matching against the old set until the new watches are registered.
fn apply_watch_manifest<W1: Watcher, W2: Watcher>(
    notify_watcher: Option<&mut W1>,
    poll_watcher: Option<&mut W2>,
    manifest_state: &Arc<StdRwLock<WatchManifest>>,
    next_manifest: WatchManifest,
) {
    // A poisoned read lock degrades to an empty manifest, so everything in
    // `next_manifest` gets (re)watched rather than silently skipped.
    let current_manifest = manifest_state
        .read()
        .map(|manifest| manifest.clone())
        .unwrap_or_default();
    // Avoid watch/unwatch churn when nothing actually changed.
    if current_manifest == next_manifest {
        return;
    }
    if let Some(watcher) = notify_watcher {
        sync_watch_paths(
            watcher,
            &current_manifest.dirs,
            &next_manifest.dirs,
            RecursiveMode::NonRecursive,
            "config directory",
        );
    }
    if let Some(watcher) = poll_watcher {
        sync_watch_paths(
            watcher,
            &current_manifest.files,
            &next_manifest.files,
            RecursiveMode::NonRecursive,
            "config file",
        );
    }
    // Best-effort: a poisoned write lock skips the state update.
    if let Ok(mut manifest) = manifest_state.write() {
        *manifest = next_manifest;
    }
}
fn overlay_hot_fields(old: &ProxyConfig, new: &ProxyConfig) -> ProxyConfig {
let mut cfg = old.clone();
@@ -302,6 +454,7 @@ fn overlay_hot_fields(old: &ProxyConfig, new: &ProxyConfig) -> ProxyConfig {
cfg.general.me_reinit_coalesce_window_ms = new.general.me_reinit_coalesce_window_ms;
cfg.general.hardswap = new.general.hardswap;
cfg.general.me_pool_drain_ttl_secs = new.general.me_pool_drain_ttl_secs;
cfg.general.me_pool_drain_threshold = new.general.me_pool_drain_threshold;
cfg.general.me_pool_min_fresh_ratio = new.general.me_pool_min_fresh_ratio;
cfg.general.me_reinit_drain_timeout_secs = new.general.me_reinit_drain_timeout_secs;
cfg.general.me_hardswap_warmup_delay_min_ms = new.general.me_hardswap_warmup_delay_min_ms;
@@ -384,6 +537,7 @@ fn overlay_hot_fields(old: &ProxyConfig, new: &ProxyConfig) -> ProxyConfig {
cfg.access.user_expirations = new.access.user_expirations.clone();
cfg.access.user_data_quota = new.access.user_data_quota.clone();
cfg.access.user_max_unique_ips = new.access.user_max_unique_ips.clone();
cfg.access.user_max_unique_ips_global_each = new.access.user_max_unique_ips_global_each;
cfg.access.user_max_unique_ips_mode = new.access.user_max_unique_ips_mode;
cfg.access.user_max_unique_ips_window_secs = new.access.user_max_unique_ips_window_secs;
@@ -675,6 +829,13 @@ fn log_changes(
);
}
if old_hot.me_pool_drain_threshold != new_hot.me_pool_drain_threshold {
info!(
"config reload: me_pool_drain_threshold: {} → {}",
old_hot.me_pool_drain_threshold, new_hot.me_pool_drain_threshold,
);
}
if (old_hot.me_pool_min_fresh_ratio - new_hot.me_pool_min_fresh_ratio).abs() > f32::EPSILON {
info!(
"config reload: me_pool_min_fresh_ratio: {:.3} → {:.3}",
@@ -951,12 +1112,14 @@ fn log_changes(
new_hot.user_max_unique_ips.len()
);
}
if old_hot.user_max_unique_ips_mode != new_hot.user_max_unique_ips_mode
if old_hot.user_max_unique_ips_global_each != new_hot.user_max_unique_ips_global_each
|| old_hot.user_max_unique_ips_mode != new_hot.user_max_unique_ips_mode
|| old_hot.user_max_unique_ips_window_secs
!= new_hot.user_max_unique_ips_window_secs
{
info!(
"config reload: user_max_unique_ips policy mode={:?} window={}s",
"config reload: user_max_unique_ips policy global_each={} mode={:?} window={}s",
new_hot.user_max_unique_ips_global_each,
new_hot.user_max_unique_ips_mode,
new_hot.user_max_unique_ips_window_secs
);
@@ -970,18 +1133,42 @@ fn reload_config(
log_tx: &watch::Sender<LogLevel>,
detected_ip_v4: Option<IpAddr>,
detected_ip_v6: Option<IpAddr>,
) {
let new_cfg = match ProxyConfig::load(config_path) {
Ok(c) => c,
reload_state: &mut ReloadState,
) -> Option<WatchManifest> {
let loaded = match ProxyConfig::load_with_metadata(config_path) {
Ok(loaded) => loaded,
Err(e) => {
reload_state.reset_candidate();
error!("config reload: failed to parse {:?}: {}", config_path, e);
return;
return None;
}
};
let LoadedConfig {
config: new_cfg,
source_files,
rendered_hash,
} = loaded;
let next_manifest = WatchManifest::from_source_files(&source_files);
if let Err(e) = new_cfg.validate() {
reload_state.reset_candidate();
error!("config reload: validation failed: {}; keeping old config", e);
return;
return Some(next_manifest);
}
if reload_state.is_applied(rendered_hash) {
return Some(next_manifest);
}
let candidate_hits = reload_state.observe_candidate(rendered_hash);
if candidate_hits < HOT_RELOAD_STABLE_SNAPSHOTS {
info!(
snapshot_hash = rendered_hash,
candidate_hits,
required_hits = HOT_RELOAD_STABLE_SNAPSHOTS,
"config reload: candidate snapshot observed but not stable yet"
);
return Some(next_manifest);
}
let old_cfg = config_tx.borrow().clone();
@@ -996,17 +1183,19 @@ fn reload_config(
}
if !hot_changed {
return;
reload_state.mark_applied(rendered_hash);
return Some(next_manifest);
}
if old_hot.dns_overrides != applied_hot.dns_overrides
&& let Err(e) = crate::network::dns_overrides::install_entries(&applied_hot.dns_overrides)
{
reload_state.reset_candidate();
error!(
"config reload: invalid network.dns_overrides: {}; keeping old config",
e
);
return;
return Some(next_manifest);
}
log_changes(
@@ -1018,6 +1207,8 @@ fn reload_config(
detected_ip_v6,
);
config_tx.send(Arc::new(applied_cfg)).ok();
reload_state.mark_applied(rendered_hash);
Some(next_manifest)
}
// ── Public API ────────────────────────────────────────────────────────────────
@@ -1040,80 +1231,86 @@ pub fn spawn_config_watcher(
let (config_tx, config_rx) = watch::channel(initial);
let (log_tx, log_rx) = watch::channel(initial_level);
// Bridge: sync notify callbacks → async task via mpsc.
let (notify_tx, mut notify_rx) = mpsc::channel::<()>(4);
let config_path = normalize_watch_path(&config_path);
let initial_loaded = ProxyConfig::load_with_metadata(&config_path).ok();
let initial_manifest = initial_loaded
.as_ref()
.map(|loaded| WatchManifest::from_source_files(&loaded.source_files))
.unwrap_or_else(|| WatchManifest::from_source_files(std::slice::from_ref(&config_path)));
let initial_snapshot_hash = initial_loaded.as_ref().map(|loaded| loaded.rendered_hash);
// Canonicalize so path matches what notify returns (absolute) in events.
let config_path = match config_path.canonicalize() {
Ok(p) => p,
Err(_) => config_path.to_path_buf(),
};
// Watch the parent directory rather than the file itself, because many
// editors (vim, nano) and systemd write via rename, which would cause
// inotify to lose track of the original inode.
let watch_dir = config_path
.parent()
.unwrap_or_else(|| std::path::Path::new("."))
.to_path_buf();
// ── inotify watcher (instant on local fs) ────────────────────────────
let config_file = config_path.clone();
let tx_inotify = notify_tx.clone();
let inotify_ok = match recommended_watcher(move |res: notify::Result<notify::Event>| {
let Ok(event) = res else { return };
let is_our_file = event.paths.iter().any(|p| p == &config_file);
if !is_our_file { return; }
if matches!(event.kind, EventKind::Modify(_) | EventKind::Create(_) | EventKind::Remove(_)) {
let _ = tx_inotify.try_send(());
}
}) {
Ok(mut w) => match w.watch(&watch_dir, RecursiveMode::NonRecursive) {
Ok(()) => {
info!("config watcher: inotify active on {:?}", config_path);
Box::leak(Box::new(w));
true
}
Err(e) => { warn!("config watcher: inotify watch failed: {}", e); false }
},
Err(e) => { warn!("config watcher: inotify unavailable: {}", e); false }
};
// ── poll watcher (always active, fixes Docker bind mounts / NFS) ─────
// inotify does not receive events for files mounted from the host into
// a container. PollWatcher compares file contents every 3 s and fires
// on any change regardless of the underlying fs.
let config_file2 = config_path.clone();
let tx_poll = notify_tx.clone();
match notify::poll::PollWatcher::new(
move |res: notify::Result<notify::Event>| {
let Ok(event) = res else { return };
let is_our_file = event.paths.iter().any(|p| p == &config_file2);
if !is_our_file { return; }
if matches!(event.kind, EventKind::Modify(_) | EventKind::Create(_) | EventKind::Remove(_)) {
let _ = tx_poll.try_send(());
}
},
notify::Config::default()
.with_poll_interval(std::time::Duration::from_secs(3))
.with_compare_contents(true),
) {
Ok(mut w) => match w.watch(&config_path, RecursiveMode::NonRecursive) {
Ok(()) => {
if inotify_ok {
info!("config watcher: poll watcher also active (Docker/NFS safe)");
} else {
info!("config watcher: poll watcher active on {:?} (3s interval)", config_path);
}
Box::leak(Box::new(w));
}
Err(e) => warn!("config watcher: poll watch failed: {}", e),
},
Err(e) => warn!("config watcher: poll watcher unavailable: {}", e),
}
// ── event loop ───────────────────────────────────────────────────────
tokio::spawn(async move {
let (notify_tx, mut notify_rx) = mpsc::channel::<()>(4);
let manifest_state = Arc::new(StdRwLock::new(WatchManifest::default()));
let mut reload_state = ReloadState::new(initial_snapshot_hash);
let tx_inotify = notify_tx.clone();
let manifest_for_inotify = manifest_state.clone();
let mut inotify_watcher = match recommended_watcher(move |res: notify::Result<notify::Event>| {
let Ok(event) = res else { return };
if !matches!(event.kind, EventKind::Modify(_) | EventKind::Create(_) | EventKind::Remove(_)) {
return;
}
let is_our_file = manifest_for_inotify
.read()
.map(|manifest| manifest.matches_event_paths(&event.paths))
.unwrap_or(false);
if is_our_file {
let _ = tx_inotify.try_send(());
}
}) {
Ok(watcher) => Some(watcher),
Err(e) => {
warn!("config watcher: inotify unavailable: {}", e);
None
}
};
apply_watch_manifest(
inotify_watcher.as_mut(),
Option::<&mut notify::poll::PollWatcher>::None,
&manifest_state,
initial_manifest.clone(),
);
if inotify_watcher.is_some() {
info!("config watcher: inotify active on {:?}", config_path);
}
let tx_poll = notify_tx.clone();
let manifest_for_poll = manifest_state.clone();
let mut poll_watcher = match notify::poll::PollWatcher::new(
move |res: notify::Result<notify::Event>| {
let Ok(event) = res else { return };
if !matches!(event.kind, EventKind::Modify(_) | EventKind::Create(_) | EventKind::Remove(_)) {
return;
}
let is_our_file = manifest_for_poll
.read()
.map(|manifest| manifest.matches_event_paths(&event.paths))
.unwrap_or(false);
if is_our_file {
let _ = tx_poll.try_send(());
}
},
notify::Config::default()
.with_poll_interval(Duration::from_secs(3))
.with_compare_contents(true),
) {
Ok(watcher) => Some(watcher),
Err(e) => {
warn!("config watcher: poll watcher unavailable: {}", e);
None
}
};
apply_watch_manifest(
Option::<&mut notify::RecommendedWatcher>::None,
poll_watcher.as_mut(),
&manifest_state,
initial_manifest.clone(),
);
if poll_watcher.is_some() {
info!("config watcher: poll watcher active (Docker/NFS safe)");
}
#[cfg(unix)]
let mut sighup = {
use tokio::signal::unix::{SignalKind, signal};
@@ -1133,11 +1330,25 @@ pub fn spawn_config_watcher(
#[cfg(not(unix))]
if notify_rx.recv().await.is_none() { break; }
// Debounce: drain extra events that arrive within 50 ms.
tokio::time::sleep(std::time::Duration::from_millis(50)).await;
// Debounce: drain extra events that arrive within a short quiet window.
tokio::time::sleep(HOT_RELOAD_DEBOUNCE).await;
while notify_rx.try_recv().is_ok() {}
reload_config(&config_path, &config_tx, &log_tx, detected_ip_v4, detected_ip_v6);
if let Some(next_manifest) = reload_config(
&config_path,
&config_tx,
&log_tx,
detected_ip_v4,
detected_ip_v6,
&mut reload_state,
) {
apply_watch_manifest(
inotify_watcher.as_mut(),
poll_watcher.as_mut(),
&manifest_state,
next_manifest,
);
}
}
});
@@ -1152,6 +1363,40 @@ mod tests {
ProxyConfig::default()
}
/// Write a minimal reload-test config to `path`.
///
/// Always emits the base `[censorship]` + `[access.users]` sections; appends
/// a `[general]` section with `ad_tag` and/or a `[server]` section with
/// `port` only when the corresponding argument is `Some`.
///
/// Fix: the original guarded the `[general]` section with a redundant
/// `ad_tag.is_some()` check around a nested `if let Some(tag)`; a single
/// `if let` expresses the same thing (clippy would flag the pair).
fn write_reload_config(path: &Path, ad_tag: Option<&str>, server_port: Option<u16>) {
    let mut config = String::from(
        r#"
[censorship]
tls_domain = "example.com"
[access.users]
user = "00000000000000000000000000000000"
"#,
    );
    if let Some(tag) = ad_tag {
        config.push_str("\n[general]\n");
        config.push_str(&format!("ad_tag = \"{tag}\"\n"));
    }
    if let Some(port) = server_port {
        config.push_str("\n[server]\n");
        config.push_str(&format!("port = {port}\n"));
    }
    std::fs::write(path, config).unwrap();
}
/// Produce a unique temp-dir path `<prefix>_<nanos>.toml`; the nanosecond
/// timestamp keeps concurrently running tests from colliding.
fn temp_config_path(prefix: &str) -> PathBuf {
    let elapsed = std::time::SystemTime::now()
        .duration_since(std::time::UNIX_EPOCH)
        .unwrap();
    let file_name = format!("{}_{}.toml", prefix, elapsed.as_nanos());
    std::env::temp_dir().join(file_name)
}
#[test]
fn overlay_applies_hot_and_preserves_non_hot() {
let old = sample_config();
@@ -1219,4 +1464,61 @@ mod tests {
assert_eq!(applied.general.use_middle_proxy, old.general.use_middle_proxy);
assert!(!config_equal(&applied, &new));
}
#[test]
fn reload_requires_stable_snapshot_before_hot_apply() {
    // A changed snapshot must be observed HOT_RELOAD_STABLE_SNAPSHOTS (2)
    // consecutive times before it is applied to the live config.
    let initial_tag = "11111111111111111111111111111111";
    let final_tag = "22222222222222222222222222222222";
    let path = temp_config_path("telemt_hot_reload_stable");
    write_reload_config(&path, Some(initial_tag), None);
    let initial_cfg = Arc::new(ProxyConfig::load(&path).unwrap());
    let initial_hash = ProxyConfig::load_with_metadata(&path).unwrap().rendered_hash;
    let (config_tx, _config_rx) = watch::channel(initial_cfg.clone());
    let (log_tx, _log_rx) = watch::channel(initial_cfg.general.log_level.clone());
    let mut reload_state = ReloadState::new(Some(initial_hash));
    // First rewrite drops the ad_tag: a single observation must NOT apply.
    write_reload_config(&path, None, None);
    reload_config(&path, &config_tx, &log_tx, None, None, &mut reload_state).unwrap();
    assert_eq!(
        config_tx.borrow().general.ad_tag.as_deref(),
        Some(initial_tag)
    );
    // A different candidate resets the stability streak: still not applied.
    write_reload_config(&path, Some(final_tag), None);
    reload_config(&path, &config_tx, &log_tx, None, None, &mut reload_state).unwrap();
    assert_eq!(
        config_tx.borrow().general.ad_tag.as_deref(),
        Some(initial_tag)
    );
    // Second consecutive sighting of the same snapshot finally applies it.
    reload_config(&path, &config_tx, &log_tx, None, None, &mut reload_state).unwrap();
    assert_eq!(config_tx.borrow().general.ad_tag.as_deref(), Some(final_tag));
    let _ = std::fs::remove_file(path);
}
#[test]
fn reload_keeps_hot_apply_when_non_hot_fields_change() {
    // A snapshot mixing a hot change (ad_tag) with a non-hot change
    // (server.port) must apply the hot part while leaving the port unchanged.
    let initial_tag = "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa";
    let final_tag = "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb";
    let path = temp_config_path("telemt_hot_reload_mixed");
    write_reload_config(&path, Some(initial_tag), None);
    let initial_cfg = Arc::new(ProxyConfig::load(&path).unwrap());
    let initial_hash = ProxyConfig::load_with_metadata(&path).unwrap().rendered_hash;
    let (config_tx, _config_rx) = watch::channel(initial_cfg.clone());
    let (log_tx, _log_rx) = watch::channel(initial_cfg.general.log_level.clone());
    let mut reload_state = ReloadState::new(Some(initial_hash));
    write_reload_config(&path, Some(final_tag), Some(initial_cfg.server.port + 1));
    // Two reloads: the stability rule needs two sightings before applying.
    reload_config(&path, &config_tx, &log_tx, None, None, &mut reload_state).unwrap();
    reload_config(&path, &config_tx, &log_tx, None, None, &mut reload_state).unwrap();
    let applied = config_tx.borrow().clone();
    assert_eq!(applied.general.ad_tag.as_deref(), Some(final_tag));
    // Non-hot field retains its original value despite the file change.
    assert_eq!(applied.server.port, initial_cfg.server.port);
    let _ = std::fs::remove_file(path);
}
}

View File

@@ -1,8 +1,9 @@
#![allow(deprecated)]
use std::collections::HashMap;
use std::collections::{BTreeSet, HashMap};
use std::hash::{DefaultHasher, Hash, Hasher};
use std::net::{IpAddr, SocketAddr};
use std::path::Path;
use std::path::{Path, PathBuf};
use rand::Rng;
use tracing::warn;
@@ -13,7 +14,37 @@ use crate::error::{ProxyError, Result};
use super::defaults::*;
use super::types::*;
fn preprocess_includes(content: &str, base_dir: &Path, depth: u8) -> Result<String> {
/// Result of loading a config file together with reload-tracking metadata.
#[derive(Debug, Clone)]
pub(crate) struct LoadedConfig {
    /// The parsed and post-processed proxy configuration.
    pub(crate) config: ProxyConfig,
    /// Every file that contributed to the rendered config — the main file
    /// plus all `include`d files — normalized to absolute paths.
    pub(crate) source_files: Vec<PathBuf>,
    /// Hash of the fully rendered (include-expanded) config text, used to
    /// detect whether a reload actually changed anything.
    pub(crate) rendered_hash: u64,
}
/// Normalize a config file path to a stable absolute form.
///
/// Prefers the canonical (symlink-resolved) path when the file exists;
/// otherwise falls back to a best-effort absolute path without touching the
/// filesystem, so not-yet-created paths still compare consistently.
fn normalize_config_path(path: &Path) -> PathBuf {
    match path.canonicalize() {
        Ok(canonical) => canonical,
        // canonicalize() fails for nonexistent paths; fall back manually.
        Err(_) => {
            if path.is_absolute() {
                return path.to_path_buf();
            }
            match std::env::current_dir() {
                Ok(cwd) => cwd.join(path),
                // No usable cwd either — return the path unchanged.
                Err(_) => path.to_path_buf(),
            }
        }
    }
}
/// Hash the fully rendered config text so reloads can cheaply detect whether
/// anything actually changed between snapshots.
fn hash_rendered_snapshot(rendered: &str) -> u64 {
    let mut state = DefaultHasher::new();
    Hash::hash(rendered, &mut state);
    state.finish()
}
fn preprocess_includes(
content: &str,
base_dir: &Path,
depth: u8,
source_files: &mut BTreeSet<PathBuf>,
) -> Result<String> {
if depth > 10 {
return Err(ProxyError::Config("Include depth > 10".into()));
}
@@ -25,10 +56,16 @@ fn preprocess_includes(content: &str, base_dir: &Path, depth: u8) -> Result<Stri
if let Some(rest) = rest.strip_prefix('=') {
let path_str = rest.trim().trim_matches('"');
let resolved = base_dir.join(path_str);
source_files.insert(normalize_config_path(&resolved));
let included = std::fs::read_to_string(&resolved)
.map_err(|e| ProxyError::Config(e.to_string()))?;
let included_dir = resolved.parent().unwrap_or(base_dir);
output.push_str(&preprocess_includes(&included, included_dir, depth + 1)?);
output.push_str(&preprocess_includes(
&included,
included_dir,
depth + 1,
source_files,
)?);
output.push('\n');
continue;
}
@@ -138,10 +175,16 @@ pub struct ProxyConfig {
impl ProxyConfig {
pub fn load<P: AsRef<Path>>(path: P) -> Result<Self> {
let content =
std::fs::read_to_string(&path).map_err(|e| ProxyError::Config(e.to_string()))?;
let base_dir = path.as_ref().parent().unwrap_or(Path::new("."));
let processed = preprocess_includes(&content, base_dir, 0)?;
Self::load_with_metadata(path).map(|loaded| loaded.config)
}
pub(crate) fn load_with_metadata<P: AsRef<Path>>(path: P) -> Result<LoadedConfig> {
let path = path.as_ref();
let content = std::fs::read_to_string(path).map_err(|e| ProxyError::Config(e.to_string()))?;
let base_dir = path.parent().unwrap_or(Path::new("."));
let mut source_files = BTreeSet::new();
source_files.insert(normalize_config_path(path));
let processed = preprocess_includes(&content, base_dir, 0, &mut source_files)?;
let parsed_toml: toml::Value =
toml::from_str(&processed).map_err(|e| ProxyError::Config(e.to_string()))?;
@@ -786,7 +829,11 @@ impl ProxyConfig {
.entry("203".to_string())
.or_insert_with(|| vec!["91.105.192.100:443".to_string()]);
Ok(config)
Ok(LoadedConfig {
config,
source_files: source_files.into_iter().collect(),
rendered_hash: hash_rendered_snapshot(&processed),
})
}
pub fn validate(&self) -> Result<()> {
@@ -1111,6 +1158,48 @@ mod tests {
);
}
/// `load_with_metadata` must record every file that contributed to the
/// rendered config — the main file and each `include`d file — as
/// normalized absolute paths in `source_files`.
#[test]
fn load_with_metadata_collects_include_files() {
    // Nanosecond nonce keeps the temp directory unique across parallel runs.
    let nonce = std::time::SystemTime::now()
        .duration_since(std::time::UNIX_EPOCH)
        .unwrap()
        .as_nanos();
    let dir = std::env::temp_dir().join(format!("telemt_load_metadata_{nonce}"));
    std::fs::create_dir_all(&dir).unwrap();
    let main_path = dir.join("config.toml");
    let include_path = dir.join("included.toml");
    // The included fragment defines a user section...
    std::fs::write(
        &include_path,
        r#"
[access.users]
user = "00000000000000000000000000000000"
"#,
    )
    .unwrap();
    // ...and the main file pulls it in via a relative `include` directive.
    std::fs::write(
        &main_path,
        r#"
include = "included.toml"
[censorship]
tls_domain = "example.com"
"#,
    )
    .unwrap();
    let loaded = ProxyConfig::load_with_metadata(&main_path).unwrap();
    // Compare against the same normalization the loader applies to paths.
    let main_normalized = normalize_config_path(&main_path);
    let include_normalized = normalize_config_path(&include_path);
    assert!(loaded.source_files.contains(&main_normalized));
    assert!(loaded.source_files.contains(&include_normalized));
    // Best-effort cleanup; failures here must not fail the test.
    let _ = std::fs::remove_file(main_path);
    let _ = std::fs::remove_file(include_path);
    let _ = std::fs::remove_dir(dir);
}
#[test]
fn dc_overrides_inject_dc203_default() {
let toml = r#"

View File

@@ -3,6 +3,7 @@ use ipnetwork::IpNetwork;
use serde::{Deserialize, Serialize};
use std::collections::HashMap;
use std::net::IpAddr;
use std::path::PathBuf;
use super::defaults::*;
@@ -356,6 +357,9 @@ impl Default for NetworkConfig {
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct GeneralConfig {
#[serde(default)]
pub data_path: Option<PathBuf>,
#[serde(default)]
pub modes: ProxyModes,
@@ -794,6 +798,11 @@ pub struct GeneralConfig {
#[serde(default = "default_me_pool_drain_ttl_secs")]
pub me_pool_drain_ttl_secs: u64,
/// Maximum allowed number of draining ME writers before oldest ones are force-closed in batches.
/// Set to 0 to disable threshold-based draining cleanup and keep timeout-only behavior.
#[serde(default = "default_me_pool_drain_threshold")]
pub me_pool_drain_threshold: u64,
/// Policy for new binds on stale draining writers.
#[serde(default)]
pub me_bind_stale_mode: MeBindStaleMode,
@@ -866,6 +875,7 @@ pub struct GeneralConfig {
impl Default for GeneralConfig {
fn default() -> Self {
Self {
data_path: None,
modes: ProxyModes::default(),
prefer_ipv6: false,
fast_mode: default_true(),
@@ -973,6 +983,7 @@ impl Default for GeneralConfig {
me_secret_atomic_snapshot: default_me_secret_atomic_snapshot(),
proxy_secret_len_max: default_proxy_secret_len_max(),
me_pool_drain_ttl_secs: default_me_pool_drain_ttl_secs(),
me_pool_drain_threshold: default_me_pool_drain_threshold(),
me_bind_stale_mode: MeBindStaleMode::default(),
me_bind_stale_ttl_secs: default_me_bind_stale_ttl_secs(),
me_pool_min_fresh_ratio: default_me_pool_min_fresh_ratio(),
@@ -1156,6 +1167,11 @@ pub struct ServerConfig {
#[serde(default)]
pub listeners: Vec<ListenerConfig>,
/// Maximum number of concurrent client connections.
/// 0 means unlimited.
#[serde(default = "default_server_max_connections")]
pub max_connections: u32,
}
impl Default for ServerConfig {
@@ -1173,6 +1189,7 @@ impl Default for ServerConfig {
metrics_whitelist: default_metrics_whitelist(),
api: ApiConfig::default(),
listeners: Vec::new(),
max_connections: default_server_max_connections(),
}
}
}
@@ -1317,6 +1334,11 @@ pub struct AccessConfig {
#[serde(default)]
pub user_max_unique_ips: HashMap<String, usize>,
/// Global per-user unique IP limit applied when a user has no individual override.
/// `0` disables the inherited limit.
#[serde(default = "default_user_max_unique_ips_global_each")]
pub user_max_unique_ips_global_each: usize,
#[serde(default)]
pub user_max_unique_ips_mode: UserMaxUniqueIpsMode,
@@ -1342,6 +1364,7 @@ impl Default for AccessConfig {
user_expirations: HashMap::new(),
user_data_quota: HashMap::new(),
user_max_unique_ips: HashMap::new(),
user_max_unique_ips_global_each: default_user_max_unique_ips_global_each(),
user_max_unique_ips_mode: UserMaxUniqueIpsMode::default(),
user_max_unique_ips_window_secs: default_user_max_unique_ips_window_secs(),
replay_check_len: default_replay_check_len(),

View File

@@ -17,6 +17,7 @@ pub struct UserIpTracker {
active_ips: Arc<RwLock<HashMap<String, HashMap<IpAddr, usize>>>>,
recent_ips: Arc<RwLock<HashMap<String, HashMap<IpAddr, Instant>>>>,
max_ips: Arc<RwLock<HashMap<String, usize>>>,
default_max_ips: Arc<RwLock<usize>>,
limit_mode: Arc<RwLock<UserMaxUniqueIpsMode>>,
limit_window: Arc<RwLock<Duration>>,
last_compact_epoch_secs: Arc<AtomicU64>,
@@ -28,6 +29,7 @@ impl UserIpTracker {
active_ips: Arc::new(RwLock::new(HashMap::new())),
recent_ips: Arc::new(RwLock::new(HashMap::new())),
max_ips: Arc::new(RwLock::new(HashMap::new())),
default_max_ips: Arc::new(RwLock::new(0)),
limit_mode: Arc::new(RwLock::new(UserMaxUniqueIpsMode::ActiveWindow)),
limit_window: Arc::new(RwLock::new(Duration::from_secs(30))),
last_compact_epoch_secs: Arc::new(AtomicU64::new(0)),
@@ -100,7 +102,10 @@ impl UserIpTracker {
limits.remove(username);
}
pub async fn load_limits(&self, limits: &HashMap<String, usize>) {
pub async fn load_limits(&self, default_limit: usize, limits: &HashMap<String, usize>) {
let mut default_max_ips = self.default_max_ips.write().await;
*default_max_ips = default_limit;
drop(default_max_ips);
let mut max_ips = self.max_ips.write().await;
max_ips.clone_from(limits);
}
@@ -114,9 +119,14 @@ impl UserIpTracker {
pub async fn check_and_add(&self, username: &str, ip: IpAddr) -> Result<(), String> {
self.maybe_compact_empty_users().await;
let default_max_ips = *self.default_max_ips.read().await;
let limit = {
let max_ips = self.max_ips.read().await;
max_ips.get(username).copied()
max_ips
.get(username)
.copied()
.filter(|limit| *limit > 0)
.or((default_max_ips > 0).then_some(default_max_ips))
};
let mode = *self.limit_mode.read().await;
let window = *self.limit_window.read().await;
@@ -255,10 +265,16 @@ impl UserIpTracker {
pub async fn get_stats(&self) -> Vec<(String, usize, usize)> {
let active_ips = self.active_ips.read().await;
let max_ips = self.max_ips.read().await;
let default_max_ips = *self.default_max_ips.read().await;
let mut stats = Vec::new();
for (username, user_ips) in active_ips.iter() {
let limit = max_ips.get(username).copied().unwrap_or(0);
let limit = max_ips
.get(username)
.copied()
.filter(|limit| *limit > 0)
.or((default_max_ips > 0).then_some(default_max_ips))
.unwrap_or(0);
stats.push((username.clone(), user_ips.len(), limit));
}
@@ -293,8 +309,13 @@ impl UserIpTracker {
}
pub async fn get_user_limit(&self, username: &str) -> Option<usize> {
let default_max_ips = *self.default_max_ips.read().await;
let max_ips = self.max_ips.read().await;
max_ips.get(username).copied()
max_ips
.get(username)
.copied()
.filter(|limit| *limit > 0)
.or((default_max_ips > 0).then_some(default_max_ips))
}
pub async fn format_stats(&self) -> String {
@@ -546,7 +567,7 @@ mod tests {
config_limits.insert("user1".to_string(), 5);
config_limits.insert("user2".to_string(), 3);
tracker.load_limits(&config_limits).await;
tracker.load_limits(0, &config_limits).await;
assert_eq!(tracker.get_user_limit("user1").await, Some(5));
assert_eq!(tracker.get_user_limit("user2").await, Some(3));
@@ -560,16 +581,46 @@ mod tests {
let mut first = HashMap::new();
first.insert("user1".to_string(), 2);
first.insert("user2".to_string(), 3);
tracker.load_limits(&first).await;
tracker.load_limits(0, &first).await;
let mut second = HashMap::new();
second.insert("user2".to_string(), 5);
tracker.load_limits(&second).await;
tracker.load_limits(0, &second).await;
assert_eq!(tracker.get_user_limit("user1").await, None);
assert_eq!(tracker.get_user_limit("user2").await, Some(5));
}
/// With no per-user override configured, the inherited global limit must cap
/// the number of distinct IPs accepted for a user.
#[tokio::test]
async fn test_global_each_limit_applies_without_user_override() {
    let tracker = UserIpTracker::new();
    let no_overrides = HashMap::new();
    tracker.load_limits(2, &no_overrides).await;
    // The first two distinct IPs fit within the inherited limit of 2.
    for octet in 1..=2 {
        let allowed_ip = test_ipv4(172, 16, 0, octet);
        assert!(tracker.check_and_add("test_user", allowed_ip).await.is_ok());
    }
    // A third distinct IP exceeds the inherited limit and must be rejected.
    let rejected_ip = test_ipv4(172, 16, 0, 3);
    assert!(tracker.check_and_add("test_user", rejected_ip).await.is_err());
    // The effective limit reported for the user is the global one.
    assert_eq!(tracker.get_user_limit("test_user").await, Some(2));
}
/// An explicit per-user limit must take precedence over a more permissive
/// inherited global limit.
#[tokio::test]
async fn test_user_override_wins_over_global_each_limit() {
    let tracker = UserIpTracker::new();
    let overrides: HashMap<String, usize> =
        [("test_user".to_string(), 1)].into_iter().collect();
    tracker.load_limits(3, &overrides).await;
    let first_ip = test_ipv4(172, 17, 0, 1);
    let second_ip = test_ipv4(172, 17, 0, 2);
    // The override of 1 admits a single IP and rejects the second, even
    // though the global limit (3) would have allowed it.
    assert!(tracker.check_and_add("test_user", first_ip).await.is_ok());
    assert!(tracker.check_and_add("test_user", second_ip).await.is_err());
    // The reported effective limit is the per-user override, not the global.
    assert_eq!(tracker.get_user_limit("test_user").await, Some(1));
}
#[tokio::test]
async fn test_time_window_mode_blocks_recent_ip_churn() {
let tracker = UserIpTracker::new();

View File

@@ -1,4 +1,5 @@
use std::time::Duration;
use std::path::PathBuf;
use tokio::sync::watch;
use tracing::{debug, error, info, warn};
@@ -9,8 +10,9 @@ use crate::transport::middle_proxy::{
ProxyConfigData, fetch_proxy_config_with_raw, load_proxy_config_cache, save_proxy_config_cache,
};
pub(crate) fn parse_cli() -> (String, bool, Option<String>) {
pub(crate) fn parse_cli() -> (String, Option<PathBuf>, bool, Option<String>) {
let mut config_path = "config.toml".to_string();
let mut data_path: Option<PathBuf> = None;
let mut silent = false;
let mut log_level: Option<String> = None;
@@ -28,6 +30,18 @@ pub(crate) fn parse_cli() -> (String, bool, Option<String>) {
let mut i = 0;
while i < args.len() {
match args[i].as_str() {
"--data-path" => {
i += 1;
if i < args.len() {
data_path = Some(PathBuf::from(args[i].clone()));
} else {
eprintln!("Missing value for --data-path");
std::process::exit(0);
}
}
s if s.starts_with("--data-path=") => {
data_path = Some(PathBuf::from(s.trim_start_matches("--data-path=").to_string()));
}
"--silent" | "-s" => {
silent = true;
}
@@ -44,6 +58,7 @@ pub(crate) fn parse_cli() -> (String, bool, Option<String>) {
eprintln!("Usage: telemt [config.toml] [OPTIONS]");
eprintln!();
eprintln!("Options:");
eprintln!(" --data-path <DIR> Set data directory (absolute path; overrides config value)");
eprintln!(" --silent, -s Suppress info logs");
eprintln!(" --log-level <LEVEL> debug|verbose|normal|silent");
eprintln!(" --help, -h Show this help");
@@ -78,7 +93,7 @@ pub(crate) fn parse_cli() -> (String, bool, Option<String>) {
i += 1;
}
(config_path, silent, log_level)
(config_path, data_path, silent, log_level)
}
pub(crate) fn print_proxy_links(host: &str, port: u16, config: &ProxyConfig) {

View File

@@ -237,6 +237,7 @@ pub(crate) async fn initialize_me_pool(
config.general.me_adaptive_floor_max_warm_writers_global,
config.general.hardswap,
config.general.me_pool_drain_ttl_secs,
config.general.me_pool_drain_threshold,
config.general.effective_me_pool_force_close_secs(),
config.general.me_pool_min_fresh_ratio,
config.general.me_hardswap_warmup_delay_min_ms,

View File

@@ -58,7 +58,7 @@ pub async fn run() -> std::result::Result<(), Box<dyn std::error::Error>> {
startup_tracker
.start_component(COMPONENT_CONFIG_LOAD, Some("load and validate config".to_string()))
.await;
let (config_path, cli_silent, cli_log_level) = parse_cli();
let (config_path, data_path, cli_silent, cli_log_level) = parse_cli();
let mut config = match ProxyConfig::load(&config_path) {
Ok(c) => c,
@@ -80,6 +80,34 @@ pub async fn run() -> std::result::Result<(), Box<dyn std::error::Error>> {
std::process::exit(1);
}
if let Some(p) = data_path {
config.general.data_path = Some(p);
}
if let Some(ref data_path) = config.general.data_path {
if !data_path.is_absolute() {
eprintln!("[telemt] data_path must be absolute: {}", data_path.display());
std::process::exit(1);
}
if data_path.exists() {
if !data_path.is_dir() {
eprintln!("[telemt] data_path exists but is not a directory: {}", data_path.display());
std::process::exit(1);
}
} else {
if let Err(e) = std::fs::create_dir_all(data_path) {
eprintln!("[telemt] Can't create data_path {}: {}", data_path.display(), e);
std::process::exit(1);
}
}
if let Err(e) = std::env::set_current_dir(data_path) {
eprintln!("[telemt] Can't use data_path {}: {}", data_path.display(), e);
std::process::exit(1);
}
}
if let Err(e) = crate::network::dns_overrides::install_entries(&config.network.dns_overrides) {
eprintln!("[telemt] Invalid network.dns_overrides: {}", e);
std::process::exit(1);
@@ -168,17 +196,24 @@ pub async fn run() -> std::result::Result<(), Box<dyn std::error::Error>> {
stats.clone(),
));
let ip_tracker = Arc::new(UserIpTracker::new());
ip_tracker.load_limits(&config.access.user_max_unique_ips).await;
ip_tracker
.load_limits(
config.access.user_max_unique_ips_global_each,
&config.access.user_max_unique_ips,
)
.await;
ip_tracker
.set_limit_policy(
config.access.user_max_unique_ips_mode,
config.access.user_max_unique_ips_window_secs,
)
.await;
if !config.access.user_max_unique_ips.is_empty() {
if config.access.user_max_unique_ips_global_each > 0 || !config.access.user_max_unique_ips.is_empty()
{
info!(
"IP limits configured for {} users",
config.access.user_max_unique_ips.len()
global_each_limit = config.access.user_max_unique_ips_global_each,
explicit_user_limits = config.access.user_max_unique_ips.len(),
"User unique IP limits configured"
);
}
if !config.network.dns_overrides.is_empty() {
@@ -220,6 +255,7 @@ pub async fn run() -> std::result::Result<(), Box<dyn std::error::Error>> {
let ip_tracker_api = ip_tracker.clone();
let me_pool_api = api_me_pool.clone();
let upstream_manager_api = upstream_manager.clone();
let route_runtime_api = route_runtime.clone();
let config_rx_api = api_config_rx.clone();
let admission_rx_api = admission_rx.clone();
let config_path_api = std::path::PathBuf::from(&config_path);
@@ -231,6 +267,7 @@ pub async fn run() -> std::result::Result<(), Box<dyn std::error::Error>> {
stats_api,
ip_tracker_api,
me_pool_api,
route_runtime_api,
upstream_manager_api,
config_rx_api,
admission_rx_api,
@@ -285,6 +322,7 @@ pub async fn run() -> std::result::Result<(), Box<dyn std::error::Error>> {
.await;
let probe = run_probe(
&config.network,
&config.upstreams,
config.general.middle_proxy_nat_probe,
config.general.stun_nat_probe_concurrency,
)
@@ -293,7 +331,11 @@ pub async fn run() -> std::result::Result<(), Box<dyn std::error::Error>> {
probe.detected_ipv4.map(IpAddr::V4),
probe.detected_ipv6.map(IpAddr::V6),
));
let decision = decide_network_capabilities(&config.network, &probe);
let decision = decide_network_capabilities(
&config.network,
&probe,
config.general.middle_proxy_nat_ip,
);
log_probe_result(&probe, &decision);
startup_tracker
.complete_component(
@@ -307,8 +349,13 @@ pub async fn run() -> std::result::Result<(), Box<dyn std::error::Error>> {
let beobachten = Arc::new(BeobachtenStore::new());
let rng = Arc::new(SecureRandom::new());
// Connection concurrency limit
let max_connections = Arc::new(Semaphore::new(10_000));
// Connection concurrency limit (0 = unlimited)
let max_connections_limit = if config.server.max_connections == 0 {
Semaphore::MAX_PERMITS
} else {
config.server.max_connections as usize
};
let max_connections = Arc::new(Semaphore::new(max_connections_limit));
let me2dc_fallback = config.general.me2dc_fallback;
let me_init_retry_attempts = config.general.me_init_retry_attempts;

View File

@@ -131,6 +131,10 @@ pub(crate) async fn spawn_runtime_tasks(
let mut config_rx_ip_limits = config_rx.clone();
tokio::spawn(async move {
let mut prev_limits = config_rx_ip_limits.borrow().access.user_max_unique_ips.clone();
let mut prev_global_each = config_rx_ip_limits
.borrow()
.access
.user_max_unique_ips_global_each;
let mut prev_mode = config_rx_ip_limits.borrow().access.user_max_unique_ips_mode;
let mut prev_window = config_rx_ip_limits
.borrow()
@@ -143,9 +147,17 @@ pub(crate) async fn spawn_runtime_tasks(
}
let cfg = config_rx_ip_limits.borrow_and_update().clone();
if prev_limits != cfg.access.user_max_unique_ips {
ip_tracker_policy.load_limits(&cfg.access.user_max_unique_ips).await;
if prev_limits != cfg.access.user_max_unique_ips
|| prev_global_each != cfg.access.user_max_unique_ips_global_each
{
ip_tracker_policy
.load_limits(
cfg.access.user_max_unique_ips_global_each,
&cfg.access.user_max_unique_ips,
)
.await;
prev_limits = cfg.access.user_max_unique_ips.clone();
prev_global_each = cfg.access.user_max_unique_ips_global_each;
}
if prev_mode != cfg.access.user_max_unique_ips_mode

View File

@@ -1774,14 +1774,24 @@ async fn render_metrics(stats: &Stats, config: &ProxyConfig, ip_tracker: &UserIp
"# HELP telemt_user_unique_ips_recent_window Per-user unique IPs seen in configured observation window"
);
let _ = writeln!(out, "# TYPE telemt_user_unique_ips_recent_window gauge");
let _ = writeln!(out, "# HELP telemt_user_unique_ips_limit Per-user configured unique IP limit (0 means unlimited)");
let _ = writeln!(out, "# HELP telemt_user_unique_ips_limit Effective per-user unique IP limit (0 means unlimited)");
let _ = writeln!(out, "# TYPE telemt_user_unique_ips_limit gauge");
let _ = writeln!(out, "# HELP telemt_user_unique_ips_utilization Per-user unique IP usage ratio (0 for unlimited)");
let _ = writeln!(out, "# TYPE telemt_user_unique_ips_utilization gauge");
for user in unique_users {
let current = ip_counts.get(&user).copied().unwrap_or(0);
let limit = config.access.user_max_unique_ips.get(&user).copied().unwrap_or(0);
let limit = config
.access
.user_max_unique_ips
.get(&user)
.copied()
.filter(|limit| *limit > 0)
.or(
(config.access.user_max_unique_ips_global_each > 0)
.then_some(config.access.user_max_unique_ips_global_each),
)
.unwrap_or(0);
let utilization = if limit > 0 {
current as f64 / limit as f64
} else {
@@ -1904,6 +1914,25 @@ mod tests {
assert!(output.contains("telemt_user_unique_ips_recent_window{user="));
}
/// With one tracked IP and an inherited global limit of 2, the rendered
/// metrics must report limit=2 and a utilization of 1/2 for the user.
#[tokio::test]
async fn test_render_uses_global_each_unique_ip_limit() {
    let stats = Stats::new();
    stats.increment_user_connects("alice");
    stats.increment_user_curr_connects("alice");

    let tracker = UserIpTracker::new();
    let alice_ip = "203.0.113.10".parse().unwrap();
    tracker.check_and_add("alice", alice_ip).await.unwrap();

    // No explicit per-user limit: only the global "each" limit is set.
    let mut config = ProxyConfig::default();
    config.access.user_max_unique_ips_global_each = 2;

    let output = render_metrics(&stats, &config, &tracker).await;
    let expected_lines = [
        "telemt_user_unique_ips_limit{user=\"alice\"} 2",
        "telemt_user_unique_ips_utilization{user=\"alice\"} 0.500000",
    ];
    for expected in expected_lines {
        assert!(output.contains(expected));
    }
}
#[tokio::test]
async fn test_render_has_type_annotations() {
let stats = Stats::new();

View File

@@ -8,9 +8,10 @@ use tokio::task::JoinSet;
use tokio::time::timeout;
use tracing::{debug, info, warn};
use crate::config::NetworkConfig;
use crate::config::{NetworkConfig, UpstreamConfig, UpstreamType};
use crate::error::Result;
use crate::network::stun::{stun_probe_dual, DualStunResult, IpFamily, StunProbeResult};
use crate::network::stun::{stun_probe_family_with_bind, DualStunResult, IpFamily, StunProbeResult};
use crate::transport::UpstreamManager;
#[derive(Debug, Clone, Default)]
pub struct NetworkProbe {
@@ -57,19 +58,22 @@ const STUN_BATCH_TIMEOUT: Duration = Duration::from_secs(5);
pub async fn run_probe(
config: &NetworkConfig,
upstreams: &[UpstreamConfig],
nat_probe: bool,
stun_nat_probe_concurrency: usize,
) -> Result<NetworkProbe> {
let mut probe = NetworkProbe::default();
let servers = collect_stun_servers(config);
let mut detected_ipv4 = detect_local_ip_v4();
let mut detected_ipv6 = detect_local_ip_v6();
let mut explicit_detected_ipv4 = false;
let mut explicit_detected_ipv6 = false;
let mut explicit_reflected_ipv4 = false;
let mut explicit_reflected_ipv6 = false;
let mut strict_bind_ipv4_requested = false;
let mut strict_bind_ipv6_requested = false;
probe.detected_ipv4 = detect_local_ip_v4();
probe.detected_ipv6 = detect_local_ip_v6();
probe.ipv4_is_bogon = probe.detected_ipv4.map(is_bogon_v4).unwrap_or(false);
probe.ipv6_is_bogon = probe.detected_ipv6.map(is_bogon_v6).unwrap_or(false);
let stun_res = if nat_probe && config.stun_use {
let servers = collect_stun_servers(config);
let global_stun_res = if nat_probe && config.stun_use {
if servers.is_empty() {
warn!("STUN probe is enabled but network.stun_servers is empty");
DualStunResult::default()
@@ -77,6 +81,8 @@ pub async fn run_probe(
probe_stun_servers_parallel(
&servers,
stun_nat_probe_concurrency.max(1),
None,
None,
)
.await
}
@@ -86,8 +92,108 @@ pub async fn run_probe(
} else {
DualStunResult::default()
};
probe.reflected_ipv4 = stun_res.v4.map(|r| r.reflected_addr);
probe.reflected_ipv6 = stun_res.v6.map(|r| r.reflected_addr);
let mut reflected_ipv4 = global_stun_res.v4.map(|r| r.reflected_addr);
let mut reflected_ipv6 = global_stun_res.v6.map(|r| r.reflected_addr);
for upstream in upstreams.iter().filter(|upstream| upstream.enabled) {
let UpstreamType::Direct {
interface,
bind_addresses,
} = &upstream.upstream_type else {
continue;
};
if let Some(addrs) = bind_addresses.as_ref().filter(|v| !v.is_empty()) {
let mut saw_parsed_ip = false;
for value in addrs {
if let Ok(ip) = value.parse::<IpAddr>() {
saw_parsed_ip = true;
if ip.is_ipv4() {
strict_bind_ipv4_requested = true;
} else {
strict_bind_ipv6_requested = true;
}
}
}
if !saw_parsed_ip {
strict_bind_ipv4_requested = true;
strict_bind_ipv6_requested = true;
}
}
let bind_v4 = UpstreamManager::resolve_bind_address(
interface,
bind_addresses,
SocketAddr::new(IpAddr::V4(Ipv4Addr::new(198, 51, 100, 1)), 443),
None,
true,
);
let bind_v6 = UpstreamManager::resolve_bind_address(
interface,
bind_addresses,
SocketAddr::new(
IpAddr::V6(Ipv6Addr::new(0x2001, 0xdb8, 0, 0, 0, 0, 0, 1)),
443,
),
None,
true,
);
if let Some(IpAddr::V4(ip)) = bind_v4
&& !explicit_detected_ipv4
{
detected_ipv4 = Some(ip);
explicit_detected_ipv4 = true;
}
if let Some(IpAddr::V6(ip)) = bind_v6
&& !explicit_detected_ipv6
{
detected_ipv6 = Some(ip);
explicit_detected_ipv6 = true;
}
if bind_v4.is_none() && bind_v6.is_none() {
continue;
}
if !(nat_probe && config.stun_use) || servers.is_empty() {
continue;
}
let direct_stun_res = probe_stun_servers_parallel(
&servers,
stun_nat_probe_concurrency.max(1),
bind_v4,
bind_v6,
)
.await;
if let Some(reflected) = direct_stun_res.v4.map(|r| r.reflected_addr) {
reflected_ipv4 = Some(reflected);
explicit_reflected_ipv4 = true;
}
if let Some(reflected) = direct_stun_res.v6.map(|r| r.reflected_addr) {
reflected_ipv6 = Some(reflected);
explicit_reflected_ipv6 = true;
}
}
if strict_bind_ipv4_requested && !explicit_detected_ipv4 {
detected_ipv4 = None;
reflected_ipv4 = None;
} else if strict_bind_ipv4_requested && !explicit_reflected_ipv4 {
reflected_ipv4 = None;
}
if strict_bind_ipv6_requested && !explicit_detected_ipv6 {
detected_ipv6 = None;
reflected_ipv6 = None;
} else if strict_bind_ipv6_requested && !explicit_reflected_ipv6 {
reflected_ipv6 = None;
}
probe.detected_ipv4 = detected_ipv4;
probe.detected_ipv6 = detected_ipv6;
probe.reflected_ipv4 = reflected_ipv4;
probe.reflected_ipv6 = reflected_ipv6;
probe.ipv4_is_bogon = probe.detected_ipv4.map(is_bogon_v4).unwrap_or(false);
probe.ipv6_is_bogon = probe.detected_ipv6.map(is_bogon_v6).unwrap_or(false);
// If STUN is blocked but IPv4 is private, try HTTP public-IP fallback.
if nat_probe
@@ -162,6 +268,8 @@ fn collect_stun_servers(config: &NetworkConfig) -> Vec<String> {
async fn probe_stun_servers_parallel(
servers: &[String],
concurrency: usize,
bind_v4: Option<IpAddr>,
bind_v6: Option<IpAddr>,
) -> DualStunResult {
let mut join_set = JoinSet::new();
let mut next_idx = 0usize;
@@ -172,8 +280,15 @@ async fn probe_stun_servers_parallel(
while next_idx < servers.len() && join_set.len() < concurrency {
let stun_addr = servers[next_idx].clone();
next_idx += 1;
let bind_v4 = bind_v4;
let bind_v6 = bind_v6;
join_set.spawn(async move {
let res = timeout(STUN_BATCH_TIMEOUT, stun_probe_dual(&stun_addr)).await;
let res = timeout(STUN_BATCH_TIMEOUT, async {
let v4 = stun_probe_family_with_bind(&stun_addr, IpFamily::V4, bind_v4).await?;
let v6 = stun_probe_family_with_bind(&stun_addr, IpFamily::V6, bind_v6).await?;
Ok::<DualStunResult, crate::error::ProxyError>(DualStunResult { v4, v6 })
})
.await;
(stun_addr, res)
});
}
@@ -226,18 +341,24 @@ async fn probe_stun_servers_parallel(
out
}
pub fn decide_network_capabilities(config: &NetworkConfig, probe: &NetworkProbe) -> NetworkDecision {
pub fn decide_network_capabilities(
config: &NetworkConfig,
probe: &NetworkProbe,
middle_proxy_nat_ip: Option<IpAddr>,
) -> NetworkDecision {
let ipv4_dc = config.ipv4 && probe.detected_ipv4.is_some();
let ipv6_dc = config.ipv6.unwrap_or(probe.detected_ipv6.is_some()) && probe.detected_ipv6.is_some();
let nat_ip_v4 = matches!(middle_proxy_nat_ip, Some(IpAddr::V4(_)));
let nat_ip_v6 = matches!(middle_proxy_nat_ip, Some(IpAddr::V6(_)));
let ipv4_me = config.ipv4
&& probe.detected_ipv4.is_some()
&& (!probe.ipv4_is_bogon || probe.reflected_ipv4.is_some());
&& (!probe.ipv4_is_bogon || probe.reflected_ipv4.is_some() || nat_ip_v4);
let ipv6_enabled = config.ipv6.unwrap_or(probe.detected_ipv6.is_some());
let ipv6_me = ipv6_enabled
&& probe.detected_ipv6.is_some()
&& (!probe.ipv6_is_bogon || probe.reflected_ipv6.is_some());
&& (!probe.ipv6_is_bogon || probe.reflected_ipv6.is_some() || nat_ip_v6);
let effective_prefer = match config.prefer {
6 if ipv6_me || ipv6_dc => 6,
@@ -262,6 +383,58 @@ pub fn decide_network_capabilities(config: &NetworkConfig, probe: &NetworkProbe)
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::config::NetworkConfig;
/// A bogon (private) detected IPv4 with no STUN reflection would normally
/// disable IPv4 middle-proxy mode; an explicit NAT IP must re-enable it.
#[test]
fn manual_nat_ip_enables_ipv4_me_without_reflection() {
    let mut config = NetworkConfig::default();
    config.ipv4 = true;
    let mut probe = NetworkProbe::default();
    probe.detected_ipv4 = Some(Ipv4Addr::new(10, 0, 0, 10));
    probe.ipv4_is_bogon = true;
    // Manually configured public NAT address stands in for reflection.
    let manual_nat_ip = Some(IpAddr::V4(Ipv4Addr::new(1, 2, 3, 4)));
    let decision = decide_network_capabilities(&config, &probe, manual_nat_ip);
    assert!(decision.ipv4_me);
}
/// The manual NAT IP is an IPv4 address, so it must only rescue IPv4 ME;
/// the bogon IPv6 side stays disabled.
#[test]
fn manual_nat_ip_does_not_enable_other_family() {
    let mut config = NetworkConfig::default();
    config.ipv4 = true;
    config.ipv6 = Some(true);
    let mut probe = NetworkProbe::default();
    probe.detected_ipv4 = Some(Ipv4Addr::new(10, 0, 0, 10));
    probe.detected_ipv6 = Some(Ipv6Addr::LOCALHOST);
    probe.ipv4_is_bogon = true;
    probe.ipv6_is_bogon = true;
    // A v4 NAT address must not vouch for the v6 family.
    let manual_nat_ip = Some(IpAddr::V4(Ipv4Addr::new(1, 2, 3, 4)));
    let decision = decide_network_capabilities(&config, &probe, manual_nat_ip);
    assert!(decision.ipv4_me);
    assert!(!decision.ipv6_me);
}
}
fn detect_local_ip_v4() -> Option<Ipv4Addr> {
let socket = UdpSocket::bind("0.0.0.0:0").ok()?;
socket.connect("8.8.8.8:80").ok()?;

View File

@@ -1,6 +1,6 @@
use std::sync::Arc;
use std::sync::atomic::{AtomicU8, AtomicU64, Ordering};
use std::time::Duration;
use std::time::{Duration, SystemTime, UNIX_EPOCH};
use tokio::sync::watch;
@@ -43,6 +43,7 @@ pub(crate) struct RouteCutoverState {
pub(crate) struct RouteRuntimeController {
mode: Arc<AtomicU8>,
generation: Arc<AtomicU64>,
direct_since_epoch_secs: Arc<AtomicU64>,
tx: watch::Sender<RouteCutoverState>,
}
@@ -53,9 +54,15 @@ impl RouteRuntimeController {
generation: 0,
};
let (tx, _rx) = watch::channel(initial);
let direct_since_epoch_secs = if matches!(initial_mode, RelayRouteMode::Direct) {
now_epoch_secs()
} else {
0
};
Self {
mode: Arc::new(AtomicU8::new(initial_mode.as_u8())),
generation: Arc::new(AtomicU64::new(0)),
direct_since_epoch_secs: Arc::new(AtomicU64::new(direct_since_epoch_secs)),
tx,
}
}
@@ -71,11 +78,22 @@ impl RouteRuntimeController {
self.tx.subscribe()
}
/// Epoch seconds at which the controller last entered Direct mode, or `None`
/// when not currently in Direct mode.
pub(crate) fn direct_since_epoch_secs(&self) -> Option<u64> {
    // A stored value of 0 is the sentinel for "not in Direct mode".
    match self.direct_since_epoch_secs.load(Ordering::Relaxed) {
        0 => None,
        since => Some(since),
    }
}
pub(crate) fn set_mode(&self, mode: RelayRouteMode) -> Option<RouteCutoverState> {
let previous = self.mode.swap(mode.as_u8(), Ordering::Relaxed);
if previous == mode.as_u8() {
return None;
}
if matches!(mode, RelayRouteMode::Direct) {
self.direct_since_epoch_secs
.store(now_epoch_secs(), Ordering::Relaxed);
} else {
self.direct_since_epoch_secs.store(0, Ordering::Relaxed);
}
let generation = self.generation.fetch_add(1, Ordering::Relaxed) + 1;
let next = RouteCutoverState { mode, generation };
self.tx.send_replace(next);
@@ -83,6 +101,13 @@ impl RouteRuntimeController {
}
}
/// Current wall-clock time as whole seconds since the Unix epoch.
///
/// A clock set before the epoch makes `duration_since` fail; that case
/// degrades to 0, the sentinel callers treat as "unset".
fn now_epoch_secs() -> u64 {
    SystemTime::now()
        .duration_since(UNIX_EPOCH)
        .map_or(0, |elapsed| elapsed.as_secs())
}
pub(crate) fn is_session_affected_by_cutover(
current: RouteCutoverState,
_session_mode: RelayRouteMode,

View File

@@ -8,7 +8,9 @@ use tokio::sync::RwLock;
use tokio::time::sleep;
use tracing::{debug, warn, info};
use crate::tls_front::types::{CachedTlsData, ParsedServerHello, TlsFetchResult};
use crate::tls_front::types::{
CachedTlsData, ParsedServerHello, TlsBehaviorProfile, TlsFetchResult,
};
/// Lightweight in-memory + optional on-disk cache for TLS fronting data.
#[derive(Debug)]
@@ -37,6 +39,7 @@ impl TlsFrontCache {
cert_payload: None,
app_data_records_sizes: vec![default_len],
total_app_data_len: default_len,
behavior_profile: TlsBehaviorProfile::default(),
fetched_at: SystemTime::now(),
domain: "default".to_string(),
});
@@ -189,6 +192,7 @@ impl TlsFrontCache {
cert_payload: fetched.cert_payload,
app_data_records_sizes: fetched.app_data_records_sizes.clone(),
total_app_data_len: fetched.total_app_data_len,
behavior_profile: fetched.behavior_profile,
fetched_at: SystemTime::now(),
domain: domain.to_string(),
};

View File

@@ -3,7 +3,7 @@ use crate::protocol::constants::{
TLS_RECORD_APPLICATION, TLS_RECORD_CHANGE_CIPHER, TLS_RECORD_HANDSHAKE, TLS_VERSION,
};
use crate::protocol::tls::{TLS_DIGEST_LEN, TLS_DIGEST_POS, gen_fake_x25519_key};
use crate::tls_front::types::{CachedTlsData, ParsedCertificateInfo};
use crate::tls_front::types::{CachedTlsData, ParsedCertificateInfo, TlsProfileSource};
const MIN_APP_DATA: usize = 64;
const MAX_APP_DATA: usize = 16640; // RFC 8446 §5.2 allows up to 2^14 + 256
@@ -108,14 +108,12 @@ pub fn build_emulated_server_hello(
) -> Vec<u8> {
// --- ServerHello ---
let mut extensions = Vec::new();
// KeyShare (x25519)
let key = gen_fake_x25519_key(rng);
extensions.extend_from_slice(&0x0033u16.to_be_bytes()); // key_share
extensions.extend_from_slice(&(2 + 2 + 32u16).to_be_bytes()); // len
extensions.extend_from_slice(&0x001du16.to_be_bytes()); // X25519
extensions.extend_from_slice(&0x0033u16.to_be_bytes());
extensions.extend_from_slice(&(2 + 2 + 32u16).to_be_bytes());
extensions.extend_from_slice(&0x001du16.to_be_bytes());
extensions.extend_from_slice(&(32u16).to_be_bytes());
extensions.extend_from_slice(&key);
// supported_versions (TLS1.3)
extensions.extend_from_slice(&0x002bu16.to_be_bytes());
extensions.extend_from_slice(&(2u16).to_be_bytes());
extensions.extend_from_slice(&0x0304u16.to_be_bytes());
@@ -128,7 +126,6 @@ pub fn build_emulated_server_hello(
extensions.push(alpn_proto.len() as u8);
extensions.extend_from_slice(alpn_proto);
}
let extensions_len = extensions.len() as u16;
let body_len = 2 + // version
@@ -173,11 +170,22 @@ pub fn build_emulated_server_hello(
];
// --- ApplicationData (fake encrypted records) ---
// Use the same number and sizes of ApplicationData records as the cached server.
let mut sizes = cached.app_data_records_sizes.clone();
if sizes.is_empty() {
sizes.push(cached.total_app_data_len.max(1024));
}
let sizes = match cached.behavior_profile.source {
TlsProfileSource::Raw | TlsProfileSource::Merged => cached
.app_data_records_sizes
.first()
.copied()
.or_else(|| cached.behavior_profile.app_data_record_sizes.first().copied())
.map(|size| vec![size])
.unwrap_or_else(|| vec![cached.total_app_data_len.max(1024)]),
_ => {
let mut sizes = cached.app_data_records_sizes.clone();
if sizes.is_empty() {
sizes.push(cached.total_app_data_len.max(1024));
}
sizes
}
};
let mut sizes = jitter_and_clamp_sizes(&sizes, rng);
let compact_payload = cached
.cert_info
@@ -269,7 +277,9 @@ pub fn build_emulated_server_hello(
mod tests {
use std::time::SystemTime;
use crate::tls_front::types::{CachedTlsData, ParsedServerHello, TlsCertPayload};
use crate::tls_front::types::{
CachedTlsData, ParsedServerHello, TlsBehaviorProfile, TlsCertPayload, TlsProfileSource,
};
use super::build_emulated_server_hello;
use crate::crypto::SecureRandom;
@@ -300,6 +310,7 @@ mod tests {
cert_payload,
app_data_records_sizes: vec![64],
total_app_data_len: 64,
behavior_profile: TlsBehaviorProfile::default(),
fetched_at: SystemTime::now(),
domain: "example.com".to_string(),
}
@@ -385,4 +396,34 @@ mod tests {
let payload = first_app_data_payload(&response);
assert!(payload.starts_with(b"CN=example.com"));
}
#[test]
fn test_build_emulated_server_hello_ignores_tail_records_for_raw_profile() {
    // Cached capture with four ApplicationData records. For a Merged profile
    // the builder takes only the first record size, so the ticket-like tail
    // sizes must not produce extra emulated records.
    let mut cached = make_cached(None);
    cached.app_data_records_sizes = vec![27, 3905, 537, 69];
    cached.total_app_data_len = 4538;
    cached.behavior_profile.source = TlsProfileSource::Merged;
    cached.behavior_profile.app_data_record_sizes = vec![27, 3905, 537];
    cached.behavior_profile.ticket_record_sizes = vec![69];
    let rng = SecureRandom::new();
    let response = build_emulated_server_hello(
        b"secret",
        &[0x12; 32],
        &[0x34; 16],
        &cached,
        false,
        &rng,
        None,
        0,
    );
    // TLS record header is 5 bytes (type, 2-byte version, 2-byte length);
    // bytes 3..5 of the first record carry the ServerHello body length.
    let hello_len = u16::from_be_bytes([response[3], response[4]]) as usize;
    let ccs_start = 5 + hello_len;
    // Skip the ChangeCipherSpec record: 5-byte header + 1-byte payload.
    let app_start = ccs_start + 6;
    let app_len = u16::from_be_bytes([response[app_start + 3], response[app_start + 4]]) as usize;
    assert_eq!(response[app_start], TLS_RECORD_APPLICATION);
    // Exactly one ApplicationData record follows the CCS: the response must
    // end right after it (no emulated ticket-like tail records).
    assert_eq!(app_start + 5 + app_len, response.len());
}
}

View File

@@ -21,14 +21,18 @@ use x509_parser::certificate::X509Certificate;
use crate::crypto::SecureRandom;
use crate::network::dns_overrides::resolve_socket_addr;
use crate::protocol::constants::{TLS_RECORD_APPLICATION, TLS_RECORD_HANDSHAKE};
use crate::protocol::constants::{
TLS_RECORD_APPLICATION, TLS_RECORD_CHANGE_CIPHER, TLS_RECORD_HANDSHAKE,
};
use crate::transport::proxy_protocol::{ProxyProtocolV1Builder, ProxyProtocolV2Builder};
use crate::tls_front::types::{
ParsedCertificateInfo,
ParsedServerHello,
TlsBehaviorProfile,
TlsCertPayload,
TlsExtension,
TlsFetchResult,
TlsProfileSource,
};
/// No-op verifier: accept any certificate (we only need lengths and metadata).
@@ -282,6 +286,41 @@ fn parse_server_hello(body: &[u8]) -> Option<ParsedServerHello> {
})
}
fn derive_behavior_profile(records: &[(u8, Vec<u8>)]) -> TlsBehaviorProfile {
let mut change_cipher_spec_count = 0u8;
let mut app_data_record_sizes = Vec::new();
for (record_type, body) in records {
match *record_type {
TLS_RECORD_CHANGE_CIPHER => {
change_cipher_spec_count = change_cipher_spec_count.saturating_add(1);
}
TLS_RECORD_APPLICATION => {
app_data_record_sizes.push(body.len());
}
_ => {}
}
}
let mut ticket_record_sizes = Vec::new();
while app_data_record_sizes
.last()
.is_some_and(|size| *size <= 256 && ticket_record_sizes.len() < 2)
{
if let Some(size) = app_data_record_sizes.pop() {
ticket_record_sizes.push(size);
}
}
ticket_record_sizes.reverse();
TlsBehaviorProfile {
change_cipher_spec_count: change_cipher_spec_count.max(1),
app_data_record_sizes,
ticket_record_sizes,
source: TlsProfileSource::Raw,
}
}
fn parse_cert_info(certs: &[CertificateDer<'static>]) -> Option<ParsedCertificateInfo> {
let first = certs.first()?;
let (_rem, cert) = X509Certificate::from_der(first.as_ref()).ok()?;
@@ -443,39 +482,50 @@ where
.await??;
let mut records = Vec::new();
// Read up to 4 records: ServerHello, CCS, and up to two ApplicationData.
for _ in 0..4 {
let mut app_records_seen = 0usize;
// Read a bounded encrypted flight: ServerHello, CCS, certificate-like data,
// and a small number of ticket-like tail records.
for _ in 0..8 {
match timeout(connect_timeout, read_tls_record(&mut stream)).await {
Ok(Ok(rec)) => records.push(rec),
Ok(Ok(rec)) => {
if rec.0 == TLS_RECORD_APPLICATION {
app_records_seen += 1;
}
records.push(rec);
}
Ok(Err(e)) => return Err(e),
Err(_) => break,
}
if records.len() >= 3 && records.iter().any(|(t, _)| *t == TLS_RECORD_APPLICATION) {
if app_records_seen >= 4 {
break;
}
}
let mut app_sizes = Vec::new();
let mut server_hello = None;
for (t, body) in &records {
if *t == TLS_RECORD_HANDSHAKE && server_hello.is_none() {
server_hello = parse_server_hello(body);
} else if *t == TLS_RECORD_APPLICATION {
app_sizes.push(body.len());
}
}
let parsed = server_hello.ok_or_else(|| anyhow!("ServerHello not received"))?;
let behavior_profile = derive_behavior_profile(&records);
let mut app_sizes = behavior_profile.app_data_record_sizes.clone();
app_sizes.extend_from_slice(&behavior_profile.ticket_record_sizes);
let total_app_data_len = app_sizes.iter().sum::<usize>().max(1024);
let app_data_records_sizes = behavior_profile
.app_data_record_sizes
.first()
.copied()
.or_else(|| behavior_profile.ticket_record_sizes.first().copied())
.map(|size| vec![size])
.unwrap_or_else(|| vec![total_app_data_len]);
Ok(TlsFetchResult {
server_hello_parsed: parsed,
app_data_records_sizes: if app_sizes.is_empty() {
vec![total_app_data_len]
} else {
app_sizes
},
app_data_records_sizes,
total_app_data_len,
behavior_profile,
cert_info: None,
cert_payload: None,
})
@@ -608,6 +658,12 @@ where
server_hello_parsed: parsed,
app_data_records_sizes: app_data_records_sizes.clone(),
total_app_data_len: app_data_records_sizes.iter().sum(),
behavior_profile: TlsBehaviorProfile {
change_cipher_spec_count: 1,
app_data_record_sizes: app_data_records_sizes,
ticket_record_sizes: Vec::new(),
source: TlsProfileSource::Rustls,
},
cert_info,
cert_payload,
})
@@ -706,6 +762,7 @@ pub async fn fetch_real_tls(
if let Some(mut raw) = raw_result {
raw.cert_info = rustls_result.cert_info;
raw.cert_payload = rustls_result.cert_payload;
raw.behavior_profile.source = TlsProfileSource::Merged;
debug!(sni = %sni, "Fetched TLS metadata via raw probe + rustls cert chain");
Ok(raw)
} else {
@@ -725,7 +782,11 @@ pub async fn fetch_real_tls(
#[cfg(test)]
mod tests {
use super::encode_tls13_certificate_message;
use super::{derive_behavior_profile, encode_tls13_certificate_message};
use crate::protocol::constants::{
TLS_RECORD_APPLICATION, TLS_RECORD_CHANGE_CIPHER, TLS_RECORD_HANDSHAKE,
};
use crate::tls_front::types::TlsProfileSource;
/// Decode a 24-bit big-endian integer (as used by TLS handshake message
/// length prefixes) from the first three bytes of `bytes`.
/// Panics if `bytes` has fewer than three elements.
fn read_u24(bytes: &[u8]) -> usize {
    bytes[..3]
        .iter()
        .fold(0usize, |acc, &byte| (acc << 8) | usize::from(byte))
}
@@ -753,4 +814,20 @@ mod tests {
fn test_encode_tls13_certificate_message_empty_chain() {
assert!(encode_tls13_certificate_message(&[]).is_none());
}
/// Three ApplicationData records: one large (certificate-like) plus two
/// small (<= 256-byte) tail records, which must be split out into
/// `ticket_record_sizes` instead of `app_data_record_sizes`.
#[test]
fn test_derive_behavior_profile_splits_ticket_like_tail_records() {
    let profile = derive_behavior_profile(&[
        (TLS_RECORD_HANDSHAKE, vec![0u8; 90]),
        (TLS_RECORD_CHANGE_CIPHER, vec![0x01]),
        (TLS_RECORD_APPLICATION, vec![0u8; 1400]),
        (TLS_RECORD_APPLICATION, vec![0u8; 220]),
        (TLS_RECORD_APPLICATION, vec![0u8; 180]),
    ]);
    assert_eq!(profile.change_cipher_spec_count, 1);
    assert_eq!(profile.app_data_record_sizes, vec![1400]);
    // Tail sizes keep their original record order after the split.
    assert_eq!(profile.ticket_record_sizes, vec![220, 180]);
    assert_eq!(profile.source, TlsProfileSource::Raw);
}
}

View File

@@ -39,6 +39,53 @@ pub struct TlsCertPayload {
pub certificate_message: Vec<u8>,
}
/// Provenance of the cached TLS behavior profile.
///
/// Serialized with `snake_case` variant names (e.g. `"merged"`), so cache
/// entries remain readable across builds.
#[derive(Debug, Clone, Copy, Serialize, Deserialize, PartialEq, Eq, Default)]
#[serde(rename_all = "snake_case")]
pub enum TlsProfileSource {
    /// Built from hardcoded defaults or legacy cache entries.
    #[default]
    Default,
    /// Derived from raw TLS record capture only.
    Raw,
    /// Derived from rustls-only metadata fallback.
    Rustls,
    /// Merged from raw TLS capture and rustls certificate metadata.
    Merged,
}
/// Coarse-grained TLS response behavior captured per SNI.
///
/// Every field carries a serde default so cache entries written before a
/// given field existed still deserialize.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct TlsBehaviorProfile {
    /// Number of ChangeCipherSpec records observed before encrypted flight.
    #[serde(default = "default_change_cipher_spec_count")]
    pub change_cipher_spec_count: u8,
    /// Sizes of the primary encrypted flight records carrying cert-like payload.
    #[serde(default)]
    pub app_data_record_sizes: Vec<usize>,
    /// Sizes of small tail ApplicationData records that look like tickets.
    #[serde(default)]
    pub ticket_record_sizes: Vec<usize>,
    /// Source of this behavior profile.
    #[serde(default)]
    pub source: TlsProfileSource,
}
/// Serde default for `change_cipher_spec_count`: a single ChangeCipherSpec
/// record.
fn default_change_cipher_spec_count() -> u8 {
    1
}
impl Default for TlsBehaviorProfile {
fn default() -> Self {
Self {
change_cipher_spec_count: default_change_cipher_spec_count(),
app_data_record_sizes: Vec::new(),
ticket_record_sizes: Vec::new(),
source: TlsProfileSource::Default,
}
}
}
/// Cached data per SNI used by the emulator.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct CachedTlsData {
@@ -48,6 +95,8 @@ pub struct CachedTlsData {
pub cert_payload: Option<TlsCertPayload>,
pub app_data_records_sizes: Vec<usize>,
pub total_app_data_len: usize,
#[serde(default)]
pub behavior_profile: TlsBehaviorProfile,
#[serde(default = "now_system_time", skip_serializing, skip_deserializing)]
pub fetched_at: SystemTime,
pub domain: String,
@@ -63,6 +112,40 @@ pub struct TlsFetchResult {
pub server_hello_parsed: ParsedServerHello,
pub app_data_records_sizes: Vec<usize>,
pub total_app_data_len: usize,
#[serde(default)]
pub behavior_profile: TlsBehaviorProfile,
pub cert_info: Option<ParsedCertificateInfo>,
pub cert_payload: Option<TlsCertPayload>,
}
#[cfg(test)]
mod tests {
    use super::*;
    /// Cache entries written before `behavior_profile` existed must still
    /// deserialize; `#[serde(default)]` fills in `TlsBehaviorProfile::default()`.
    #[test]
    fn cached_tls_data_deserializes_without_behavior_profile() {
        // JSON shaped like a legacy cache record: no "behavior_profile" key.
        let json = r#"
        {
            "server_hello_template": {
                "version": [3, 3],
                "random": [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                "session_id": [],
                "cipher_suite": [19, 1],
                "compression": 0,
                "extensions": []
            },
            "cert_info": null,
            "cert_payload": null,
            "app_data_records_sizes": [1024],
            "total_app_data_len": 1024,
            "domain": "example.com"
        }
        "#;
        let cached: CachedTlsData = serde_json::from_str(json).unwrap();
        // All defaults: one CCS record, empty size lists, Default source.
        assert_eq!(cached.behavior_profile.change_cipher_spec_count, 1);
        assert!(cached.behavior_profile.app_data_record_sizes.is_empty());
        assert!(cached.behavior_profile.ticket_record_sizes.is_empty());
        assert_eq!(cached.behavior_profile.source, TlsProfileSource::Default);
    }
}

View File

@@ -298,6 +298,7 @@ async fn run_update_cycle(
pool.update_runtime_reinit_policy(
cfg.general.hardswap,
cfg.general.me_pool_drain_ttl_secs,
cfg.general.me_pool_drain_threshold,
cfg.general.effective_me_pool_force_close_secs(),
cfg.general.me_pool_min_fresh_ratio,
cfg.general.me_hardswap_warmup_delay_min_ms,
@@ -524,6 +525,7 @@ pub async fn me_config_updater(
pool.update_runtime_reinit_policy(
cfg.general.hardswap,
cfg.general.me_pool_drain_ttl_secs,
cfg.general.me_pool_drain_threshold,
cfg.general.effective_me_pool_force_close_secs(),
cfg.general.me_pool_min_fresh_ratio,
cfg.general.me_hardswap_warmup_delay_min_ms,

View File

@@ -33,7 +33,7 @@ use super::codec::{
cbc_decrypt_inplace, cbc_encrypt_padded, parse_handshake_flags, parse_nonce_payload,
read_rpc_frame_plaintext, rpc_crc,
};
use super::selftest::{BndAddrStatus, BndPortStatus, record_bnd_status};
use super::selftest::{BndAddrStatus, BndPortStatus, record_bnd_status, record_upstream_bnd_status};
use super::wire::{extract_ip_material, IpMaterial};
use super::MePool;
@@ -59,6 +59,7 @@ impl KdfClientPortSource {
pub(crate) struct HandshakeOutput {
pub rd: ReadHalf<TcpStream>,
pub wr: WriteHalf<TcpStream>,
pub source_ip: IpAddr,
pub read_key: [u8; 32],
pub read_iv: [u8; 16],
pub write_key: [u8; 32],
@@ -199,10 +200,26 @@ impl MePool {
fn configure_keepalive(stream: &TcpStream) -> std::io::Result<()> {
let sock = SockRef::from(stream);
let ka = TcpKeepalive::new()
.with_time(Duration::from_secs(30))
.with_interval(Duration::from_secs(10))
.with_retries(3);
let ka = TcpKeepalive::new().with_time(Duration::from_secs(30));
// Mirror socket2 v0.5.10 target gate for with_retries(), the stricter method.
#[cfg(any(
target_os = "android",
target_os = "dragonfly",
target_os = "freebsd",
target_os = "fuchsia",
target_os = "illumos",
target_os = "ios",
target_os = "visionos",
target_os = "linux",
target_os = "macos",
target_os = "netbsd",
target_os = "tvos",
target_os = "watchos",
target_os = "cygwin",
))]
let ka = ka.with_interval(Duration::from_secs(10)).with_retries(3);
sock.set_tcp_keepalive(&ka)?;
sock.set_keepalive(true)?;
Ok(())
@@ -299,6 +316,18 @@ impl MePool {
let local_addr_nat = self.translate_our_addr_with_reflection(local_addr, reflected);
let peer_addr_nat = SocketAddr::new(self.translate_ip_for_nat(peer_addr.ip()), peer_addr.port());
if let Some(upstream_info) = upstream_egress {
let client_ip_for_kdf = socks_bound_addr
.map(|value| value.ip())
.unwrap_or(local_addr_nat.ip());
record_upstream_bnd_status(
upstream_info.upstream_id,
bnd_addr_status,
bnd_port_status,
raw_socks_bound_addr,
Some(client_ip_for_kdf),
);
}
let (mut rd, mut wr) = tokio::io::split(stream);
let my_nonce: [u8; 16] = rng.bytes(16).try_into().unwrap();
@@ -661,6 +690,7 @@ impl MePool {
Ok(HandshakeOutput {
rd,
wr,
source_ip: local_addr_nat.ip(),
read_key: rk,
read_iv,
write_key: wk,
@@ -685,3 +715,66 @@ fn hex_dump(data: &[u8]) -> String {
}
out
}
#[cfg(test)]
mod tests {
    use super::*;
    use std::io::ErrorKind;
    use tokio::net::{TcpListener, TcpStream};
    /// Exercise keepalive configuration against a real loopback connection.
    /// Environments that forbid socket use (`PermissionDenied`) make the
    /// test return early (skip) instead of failing.
    #[tokio::test]
    async fn test_configure_keepalive_loopback() {
        let listener = match TcpListener::bind("127.0.0.1:0").await {
            Ok(listener) => listener,
            Err(error) if error.kind() == ErrorKind::PermissionDenied => return,
            Err(error) => panic!("bind failed: {error}"),
        };
        let addr = match listener.local_addr() {
            Ok(addr) => addr,
            Err(error) => panic!("local_addr failed: {error}"),
        };
        let stream = match TcpStream::connect(addr).await {
            Ok(stream) => stream,
            Err(error) if error.kind() == ErrorKind::PermissionDenied => return,
            Err(error) => panic!("connect failed: {error}"),
        };
        if let Err(error) = MePool::configure_keepalive(&stream) {
            if error.kind() == ErrorKind::PermissionDenied {
                return;
            }
            panic!("configure_keepalive failed: {error}");
        }
    }
    /// Compile-only check for the time-only keepalive builder path used on
    /// targets (like OpenBSD) without interval/retries support.
    #[test]
    #[cfg(target_os = "openbsd")]
    fn test_openbsd_keepalive_cfg_path_compiles() {
        let _ka = TcpKeepalive::new().with_time(Duration::from_secs(30));
    }
    /// Compile-only check for the full builder path; the cfg list mirrors
    /// socket2's target gate for `with_retries()`.
    #[test]
    #[cfg(any(
        target_os = "android",
        target_os = "dragonfly",
        target_os = "freebsd",
        target_os = "fuchsia",
        target_os = "illumos",
        target_os = "ios",
        target_os = "visionos",
        target_os = "linux",
        target_os = "macos",
        target_os = "netbsd",
        target_os = "tvos",
        target_os = "watchos",
        target_os = "cygwin",
    ))]
    fn test_retry_keepalive_cfg_path_compiles() {
        let _ka = TcpKeepalive::new()
            .with_time(Duration::from_secs(30))
            .with_interval(Duration::from_secs(10))
            .with_retries(3);
    }
}

View File

@@ -62,6 +62,7 @@ pub async fn me_health_monitor(pool: Arc<MePool>, rng: Arc<SecureRandom>, _min_c
let mut adaptive_idle_since: HashMap<(i32, IpFamily), Instant> = HashMap::new();
let mut adaptive_recover_until: HashMap<(i32, IpFamily), Instant> = HashMap::new();
let mut floor_warn_next_allowed: HashMap<(i32, IpFamily), Instant> = HashMap::new();
let mut drain_warn_next_allowed: HashMap<u64, Instant> = HashMap::new();
let mut degraded_interval = true;
loop {
let interval = if degraded_interval {
@@ -71,7 +72,7 @@ pub async fn me_health_monitor(pool: Arc<MePool>, rng: Arc<SecureRandom>, _min_c
};
tokio::time::sleep(interval).await;
pool.prune_closed_writers().await;
reap_draining_writers(&pool).await;
reap_draining_writers(&pool, &mut drain_warn_next_allowed).await;
let v4_degraded = check_family(
IpFamily::V4,
&pool,
@@ -110,17 +111,81 @@ pub async fn me_health_monitor(pool: Arc<MePool>, rng: Arc<SecureRandom>, _min_c
}
}
async fn reap_draining_writers(pool: &Arc<MePool>) {
async fn reap_draining_writers(
pool: &Arc<MePool>,
warn_next_allowed: &mut HashMap<u64, Instant>,
) {
let now_epoch_secs = MePool::now_epoch_secs();
let now = Instant::now();
let drain_ttl_secs = pool.me_pool_drain_ttl_secs.load(std::sync::atomic::Ordering::Relaxed);
let drain_threshold = pool
.me_pool_drain_threshold
.load(std::sync::atomic::Ordering::Relaxed);
let writers = pool.writers.read().await.clone();
let mut draining_writers = Vec::new();
for writer in writers {
if !writer.draining.load(std::sync::atomic::Ordering::Relaxed) {
continue;
}
if pool.registry.is_writer_empty(writer.id).await {
let is_empty = pool.registry.is_writer_empty(writer.id).await;
if is_empty {
pool.remove_writer_and_close_clients(writer.id).await;
continue;
}
draining_writers.push(writer);
}
if drain_threshold > 0 && draining_writers.len() > drain_threshold as usize {
draining_writers.sort_by(|left, right| {
let left_started = left
.draining_started_at_epoch_secs
.load(std::sync::atomic::Ordering::Relaxed);
let right_started = right
.draining_started_at_epoch_secs
.load(std::sync::atomic::Ordering::Relaxed);
left_started
.cmp(&right_started)
.then_with(|| left.created_at.cmp(&right.created_at))
.then_with(|| left.id.cmp(&right.id))
});
let overflow = draining_writers.len().saturating_sub(drain_threshold as usize);
warn!(
draining_writers = draining_writers.len(),
me_pool_drain_threshold = drain_threshold,
removing_writers = overflow,
"ME draining writer threshold exceeded, force-closing oldest draining writers"
);
for writer in draining_writers.drain(..overflow) {
pool.stats.increment_pool_force_close_total();
pool.remove_writer_and_close_clients(writer.id).await;
}
}
for writer in draining_writers {
let drain_started_at_epoch_secs = writer
.draining_started_at_epoch_secs
.load(std::sync::atomic::Ordering::Relaxed);
if drain_ttl_secs > 0
&& drain_started_at_epoch_secs != 0
&& now_epoch_secs.saturating_sub(drain_started_at_epoch_secs) > drain_ttl_secs
&& should_emit_writer_warn(
warn_next_allowed,
writer.id,
now,
pool.warn_rate_limit_duration(),
)
{
warn!(
writer_id = writer.id,
writer_dc = writer.writer_dc,
endpoint = %writer.addr,
generation = writer.generation,
drain_ttl_secs,
force_close_secs = pool.me_pool_force_close_secs.load(std::sync::atomic::Ordering::Relaxed),
allow_drain_fallback = writer.allow_drain_fallback.load(std::sync::atomic::Ordering::Relaxed),
"ME draining writer remains non-empty past drain TTL"
);
}
let deadline_epoch_secs = writer
.drain_deadline_epoch_secs
.load(std::sync::atomic::Ordering::Relaxed);
@@ -132,6 +197,23 @@ async fn reap_draining_writers(pool: &Arc<MePool>) {
}
}
/// Per-writer rate limiter for warning emission.
///
/// Returns `true` — and arms a fresh cooldown of `cooldown` from `now` —
/// when no cooldown is recorded for `writer_id` or the recorded one has
/// expired; returns `false` while the cooldown is still pending. Entries
/// are never removed here; an expired entry simply becomes eligible again.
fn should_emit_writer_warn(
    next_allowed: &mut HashMap<u64, Instant>,
    writer_id: u64,
    now: Instant,
    cooldown: Duration,
) -> bool {
    match next_allowed.get(&writer_id) {
        Some(&ready_at) if now < ready_at => false,
        _ => {
            next_allowed.insert(writer_id, now + cooldown);
            true
        }
    }
}
async fn check_family(
family: IpFamily,
pool: &Arc<MePool>,
@@ -1222,3 +1304,190 @@ async fn maybe_rotate_single_endpoint_shadow(
"Single-endpoint shadow writer rotated"
);
}
#[cfg(test)]
mod tests {
use std::collections::HashMap;
use std::net::{IpAddr, Ipv4Addr, SocketAddr};
use std::sync::Arc;
use std::sync::atomic::{AtomicBool, AtomicU8, AtomicU32, AtomicU64, Ordering};
use std::time::{Duration, Instant};
use tokio::sync::mpsc;
use tokio_util::sync::CancellationToken;
use super::reap_draining_writers;
use crate::config::{GeneralConfig, MeRouteNoWriterMode, MeSocksKdfPolicy, MeWriterPickMode};
use crate::crypto::SecureRandom;
use crate::network::probe::NetworkDecision;
use crate::stats::Stats;
use crate::transport::middle_proxy::codec::WriterCommand;
use crate::transport::middle_proxy::pool::{MePool, MeWriter, WriterContour};
use crate::transport::middle_proxy::registry::ConnMeta;
/// Build a minimal `MePool` for tests, overriding only
/// `me_pool_drain_threshold`; every other knob takes its value from
/// `GeneralConfig::default()`. The long positional list below must stay in
/// the exact order `MePool::new` declares its parameters.
async fn make_pool(me_pool_drain_threshold: u64) -> Arc<MePool> {
    let general = GeneralConfig {
        me_pool_drain_threshold,
        ..GeneralConfig::default()
    };
    MePool::new(
        None,
        vec![1u8; 32],
        None,
        false,
        None,
        Vec::new(),
        1,
        None,
        12,
        1200,
        HashMap::new(),
        HashMap::new(),
        None,
        NetworkDecision::default(),
        None,
        Arc::new(SecureRandom::new()),
        Arc::new(Stats::default()),
        general.me_keepalive_enabled,
        general.me_keepalive_interval_secs,
        general.me_keepalive_jitter_secs,
        general.me_keepalive_payload_random,
        general.rpc_proxy_req_every,
        general.me_warmup_stagger_enabled,
        general.me_warmup_step_delay_ms,
        general.me_warmup_step_jitter_ms,
        general.me_reconnect_max_concurrent_per_dc,
        general.me_reconnect_backoff_base_ms,
        general.me_reconnect_backoff_cap_ms,
        general.me_reconnect_fast_retry_count,
        general.me_single_endpoint_shadow_writers,
        general.me_single_endpoint_outage_mode_enabled,
        general.me_single_endpoint_outage_disable_quarantine,
        general.me_single_endpoint_outage_backoff_min_ms,
        general.me_single_endpoint_outage_backoff_max_ms,
        general.me_single_endpoint_shadow_rotate_every_secs,
        general.me_floor_mode,
        general.me_adaptive_floor_idle_secs,
        general.me_adaptive_floor_min_writers_single_endpoint,
        general.me_adaptive_floor_min_writers_multi_endpoint,
        general.me_adaptive_floor_recover_grace_secs,
        general.me_adaptive_floor_writers_per_core_total,
        general.me_adaptive_floor_cpu_cores_override,
        general.me_adaptive_floor_max_extra_writers_single_per_core,
        general.me_adaptive_floor_max_extra_writers_multi_per_core,
        general.me_adaptive_floor_max_active_writers_per_core,
        general.me_adaptive_floor_max_warm_writers_per_core,
        general.me_adaptive_floor_max_active_writers_global,
        general.me_adaptive_floor_max_warm_writers_global,
        general.hardswap,
        general.me_pool_drain_ttl_secs,
        general.me_pool_drain_threshold,
        general.effective_me_pool_force_close_secs(),
        general.me_pool_min_fresh_ratio,
        general.me_hardswap_warmup_delay_min_ms,
        general.me_hardswap_warmup_delay_max_ms,
        general.me_hardswap_warmup_extra_passes,
        general.me_hardswap_warmup_pass_backoff_base_ms,
        general.me_bind_stale_mode,
        general.me_bind_stale_ttl_secs,
        general.me_secret_atomic_snapshot,
        general.me_deterministic_writer_sort,
        MeWriterPickMode::default(),
        general.me_writer_pick_sample_size,
        MeSocksKdfPolicy::default(),
        general.me_writer_cmd_channel_capacity,
        general.me_route_channel_capacity,
        general.me_route_backpressure_base_timeout_ms,
        general.me_route_backpressure_high_timeout_ms,
        general.me_route_backpressure_high_watermark_pct,
        general.me_reader_route_data_wait_ms,
        general.me_health_interval_ms_unhealthy,
        general.me_health_interval_ms_healthy,
        general.me_warn_rate_limit_ms,
        MeRouteNoWriterMode::default(),
        general.me_route_no_writer_wait_ms,
        general.me_route_inline_recovery_attempts,
        general.me_route_inline_recovery_wait_ms,
    )
}
/// Insert a writer already in the Draining contour and bind one live client
/// connection to it, so the registry does not report it empty.
///
/// `created_at` is backdated by `writer_id` seconds (higher ids read as
/// younger writers), and `drain_started_at_epoch_secs` seeds the drain
/// clock. Returns the bound client connection id.
async fn insert_draining_writer(
    pool: &Arc<MePool>,
    writer_id: u64,
    drain_started_at_epoch_secs: u64,
) -> u64 {
    let (conn_id, _rx) = pool.registry.register().await;
    let (tx, _writer_rx) = mpsc::channel::<WriterCommand>(8);
    let writer = MeWriter {
        id: writer_id,
        // Unique per-writer port so endpoints are distinguishable.
        addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), 4000 + writer_id as u16),
        source_ip: IpAddr::V4(Ipv4Addr::LOCALHOST),
        writer_dc: 2,
        generation: 1,
        contour: Arc::new(AtomicU8::new(WriterContour::Draining.as_u8())),
        created_at: Instant::now() - Duration::from_secs(writer_id),
        tx: tx.clone(),
        cancel: CancellationToken::new(),
        degraded: Arc::new(AtomicBool::new(false)),
        rtt_ema_ms_x10: Arc::new(AtomicU32::new(0)),
        draining: Arc::new(AtomicBool::new(true)),
        draining_started_at_epoch_secs: Arc::new(AtomicU64::new(drain_started_at_epoch_secs)),
        // 0 = no force-close deadline set yet.
        drain_deadline_epoch_secs: Arc::new(AtomicU64::new(0)),
        allow_drain_fallback: Arc::new(AtomicBool::new(false)),
    };
    pool.writers.write().await.push(writer);
    pool.registry.register_writer(writer_id, tx).await;
    pool.conn_count.fetch_add(1, Ordering::Relaxed);
    // Bind the client connection; failure here would invalidate the test
    // setup, so assert on it.
    assert!(
        pool.registry
            .bind_writer(
                conn_id,
                writer_id,
                ConnMeta {
                    target_dc: 2,
                    client_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), 6000),
                    our_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), 443),
                    proto_flags: 0,
                },
            )
            .await
    );
    conn_id
}
/// With threshold 2 and three non-empty draining writers, the reaper must
/// force-close the writer whose drain started earliest (id 10), keep the
/// two newer ones, and close the client bound to the removed writer.
#[tokio::test]
async fn reap_draining_writers_force_closes_oldest_over_threshold() {
    let pool = make_pool(2).await;
    let now_epoch_secs = MePool::now_epoch_secs();
    // Drain start times: 10 is oldest, 30 is newest.
    let conn_a = insert_draining_writer(&pool, 10, now_epoch_secs.saturating_sub(30)).await;
    let conn_b = insert_draining_writer(&pool, 20, now_epoch_secs.saturating_sub(20)).await;
    let conn_c = insert_draining_writer(&pool, 30, now_epoch_secs.saturating_sub(10)).await;
    let mut warn_next_allowed = HashMap::new();
    reap_draining_writers(&pool, &mut warn_next_allowed).await;
    let writer_ids: Vec<u64> = pool.writers.read().await.iter().map(|writer| writer.id).collect();
    assert_eq!(writer_ids, vec![20, 30]);
    // Writer 10's bound client was closed; the others stay bound.
    assert!(pool.registry.get_writer(conn_a).await.is_none());
    assert_eq!(pool.registry.get_writer(conn_b).await.unwrap().writer_id, 20);
    assert_eq!(pool.registry.get_writer(conn_c).await.unwrap().writer_id, 30);
}
/// With threshold 0 (disabled), the over-threshold force-close path is
/// skipped entirely: all non-empty draining writers and their bound
/// clients survive the reap pass.
#[tokio::test]
async fn reap_draining_writers_keeps_timeout_only_behavior_when_threshold_disabled() {
    let pool = make_pool(0).await;
    let now_epoch_secs = MePool::now_epoch_secs();
    let conn_a = insert_draining_writer(&pool, 10, now_epoch_secs.saturating_sub(30)).await;
    let conn_b = insert_draining_writer(&pool, 20, now_epoch_secs.saturating_sub(20)).await;
    let conn_c = insert_draining_writer(&pool, 30, now_epoch_secs.saturating_sub(10)).await;
    let mut warn_next_allowed = HashMap::new();
    reap_draining_writers(&pool, &mut warn_next_allowed).await;
    let writer_ids: Vec<u64> = pool.writers.read().await.iter().map(|writer| writer.id).collect();
    assert_eq!(writer_ids, vec![10, 20, 30]);
    assert_eq!(pool.registry.get_writer(conn_a).await.unwrap().writer_id, 10);
    assert_eq!(pool.registry.get_writer(conn_b).await.unwrap().writer_id, 20);
    assert_eq!(pool.registry.get_writer(conn_c).await.unwrap().writer_id, 30);
}
}

View File

@@ -38,7 +38,9 @@ pub use config_updater::{
me_config_updater, save_proxy_config_cache,
};
pub use rotation::{MeReinitTrigger, me_reinit_scheduler, me_rotation_task};
pub(crate) use selftest::{bnd_snapshot, timeskew_snapshot};
pub(crate) use selftest::{
bnd_snapshot, timeskew_snapshot, upstream_bnd_snapshots,
};
pub use wire::proto_flags_for_tag;
#[derive(Debug)]

View File

@@ -34,6 +34,7 @@ pub(super) struct RefillEndpointKey {
pub struct MeWriter {
pub id: u64,
pub addr: SocketAddr,
pub source_ip: IpAddr,
pub writer_dc: i32,
pub generation: u64,
pub contour: Arc<AtomicU8>,
@@ -170,6 +171,7 @@ pub struct MePool {
pub(super) endpoint_quarantine: Arc<Mutex<HashMap<SocketAddr, Instant>>>,
pub(super) kdf_material_fingerprint: Arc<RwLock<HashMap<SocketAddr, (u64, u16)>>>,
pub(super) me_pool_drain_ttl_secs: AtomicU64,
pub(super) me_pool_drain_threshold: AtomicU64,
pub(super) me_pool_force_close_secs: AtomicU64,
pub(super) me_pool_min_fresh_ratio_permille: AtomicU32,
pub(super) me_hardswap_warmup_delay_min_ms: AtomicU64,
@@ -270,6 +272,7 @@ impl MePool {
me_adaptive_floor_max_warm_writers_global: u32,
hardswap: bool,
me_pool_drain_ttl_secs: u64,
me_pool_drain_threshold: u64,
me_pool_force_close_secs: u64,
me_pool_min_fresh_ratio: f32,
me_hardswap_warmup_delay_min_ms: u64,
@@ -445,6 +448,7 @@ impl MePool {
endpoint_quarantine: Arc::new(Mutex::new(HashMap::new())),
kdf_material_fingerprint: Arc::new(RwLock::new(HashMap::new())),
me_pool_drain_ttl_secs: AtomicU64::new(me_pool_drain_ttl_secs),
me_pool_drain_threshold: AtomicU64::new(me_pool_drain_threshold),
me_pool_force_close_secs: AtomicU64::new(me_pool_force_close_secs),
me_pool_min_fresh_ratio_permille: AtomicU32::new(Self::ratio_to_permille(
me_pool_min_fresh_ratio,
@@ -491,6 +495,7 @@ impl MePool {
&self,
hardswap: bool,
drain_ttl_secs: u64,
pool_drain_threshold: u64,
force_close_secs: u64,
min_fresh_ratio: f32,
hardswap_warmup_delay_min_ms: u64,
@@ -529,6 +534,8 @@ impl MePool {
self.hardswap.store(hardswap, Ordering::Relaxed);
self.me_pool_drain_ttl_secs
.store(drain_ttl_secs, Ordering::Relaxed);
self.me_pool_drain_threshold
.store(pool_drain_threshold, Ordering::Relaxed);
self.me_pool_force_close_secs
.store(force_close_secs, Ordering::Relaxed);
self.me_pool_min_fresh_ratio_permille
@@ -638,9 +645,9 @@ impl MePool {
}
}
/// Translate the local ME address into the address material sent to the proxy.
pub fn translate_our_addr(&self, addr: SocketAddr) -> SocketAddr {
let ip = self.translate_ip_for_nat(addr.ip());
SocketAddr::new(ip, addr.port())
self.translate_our_addr_with_reflection(addr, None)
}
pub fn registry(&self) -> &Arc<ConnRegistry> {

View File

@@ -159,7 +159,13 @@ impl MePool {
addr: std::net::SocketAddr,
reflected: Option<std::net::SocketAddr>,
) -> std::net::SocketAddr {
let ip = if let Some(r) = reflected {
let ip = if let Some(nat_ip) = self.nat_ip_cfg {
match (addr.ip(), nat_ip) {
(IpAddr::V4(_), IpAddr::V4(dst)) => IpAddr::V4(dst),
(IpAddr::V6(_), IpAddr::V6(dst)) => IpAddr::V6(dst),
_ => addr.ip(),
}
} else if let Some(r) = reflected {
// Use reflected IP (not port) only when local address is non-public.
if is_bogon(addr.ip()) || addr.ip().is_loopback() || addr.ip().is_unspecified() {
r.ip()

View File

@@ -19,6 +19,12 @@ pub(crate) struct MeApiWriterStatusSnapshot {
pub bound_clients: usize,
pub idle_for_secs: Option<u64>,
pub rtt_ema_ms: Option<f64>,
pub matches_active_generation: bool,
pub in_desired_map: bool,
pub allow_drain_fallback: bool,
pub drain_started_at_epoch_secs: Option<u64>,
pub drain_deadline_epoch_secs: Option<u64>,
pub drain_over_ttl: bool,
}
#[derive(Clone, Debug)]
@@ -35,6 +41,8 @@ pub(crate) struct MeApiDcStatusSnapshot {
pub floor_capped: bool,
pub alive_writers: usize,
pub coverage_pct: f64,
pub fresh_alive_writers: usize,
pub fresh_coverage_pct: f64,
pub rtt_ms: Option<f64>,
pub load: usize,
}
@@ -55,6 +63,8 @@ pub(crate) struct MeApiStatusSnapshot {
pub required_writers: usize,
pub alive_writers: usize,
pub coverage_pct: f64,
pub fresh_alive_writers: usize,
pub fresh_coverage_pct: f64,
pub writers: Vec<MeApiWriterStatusSnapshot>,
pub dcs: Vec<MeApiDcStatusSnapshot>,
}
@@ -213,6 +223,8 @@ impl MePool {
pub(crate) async fn api_status_snapshot(&self) -> MeApiStatusSnapshot {
let now_epoch_secs = Self::now_epoch_secs();
let active_generation = self.current_generation();
let drain_ttl_secs = self.me_pool_drain_ttl_secs.load(Ordering::Relaxed);
let mut endpoints_by_dc = BTreeMap::<i16, BTreeSet<SocketAddr>>::new();
if self.decision.ipv4_me {
@@ -239,6 +251,7 @@ impl MePool {
let mut live_writers_by_dc_endpoint = HashMap::<(i16, SocketAddr), usize>::new();
let mut live_writers_by_dc = HashMap::<i16, usize>::new();
let mut fresh_writers_by_dc = HashMap::<i16, usize>::new();
let mut dc_rtt_agg = HashMap::<i16, (f64, u64)>::new();
let mut writer_rows = Vec::<MeApiWriterStatusSnapshot>::with_capacity(writers.len());
@@ -247,6 +260,10 @@ impl MePool {
let dc = i16::try_from(writer.writer_dc).ok();
let draining = writer.draining.load(Ordering::Relaxed);
let degraded = writer.degraded.load(Ordering::Relaxed);
let matches_active_generation = writer.generation == active_generation;
let in_desired_map = dc
.and_then(|dc_idx| endpoints_by_dc.get(&dc_idx))
.is_some_and(|endpoints| endpoints.contains(&endpoint));
let bound_clients = activity
.bound_clients_by_writer
.get(&writer.id)
@@ -256,6 +273,21 @@ impl MePool {
.get(&writer.id)
.map(|idle_ts| now_epoch_secs.saturating_sub(*idle_ts));
let rtt_ema_ms = rtt.get(&writer.id).map(|(_, ema)| *ema);
let allow_drain_fallback = writer.allow_drain_fallback.load(Ordering::Relaxed);
let drain_started_at_epoch_secs = writer
.draining_started_at_epoch_secs
.load(Ordering::Relaxed);
let drain_deadline_epoch_secs = writer
.drain_deadline_epoch_secs
.load(Ordering::Relaxed);
let drain_started_at_epoch_secs =
(drain_started_at_epoch_secs != 0).then_some(drain_started_at_epoch_secs);
let drain_deadline_epoch_secs =
(drain_deadline_epoch_secs != 0).then_some(drain_deadline_epoch_secs);
let drain_over_ttl = draining
&& drain_ttl_secs > 0
&& drain_started_at_epoch_secs
.is_some_and(|started| now_epoch_secs.saturating_sub(started) > drain_ttl_secs);
let state = match WriterContour::from_u8(writer.contour.load(Ordering::Relaxed)) {
WriterContour::Warm => "warm",
WriterContour::Active => "active",
@@ -273,6 +305,9 @@ impl MePool {
entry.0 += ema_ms;
entry.1 += 1;
}
if matches_active_generation && in_desired_map {
*fresh_writers_by_dc.entry(dc_idx).or_insert(0) += 1;
}
}
}
@@ -287,6 +322,12 @@ impl MePool {
bound_clients,
idle_for_secs,
rtt_ema_ms,
matches_active_generation,
in_desired_map,
allow_drain_fallback,
drain_started_at_epoch_secs,
drain_deadline_epoch_secs,
drain_over_ttl,
});
}
@@ -295,6 +336,7 @@ impl MePool {
let mut dcs = Vec::<MeApiDcStatusSnapshot>::with_capacity(endpoints_by_dc.len());
let mut available_endpoints = 0usize;
let mut alive_writers = 0usize;
let mut fresh_alive_writers = 0usize;
let floor_mode = self.floor_mode();
let adaptive_cpu_cores = (self
.me_adaptive_floor_cpu_cores_effective
@@ -333,6 +375,7 @@ impl MePool {
let floor_capped = matches!(floor_mode, MeFloorMode::Adaptive)
&& dc_required_writers < base_required;
let dc_alive_writers = live_writers_by_dc.get(&dc).copied().unwrap_or(0);
let dc_fresh_alive_writers = fresh_writers_by_dc.get(&dc).copied().unwrap_or(0);
let dc_load = activity
.active_sessions_by_target_dc
.get(&dc)
@@ -344,6 +387,7 @@ impl MePool {
available_endpoints += dc_available_endpoints;
alive_writers += dc_alive_writers;
fresh_alive_writers += dc_fresh_alive_writers;
dcs.push(MeApiDcStatusSnapshot {
dc,
@@ -367,6 +411,8 @@ impl MePool {
floor_capped,
alive_writers: dc_alive_writers,
coverage_pct: ratio_pct(dc_alive_writers, dc_required_writers),
fresh_alive_writers: dc_fresh_alive_writers,
fresh_coverage_pct: ratio_pct(dc_fresh_alive_writers, dc_required_writers),
rtt_ms: dc_rtt_ms,
load: dc_load,
});
@@ -381,6 +427,8 @@ impl MePool {
required_writers,
alive_writers,
coverage_pct: ratio_pct(alive_writers, required_writers),
fresh_alive_writers,
fresh_coverage_pct: ratio_pct(fresh_alive_writers, required_writers),
writers: writer_rows,
dcs,
}

View File

@@ -163,6 +163,7 @@ impl MePool {
let writer = MeWriter {
id: writer_id,
addr,
source_ip: hs.source_ip,
writer_dc,
generation,
contour: contour.clone(),
@@ -177,6 +178,7 @@ impl MePool {
allow_drain_fallback: allow_drain_fallback.clone(),
};
self.writers.write().await.push(writer.clone());
self.registry.register_writer(writer_id, tx.clone()).await;
self.registry.mark_writer_idle(writer_id).await;
self.conn_count.fetch_add(1, Ordering::Relaxed);
self.writer_available.notify_one();
@@ -413,9 +415,15 @@ impl MePool {
};
let (conn_id, mut service_rx) = pool.registry.register().await;
pool.registry
.bind_writer(conn_id, writer_id, tx_signal.clone(), meta.clone())
.await;
if !pool
.registry
.bind_writer(conn_id, writer_id, meta.clone())
.await
{
let _ = pool.registry.unregister(conn_id).await;
stats_signal.increment_me_rpc_proxy_req_signal_skipped_no_meta_total();
continue;
}
let payload = build_proxy_req_payload(
conn_id,
@@ -520,6 +528,12 @@ impl MePool {
self.conn_count.fetch_sub(1, Ordering::Relaxed);
}
}
let conns = self.registry.writer_lost(writer_id).await;
{
let mut tracker = self.ping_tracker.lock().await;
tracker.retain(|_, (_, wid)| *wid != writer_id);
}
self.rtt_stats.lock().await.remove(&writer_id);
if let Some(tx) = close_tx {
let _ = tx.send(WriterCommand::Close).await;
}
@@ -532,8 +546,7 @@ impl MePool {
}
self.trigger_immediate_refill_for_dc(addr, writer_dc);
}
self.rtt_stats.lock().await.remove(&writer_id);
self.registry.writer_lost(writer_id).await
conns
}
pub(crate) async fn mark_writer_draining_with_timeout(

View File

@@ -138,6 +138,15 @@ impl ConnRegistry {
(id, rx)
}
/// Register a live writer's command channel with the registry.
///
/// Also pre-creates an empty connection set for `writer_id` so the writer is
/// visible to activity/idle bookkeeping even before any conn is bound to it.
pub async fn register_writer(&self, writer_id: u64, tx: mpsc::Sender<WriterCommand>) {
let mut inner = self.inner.write().await;
// `insert` replaces any previous channel stored under this writer id.
inner.writers.insert(writer_id, tx);
// Ensure a conn set exists (possibly empty) without clobbering an existing one.
inner
.conns_for_writer
.entry(writer_id)
.or_insert_with(HashSet::new);
}
/// Unregister connection, returning associated writer_id if any.
pub async fn unregister(&self, id: u64) -> Option<u64> {
let mut inner = self.inner.write().await;
@@ -282,24 +291,39 @@ impl ConnRegistry {
}
}
pub async fn bind_writer(
&self,
conn_id: u64,
writer_id: u64,
tx: mpsc::Sender<WriterCommand>,
meta: ConnMeta,
) {
pub async fn bind_writer(&self, conn_id: u64, writer_id: u64, meta: ConnMeta) -> bool {
let mut inner = self.inner.write().await;
inner.meta.entry(conn_id).or_insert(meta.clone());
inner.writer_for_conn.insert(conn_id, writer_id);
if !inner.writers.contains_key(&writer_id) {
return false;
}
let previous_writer_id = inner.writer_for_conn.insert(conn_id, writer_id);
if let Some(previous_writer_id) = previous_writer_id
&& previous_writer_id != writer_id
{
let became_empty = if let Some(set) = inner.conns_for_writer.get_mut(&previous_writer_id)
{
set.remove(&conn_id);
set.is_empty()
} else {
false
};
if became_empty {
inner
.writer_idle_since_epoch_secs
.insert(previous_writer_id, Self::now_epoch_secs());
}
}
inner.meta.insert(conn_id, meta.clone());
inner.last_meta_for_writer.insert(writer_id, meta);
inner.writer_idle_since_epoch_secs.remove(&writer_id);
inner.writers.entry(writer_id).or_insert_with(|| tx.clone());
inner
.conns_for_writer
.entry(writer_id)
.or_insert_with(HashSet::new)
.insert(conn_id);
true
}
pub async fn mark_writer_idle(&self, writer_id: u64) {
@@ -384,6 +408,9 @@ impl ConnRegistry {
let mut out = Vec::new();
for conn_id in conns {
if inner.writer_for_conn.get(&conn_id).copied() != Some(writer_id) {
continue;
}
inner.writer_for_conn.remove(&conn_id);
if let Some(m) = inner.meta.get(&conn_id) {
out.push(BoundConn {
@@ -427,47 +454,52 @@ mod tests {
let (conn_c, _rx_c) = registry.register().await;
let (writer_tx_a, _writer_rx_a) = tokio::sync::mpsc::channel(8);
let (writer_tx_b, _writer_rx_b) = tokio::sync::mpsc::channel(8);
registry.register_writer(10, writer_tx_a.clone()).await;
registry.register_writer(20, writer_tx_b.clone()).await;
let addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), 443);
registry
.bind_writer(
conn_a,
10,
writer_tx_a.clone(),
ConnMeta {
target_dc: 2,
client_addr: addr,
our_addr: addr,
proto_flags: 0,
},
)
.await;
registry
.bind_writer(
conn_b,
10,
writer_tx_a,
ConnMeta {
target_dc: -2,
client_addr: addr,
our_addr: addr,
proto_flags: 0,
},
)
.await;
registry
.bind_writer(
conn_c,
20,
writer_tx_b,
ConnMeta {
target_dc: 4,
client_addr: addr,
our_addr: addr,
proto_flags: 0,
},
)
.await;
assert!(
registry
.bind_writer(
conn_a,
10,
ConnMeta {
target_dc: 2,
client_addr: addr,
our_addr: addr,
proto_flags: 0,
},
)
.await
);
assert!(
registry
.bind_writer(
conn_b,
10,
ConnMeta {
target_dc: -2,
client_addr: addr,
our_addr: addr,
proto_flags: 0,
},
)
.await
);
assert!(
registry
.bind_writer(
conn_c,
20,
ConnMeta {
target_dc: 4,
client_addr: addr,
our_addr: addr,
proto_flags: 0,
},
)
.await
);
let snapshot = registry.writer_activity_snapshot().await;
assert_eq!(snapshot.bound_clients_by_writer.get(&10), Some(&2));
@@ -476,4 +508,130 @@ mod tests {
assert_eq!(snapshot.active_sessions_by_target_dc.get(&-2), Some(&1));
assert_eq!(snapshot.active_sessions_by_target_dc.get(&4), Some(&1));
}
// Verifies that re-binding a conn from writer 10 to writer 20 in a single
// bind_writer call: updates the writer binding, replaces the conn meta
// (our_addr / proto_flags), moves the bound-client count from 10 to 20,
// and marks the now-empty writer 10 as idle.
#[tokio::test]
async fn bind_writer_rebinds_conn_atomically() {
let registry = ConnRegistry::new();
let (conn_id, _rx) = registry.register().await;
let (writer_tx_a, _writer_rx_a) = tokio::sync::mpsc::channel(8);
let (writer_tx_b, _writer_rx_b) = tokio::sync::mpsc::channel(8);
registry.register_writer(10, writer_tx_a).await;
registry.register_writer(20, writer_tx_b).await;
let client_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), 443);
// Distinct our_addr/proto_flags per bind so we can observe which meta wins.
let first_our_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(1, 1, 1, 1)), 443);
let second_our_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(2, 2, 2, 2)), 443);
assert!(
registry
.bind_writer(
conn_id,
10,
ConnMeta {
target_dc: 2,
client_addr,
our_addr: first_our_addr,
proto_flags: 1,
},
)
.await
);
// Rebind the same conn to a different writer with different meta.
assert!(
registry
.bind_writer(
conn_id,
20,
ConnMeta {
target_dc: 2,
client_addr,
our_addr: second_our_addr,
proto_flags: 2,
},
)
.await
);
let writer = registry.get_writer(conn_id).await.expect("writer binding");
assert_eq!(writer.writer_id, 20);
// Meta must reflect the second bind, not the first.
let meta = registry.get_meta(conn_id).await.expect("conn meta");
assert_eq!(meta.our_addr, second_our_addr);
assert_eq!(meta.proto_flags, 2);
let snapshot = registry.writer_activity_snapshot().await;
assert_eq!(snapshot.bound_clients_by_writer.get(&10), Some(&0));
assert_eq!(snapshot.bound_clients_by_writer.get(&20), Some(&1));
// Writer 10 lost its last conn during the rebind, so it must be marked idle.
assert!(registry.writer_idle_since_snapshot().await.contains_key(&10));
}
// Verifies that losing a writer does not evict a conn that has already been
// rebound to another writer: writer_lost(10) must return no conns, and the
// conn must remain bound to writer 20 until it is unregistered.
#[tokio::test]
async fn writer_lost_does_not_drop_rebound_conn() {
let registry = ConnRegistry::new();
let (conn_id, _rx) = registry.register().await;
let (writer_tx_a, _writer_rx_a) = tokio::sync::mpsc::channel(8);
let (writer_tx_b, _writer_rx_b) = tokio::sync::mpsc::channel(8);
registry.register_writer(10, writer_tx_a).await;
registry.register_writer(20, writer_tx_b).await;
let addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), 443);
assert!(
registry
.bind_writer(
conn_id,
10,
ConnMeta {
target_dc: 2,
client_addr: addr,
our_addr: addr,
proto_flags: 0,
},
)
.await
);
// Move the conn from writer 10 to writer 20 before declaring 10 lost.
assert!(
registry
.bind_writer(
conn_id,
20,
ConnMeta {
target_dc: 2,
client_addr: addr,
our_addr: addr,
proto_flags: 1,
},
)
.await
);
// The conn no longer belongs to writer 10, so nothing should be reported lost.
let lost = registry.writer_lost(10).await;
assert!(lost.is_empty());
assert_eq!(registry.get_writer(conn_id).await.expect("writer").writer_id, 20);
// Unregistering the conn reports its current writer (20) and empties it.
let removed_writer = registry.unregister(conn_id).await;
assert_eq!(removed_writer, Some(20));
assert!(registry.is_writer_empty(20).await);
}
// Verifies that bind_writer refuses (returns false) to bind a conn to a
// writer id that was never registered, and leaves the conn unbound.
#[tokio::test]
async fn bind_writer_rejects_unregistered_writer() {
let registry = ConnRegistry::new();
let (conn_id, _rx) = registry.register().await;
let addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), 443);
// Writer id 10 was never passed to register_writer, so the bind must fail.
assert!(
!registry
.bind_writer(
conn_id,
10,
ConnMeta {
target_dc: 2,
client_addr: addr,
our_addr: addr,
proto_flags: 0,
},
)
.await
);
// The failed bind must not leave any writer association behind.
assert!(registry.get_writer(conn_id).await.is_none());
}
}

View File

@@ -1,5 +1,5 @@
use std::collections::VecDeque;
use std::net::SocketAddr;
use std::collections::{HashMap, VecDeque};
use std::net::{IpAddr, SocketAddr};
use std::sync::{Mutex, OnceLock};
use std::time::{SystemTime, UNIX_EPOCH};
@@ -45,6 +45,16 @@ pub(crate) struct MeBndSnapshot {
pub last_seen_age_secs: Option<u64>,
}
/// Read-only snapshot of the latest BND self-test result for one upstream,
/// as exposed by `upstream_bnd_snapshots()`.
#[derive(Clone, Debug)]
pub(crate) struct MeUpstreamBndSnapshot {
// Index identifying the upstream this snapshot belongs to.
pub upstream_id: usize,
// Human-readable status strings (produced via `as_str()` on the status enums).
pub addr_status: &'static str,
pub port_status: &'static str,
// Last observed address / IP, if any probe has completed.
pub last_addr: Option<SocketAddr>,
pub last_ip: Option<IpAddr>,
// Seconds since the last recorded probe, computed at snapshot time.
pub last_seen_age_secs: Option<u64>,
}
#[derive(Clone, Debug, Default)]
pub(crate) struct MeTimeskewSnapshot {
pub max_skew_secs_15m: Option<u64>,
@@ -67,9 +77,19 @@ struct MeSelftestState {
bnd_port_status: BndPortStatus,
bnd_last_addr: Option<SocketAddr>,
bnd_last_seen_epoch_secs: Option<u64>,
upstream_bnd: HashMap<usize, UpstreamBndState>,
timeskew_samples: VecDeque<MeTimeskewSample>,
}
/// Internal mutable state kept per upstream inside `MeSelftestState`;
/// updated by `record_upstream_bnd_status` and read by `upstream_bnd_snapshots`.
#[derive(Clone, Copy, Debug)]
struct UpstreamBndState {
addr_status: BndAddrStatus,
port_status: BndPortStatus,
last_addr: Option<SocketAddr>,
last_ip: Option<IpAddr>,
// Epoch seconds of the most recent status record, if any.
last_seen_epoch_secs: Option<u64>,
}
impl Default for MeSelftestState {
fn default() -> Self {
Self {
@@ -77,6 +97,7 @@ impl Default for MeSelftestState {
bnd_port_status: BndPortStatus::Error,
bnd_last_addr: None,
bnd_last_seen_epoch_secs: None,
upstream_bnd: HashMap::new(),
timeskew_samples: VecDeque::new(),
}
}
@@ -126,6 +147,51 @@ pub(crate) fn bnd_snapshot() -> MeBndSnapshot {
}
}
/// Record the latest BND self-test outcome for `upstream_id`, stamping it
/// with the current epoch time. Replaces any previous entry for that id.
///
/// Best-effort: if the global state mutex is poisoned, the update is
/// silently dropped rather than propagating a panic.
pub(crate) fn record_upstream_bnd_status(
upstream_id: usize,
addr_status: BndAddrStatus,
port_status: BndPortStatus,
last_addr: Option<SocketAddr>,
last_ip: Option<IpAddr>,
) {
let now_epoch_secs = now_epoch_secs();
// Poisoned lock => skip the update; self-test data is advisory only.
let Ok(mut guard) = state().lock() else {
return;
};
guard.upstream_bnd.insert(
upstream_id,
UpstreamBndState {
addr_status,
port_status,
last_addr,
last_ip,
last_seen_epoch_secs: Some(now_epoch_secs),
},
);
}
/// Build per-upstream BND snapshots from the recorded state, sorted by
/// `upstream_id`, with `last_seen_age_secs` computed relative to now.
///
/// Returns an empty vec if the state mutex is poisoned (best-effort read).
pub(crate) fn upstream_bnd_snapshots() -> Vec<MeUpstreamBndSnapshot> {
let now_epoch_secs = now_epoch_secs();
let Ok(guard) = state().lock() else {
return Vec::new();
};
let mut out = Vec::with_capacity(guard.upstream_bnd.len());
for (upstream_id, entry) in &guard.upstream_bnd {
out.push(MeUpstreamBndSnapshot {
upstream_id: *upstream_id,
addr_status: entry.addr_status.as_str(),
port_status: entry.port_status.as_str(),
last_addr: entry.last_addr,
last_ip: entry.last_ip,
// Age = now - last_seen; saturating_sub guards against clock skew
// producing a timestamp in the future.
last_seen_age_secs: entry
.last_seen_epoch_secs
.map(|value| now_epoch_secs.saturating_sub(value)),
});
}
// HashMap iteration order is arbitrary; sort for stable output.
out.sort_by_key(|entry| entry.upstream_id);
out
}
pub(crate) fn record_timeskew_sample(source: &'static str, skew_secs: u64) {
let now_epoch_secs = now_epoch_secs();
let Ok(mut guard) = state().lock() else {

View File

@@ -42,20 +42,30 @@ impl MePool {
tag_override: Option<&[u8]>,
) -> Result<()> {
let tag = tag_override.or(self.proxy_tag.as_deref());
let payload = build_proxy_req_payload(
conn_id,
client_addr,
our_addr,
data,
tag,
proto_flags,
);
let meta = ConnMeta {
let fallback_meta = ConnMeta {
target_dc,
client_addr,
our_addr,
proto_flags,
};
let build_routed_payload = |effective_our_addr: SocketAddr| {
(
build_proxy_req_payload(
conn_id,
client_addr,
effective_our_addr,
data,
tag,
proto_flags,
),
ConnMeta {
target_dc,
client_addr,
our_addr: effective_our_addr,
proto_flags,
},
)
};
let no_writer_mode =
MeRouteNoWriterMode::from_u8(self.me_route_no_writer_mode.load(Ordering::Relaxed));
let (routed_dc, unknown_target_dc) = self
@@ -70,8 +80,14 @@ impl MePool {
let mut hybrid_wait_current = hybrid_wait_step;
loop {
let current_meta = self
.registry
.get_meta(conn_id)
.await
.unwrap_or_else(|| fallback_meta.clone());
let (current_payload, _) = build_routed_payload(current_meta.our_addr);
if let Some(current) = self.registry.get_writer(conn_id).await {
match current.tx.try_send(WriterCommand::Data(payload.clone())) {
match current.tx.try_send(WriterCommand::Data(current_payload.clone())) {
Ok(()) => return Ok(()),
Err(TrySendError::Full(cmd)) => {
if current.tx.send(cmd).await.is_ok() {
@@ -354,12 +370,19 @@ impl MePool {
if !self.writer_accepts_new_binding(w) {
continue;
}
let effective_our_addr = SocketAddr::new(w.source_ip, our_addr.port());
let (payload, meta) = build_routed_payload(effective_our_addr);
match w.tx.try_send(WriterCommand::Data(payload.clone())) {
Ok(()) => {
self.stats.increment_me_writer_pick_success_try_total(pick_mode);
self.registry
.bind_writer(conn_id, w.id, w.tx.clone(), meta.clone())
.await;
if !self.registry.bind_writer(conn_id, w.id, meta).await {
debug!(
conn_id,
writer_id = w.id,
"ME writer disappeared before bind commit, retrying"
);
continue;
}
if w.generation < self.current_generation() {
self.stats.increment_pool_stale_pick_total();
debug!(
@@ -397,13 +420,20 @@ impl MePool {
continue;
}
self.stats.increment_me_writer_pick_blocking_fallback_total();
let effective_our_addr = SocketAddr::new(w.source_ip, our_addr.port());
let (payload, meta) = build_routed_payload(effective_our_addr);
match w.tx.send(WriterCommand::Data(payload.clone())).await {
Ok(()) => {
self.stats
.increment_me_writer_pick_success_fallback_total(pick_mode);
self.registry
.bind_writer(conn_id, w.id, w.tx.clone(), meta.clone())
.await;
if !self.registry.bind_writer(conn_id, w.id, meta).await {
debug!(
conn_id,
writer_id = w.id,
"ME writer disappeared before fallback bind commit, retrying"
);
continue;
}
if w.generation < self.current_generation() {
self.stats.increment_pool_stale_pick_total();
}

View File

@@ -1,6 +1,8 @@
//! TCP Socket Configuration
#[cfg(target_os = "linux")]
use std::collections::HashSet;
#[cfg(target_os = "linux")]
use std::fs;
use std::io::Result;
use std::net::{SocketAddr, IpAddr};
@@ -44,6 +46,7 @@ pub fn configure_tcp_socket(
pub fn configure_client_socket(
stream: &TcpStream,
keepalive_secs: u64,
#[cfg_attr(not(target_os = "linux"), allow(unused_variables))]
ack_timeout_secs: u64,
) -> Result<()> {
let socket = socket2::SockRef::from(stream);
@@ -65,17 +68,27 @@ pub fn configure_client_socket(
// is implemented in relay_bidirectional instead
#[cfg(target_os = "linux")]
{
use std::io::{Error, ErrorKind};
use std::os::unix::io::AsRawFd;
let fd = stream.as_raw_fd();
let timeout_ms = (ack_timeout_secs * 1000) as libc::c_int;
unsafe {
let timeout_ms_u64 = ack_timeout_secs
.checked_mul(1000)
.ok_or_else(|| Error::new(ErrorKind::InvalidInput, "ack_timeout_secs is too large"))?;
let timeout_ms = i32::try_from(timeout_ms_u64)
.map_err(|_| Error::new(ErrorKind::InvalidInput, "ack_timeout_secs exceeds TCP_USER_TIMEOUT range"))?;
let rc = unsafe {
libc::setsockopt(
fd,
libc::IPPROTO_TCP,
libc::TCP_USER_TIMEOUT,
&timeout_ms as *const _ as *const libc::c_void,
&timeout_ms as *const libc::c_int as *const libc::c_void,
std::mem::size_of::<libc::c_int>() as libc::socklen_t,
);
)
};
if rc != 0 {
return Err(Error::last_os_error());
}
}
@@ -373,6 +386,7 @@ fn listening_inodes_for_port(addr: SocketAddr) -> HashSet<u64> {
mod tests {
use super::*;
use std::io::ErrorKind;
use tokio::io::{AsyncReadExt, AsyncWriteExt};
use tokio::net::TcpListener;
#[tokio::test]
@@ -396,6 +410,142 @@ mod tests {
panic!("configure_tcp_socket failed: {e}");
}
}
// Smoke test: configure_client_socket succeeds on a freshly connected
// loopback socket with typical keepalive/ack-timeout values. Any
// PermissionDenied (sandboxed CI environments) aborts the test silently.
#[tokio::test]
async fn test_configure_client_socket() {
let listener = match TcpListener::bind("127.0.0.1:0").await {
Ok(l) => l,
Err(e) if e.kind() == ErrorKind::PermissionDenied => return,
Err(e) => panic!("bind failed: {e}"),
};
let addr = match listener.local_addr() {
Ok(addr) => addr,
Err(e) => panic!("local_addr failed: {e}"),
};
let stream = match TcpStream::connect(addr).await {
Ok(s) => s,
Err(e) if e.kind() == ErrorKind::PermissionDenied => return,
Err(e) => panic!("connect failed: {e}"),
};
if let Err(e) = configure_client_socket(&stream, 30, 30) {
if e.kind() == ErrorKind::PermissionDenied {
return;
}
panic!("configure_client_socket failed: {e}");
}
}
// Edge case: ack_timeout_secs == 0 must be accepted (0 * 1000 cannot
// overflow and fits TCP_USER_TIMEOUT's i32 range). PermissionDenied in
// sandboxed environments is tolerated by returning early.
#[tokio::test]
async fn test_configure_client_socket_zero_ack_timeout() {
let listener = match TcpListener::bind("127.0.0.1:0").await {
Ok(l) => l,
Err(e) if e.kind() == ErrorKind::PermissionDenied => return,
Err(e) => panic!("bind failed: {e}"),
};
let addr = match listener.local_addr() {
Ok(addr) => addr,
Err(e) => panic!("local_addr failed: {e}"),
};
let stream = match TcpStream::connect(addr).await {
Ok(s) => s,
Err(e) if e.kind() == ErrorKind::PermissionDenied => return,
Err(e) => panic!("connect failed: {e}"),
};
if let Err(e) = configure_client_socket(&stream, 30, 0) {
if e.kind() == ErrorKind::PermissionDenied {
return;
}
panic!("configure_client_socket with zero ack timeout failed: {e}");
}
}
// End-to-end check: after configure_client_socket is applied, the stream
// still carries data correctly in both directions (client sends "ping",
// server echoes "pong"). Guards against socket options breaking normal I/O.
#[tokio::test]
async fn test_configure_client_socket_roundtrip_io() {
let listener = match TcpListener::bind("127.0.0.1:0").await {
Ok(l) => l,
Err(e) if e.kind() == ErrorKind::PermissionDenied => return,
Err(e) => panic!("bind failed: {e}"),
};
let addr = match listener.local_addr() {
Ok(addr) => addr,
Err(e) => panic!("local_addr failed: {e}"),
};
// Server side: accept one conn, read 4 bytes, reply with "pong".
let server_task = tokio::spawn(async move {
let (mut accepted, _) = match listener.accept().await {
Ok(v) => v,
Err(e) => panic!("accept failed: {e}"),
};
let mut payload = [0u8; 4];
if let Err(e) = accepted.read_exact(&mut payload).await {
panic!("server read_exact failed: {e}");
}
if let Err(e) = accepted.write_all(b"pong").await {
panic!("server write_all failed: {e}");
}
payload
});
let mut stream = match TcpStream::connect(addr).await {
Ok(s) => s,
Err(e) if e.kind() == ErrorKind::PermissionDenied => return,
Err(e) => panic!("connect failed: {e}"),
};
if let Err(e) = configure_client_socket(&stream, 30, 30) {
if e.kind() == ErrorKind::PermissionDenied {
return;
}
panic!("configure_client_socket failed: {e}");
}
if let Err(e) = stream.write_all(b"ping").await {
panic!("client write_all failed: {e}");
}
let mut reply = [0u8; 4];
if let Err(e) = stream.read_exact(&mut reply).await {
panic!("client read_exact failed: {e}");
}
assert_eq!(&reply, b"pong");
// Confirm the server saw exactly what the client sent.
let server_seen = match server_task.await {
Ok(value) => value,
Err(e) => panic!("server task join failed: {e}"),
};
assert_eq!(&server_seen, b"ping");
}
// Linux-only: an ack_timeout_secs whose millisecond conversion exceeds
// i32::MAX (the TCP_USER_TIMEOUT range) must be rejected up front with
// ErrorKind::InvalidInput instead of being truncated by a cast.
#[cfg(target_os = "linux")]
#[tokio::test]
async fn test_configure_client_socket_ack_timeout_overflow_rejected() {
let listener = match TcpListener::bind("127.0.0.1:0").await {
Ok(l) => l,
Err(e) if e.kind() == ErrorKind::PermissionDenied => return,
Err(e) => panic!("bind failed: {e}"),
};
let addr = match listener.local_addr() {
Ok(addr) => addr,
Err(e) => panic!("local_addr failed: {e}"),
};
let stream = match TcpStream::connect(addr).await {
Ok(s) => s,
Err(e) if e.kind() == ErrorKind::PermissionDenied => return,
Err(e) => panic!("connect failed: {e}"),
};
// Smallest seconds value whose *1000 conversion no longer fits in i32.
let too_large_secs = (i32::MAX as u64 / 1000) + 1;
let err = match configure_client_socket(&stream, 30, too_large_secs) {
Ok(()) => panic!("expected overflow validation error"),
Err(e) => e,
};
assert_eq!(err.kind(), ErrorKind::InvalidInput);
}
#[test]
fn test_normalize_ip() {

View File

@@ -213,6 +213,7 @@ pub struct UpstreamApiPolicySnapshot {
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub struct UpstreamEgressInfo {
pub upstream_id: usize,
pub route_kind: UpstreamRouteKind,
pub local_addr: Option<SocketAddr>,
pub direct_bind_ip: Option<IpAddr>,
@@ -389,7 +390,7 @@ impl UpstreamManager {
out
}
fn resolve_bind_address(
pub(crate) fn resolve_bind_address(
interface: &Option<String>,
bind_addresses: &Option<Vec<String>>,
target: SocketAddr,
@@ -398,7 +399,7 @@ impl UpstreamManager {
) -> Option<IpAddr> {
let want_ipv6 = target.is_ipv6();
if let Some(addrs) = bind_addresses {
if let Some(addrs) = bind_addresses.as_ref().filter(|v| !v.is_empty()) {
let mut candidates: Vec<IpAddr> = addrs
.iter()
.filter_map(|s| s.parse::<IpAddr>().ok())
@@ -430,7 +431,7 @@ impl UpstreamManager {
warn!(
interface = %iface,
target = %target,
"Configured interface has no addresses for target family; falling back to direct connect without bind"
"Configured interface has no addresses for target family"
);
candidates.clear();
}
@@ -453,10 +454,11 @@ impl UpstreamManager {
warn!(
interface = interface.as_deref().unwrap_or(""),
target = %target,
"No valid bind_addresses left for interface; falling back to direct connect without bind"
"No valid bind_addresses left for interface"
);
return None;
}
return None;
}
if let Some(iface) = interface {
@@ -672,7 +674,7 @@ impl UpstreamManager {
self.stats.increment_upstream_connect_attempt_total();
let start = Instant::now();
match self
.connect_via_upstream(&upstream, target, bind_rr.clone(), attempt_timeout)
.connect_via_upstream(idx, &upstream, target, bind_rr.clone(), attempt_timeout)
.await
{
Ok((stream, egress)) => {
@@ -779,6 +781,7 @@ impl UpstreamManager {
async fn connect_via_upstream(
&self,
upstream_id: usize,
config: &UpstreamConfig,
target: SocketAddr,
bind_rr: Option<Arc<AtomicUsize>>,
@@ -793,6 +796,13 @@ impl UpstreamManager {
bind_rr.as_deref(),
true,
);
if bind_ip.is_none()
&& bind_addresses.as_ref().is_some_and(|v| !v.is_empty())
{
return Err(ProxyError::Config(format!(
"No valid bind_addresses for target family {target}"
)));
}
let socket = create_outgoing_socket_bound(target, bind_ip)?;
if let Some(ip) = bind_ip {
@@ -828,6 +838,7 @@ impl UpstreamManager {
Ok((
stream,
UpstreamEgressInfo {
upstream_id,
route_kind: UpstreamRouteKind::Direct,
local_addr,
direct_bind_ip: bind_ip,
@@ -906,6 +917,7 @@ impl UpstreamManager {
Ok((
stream,
UpstreamEgressInfo {
upstream_id,
route_kind: UpstreamRouteKind::Socks4,
local_addr,
direct_bind_ip: None,
@@ -986,6 +998,7 @@ impl UpstreamManager {
Ok((
stream,
UpstreamEgressInfo {
upstream_id,
route_kind: UpstreamRouteKind::Socks5,
local_addr,
direct_bind_ip: None,
@@ -1048,7 +1061,7 @@ impl UpstreamManager {
let result = tokio::time::timeout(
Duration::from_secs(DC_PING_TIMEOUT_SECS),
self.ping_single_dc(upstream_config, Some(bind_rr.clone()), addr_v6)
self.ping_single_dc(*upstream_idx, upstream_config, Some(bind_rr.clone()), addr_v6)
).await;
let ping_result = match result {
@@ -1099,7 +1112,7 @@ impl UpstreamManager {
let result = tokio::time::timeout(
Duration::from_secs(DC_PING_TIMEOUT_SECS),
self.ping_single_dc(upstream_config, Some(bind_rr.clone()), addr_v4)
self.ping_single_dc(*upstream_idx, upstream_config, Some(bind_rr.clone()), addr_v4)
).await;
let ping_result = match result {
@@ -1162,7 +1175,7 @@ impl UpstreamManager {
}
let result = tokio::time::timeout(
Duration::from_secs(DC_PING_TIMEOUT_SECS),
self.ping_single_dc(upstream_config, Some(bind_rr.clone()), addr)
self.ping_single_dc(*upstream_idx, upstream_config, Some(bind_rr.clone()), addr)
).await;
let ping_result = match result {
@@ -1233,6 +1246,7 @@ impl UpstreamManager {
async fn ping_single_dc(
&self,
upstream_id: usize,
config: &UpstreamConfig,
bind_rr: Option<Arc<AtomicUsize>>,
target: SocketAddr,
@@ -1240,6 +1254,7 @@ impl UpstreamManager {
let start = Instant::now();
let _ = self
.connect_via_upstream(
upstream_id,
config,
target,
bind_rr,
@@ -1418,6 +1433,7 @@ impl UpstreamManager {
let result = tokio::time::timeout(
Duration::from_secs(HEALTH_CHECK_CONNECT_TIMEOUT_SECS),
self.connect_via_upstream(
i,
&config,
endpoint,
Some(bind_rr.clone()),
@@ -1634,4 +1650,32 @@ mod tests {
};
assert!(!UpstreamManager::is_hard_connect_error(&error));
}
// When both an interface name and explicit bind_addresses are configured,
// resolve_bind_address must pick the matching-family bind address, not the
// interface.
#[test]
fn resolve_bind_address_prefers_explicit_bind_ip() {
let target = "203.0.113.10:443".parse::<SocketAddr>().unwrap();
let bind = UpstreamManager::resolve_bind_address(
&Some("198.51.100.20".to_string()),
&Some(vec!["198.51.100.10".to_string()]),
target,
None,
true,
);
assert_eq!(bind, Some("198.51.100.10".parse::<IpAddr>().unwrap()));
}
// When bind_addresses are present but none matches the target's address
// family (IPv6 bind addr vs IPv4 target here), resolve_bind_address must
// return None rather than silently falling back to the interface.
#[test]
fn resolve_bind_address_does_not_fallback_to_interface_when_bind_addresses_present() {
let target = "203.0.113.10:443".parse::<SocketAddr>().unwrap();
let bind = UpstreamManager::resolve_bind_address(
&Some("198.51.100.20".to_string()),
&Some(vec!["2001:db8::10".to_string()]),
target,
None,
true,
);
assert_eq!(bind, None);
}
}

View File

@@ -1,16 +0,0 @@
[Unit]
Description=Telemt
After=network-online.target
Wants=network-online.target
[Service]
Type=simple
WorkingDirectory=/etc/telemt
ExecStart=/bin/telemt /etc/telemt.toml
Restart=on-failure
LimitNOFILE=262144
TasksMax=8192
MemoryAccounting=yes
[Install]
WantedBy=multi-user.target