diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml
index 1e8d5a0..116c1d4 100644
--- a/.github/workflows/release.yml
+++ b/.github/workflows/release.yml
@@ -3,8 +3,8 @@ name: Release
on:
push:
tags:
- - '[0-9]+.[0-9]+.[0-9]+' # Matches tags like 3.0.0, 3.1.2, etc.
- workflow_dispatch: # Manual trigger from GitHub Actions UI
+ - '[0-9]+.[0-9]+.[0-9]+' # Matches tags like 3.0.0, 3.1.2, etc.
+ workflow_dispatch: # Manual trigger from GitHub Actions UI
permissions:
contents: read
@@ -84,6 +84,32 @@ jobs:
target/${{ matrix.target }}/release/${{ matrix.asset_name }}.tar.gz
target/${{ matrix.target }}/release/${{ matrix.asset_name }}.sha256
+ build-docker-image:
+ runs-on: ubuntu-latest
+ steps:
+ - name: Checkout
+ uses: actions/checkout@v3
+
+ - name: Set up QEMU
+ uses: docker/setup-qemu-action@v3
+
+ - name: Set up Docker Buildx
+ uses: docker/setup-buildx-action@v2
+
+ - name: Login to GitHub Container Registry
+ uses: docker/login-action@v2
+ with:
+ registry: ghcr.io
+ username: ${{ github.repository_owner }}
+ password: ${{ secrets.TOKEN_GH_DEPLOY }}
+
+ - name: Build and push
+ uses: docker/build-push-action@v6
+ with:
+ context: .
+ push: true
+          tags: ghcr.io/${{ github.repository }}:${{ github.ref_name }}
+
release:
name: Create Release
needs: build
@@ -108,17 +134,17 @@ jobs:
# Extract version from tag (remove 'v' prefix if present)
VERSION="${GITHUB_REF#refs/tags/}"
VERSION="${VERSION#v}"
-
+
# Install cargo-edit for version bumping
cargo install cargo-edit
-
+
# Update Cargo.toml version
cargo set-version "$VERSION"
-
+
# Configure git
git config user.name "github-actions[bot]"
git config user.email "github-actions[bot]@users.noreply.github.com"
-
+
# Commit and push changes
#git add Cargo.toml Cargo.lock
#git commit -m "chore: bump version to $VERSION" || echo "No changes to commit"
diff --git a/AGENTS_SYSTEM_PROMT.md b/AGENTS_SYSTEM_PROMT.md
index cec8c38..e6c5f2e 100644
--- a/AGENTS_SYSTEM_PROMT.md
+++ b/AGENTS_SYSTEM_PROMT.md
@@ -1,6 +1,7 @@
## System Prompt — Production Rust Codebase: Modification and Architecture Guidelines
-You are a senior Rust systems engineer acting as a strict code reviewer and implementation partner. Your responses are precise, minimal, and architecturally sound. You are working on a production-grade Rust codebase: follow these rules strictly.
+You are a senior Rust Engineer and principal Rust Architect acting as a strict code reviewer and implementation partner.
+Your responses are precise, minimal, and architecturally sound. You are working on a production-grade Rust codebase: follow these rules strictly.
---
@@ -32,6 +33,11 @@ The user can override this behavior with explicit commands:
- `"Make minimal changes"` — no coordinated fixes, narrowest possible diff.
- `"Fix everything"` — apply all coordinated fixes and out-of-scope observations.
+### Core Rule
+
+The codebase must never enter an invalid intermediate state.
+No response may leave the repository in a condition that requires follow-up fixes.
+
---
### 1. Comments and Documentation
@@ -131,16 +137,32 @@ You MUST:
- Document non-obvious logic with comments that describe *why*, not *what*.
- Limit changes strictly to the requested scope (plus coordinated fixes per Section 0).
- Keep all existing symbol names unless renaming is explicitly requested.
-- Preserve global formatting as-is.
+- Preserve global formatting as-is.
+- Leave the codebase in a self-contained, compilable, runnable state after every modification
You MUST NOT:
-- Use placeholders: no `// ... rest of code`, no `// implement here`, no `/* TODO */` stubs that replace existing working code. Write full, working implementation. If the implementation is unclear, ask first.
-- Refactor code outside the requested scope.
-- Make speculative improvements.
+- Use placeholders: no `// ... rest of code`, no `// implement here`, no `/* TODO */` stubs that replace existing working code. Write full, working implementation. If the implementation is unclear, ask first
+- Refactor code outside the requested scope
+- Make speculative improvements
+- Spawn multiple agents for EDITING
+- Produce partial changes
+- Introduce references to entities that are not yet implemented
+- Leave TODO placeholders in production paths
Note: `todo!()` and `unimplemented!()` are allowed as idiomatic Rust markers for genuinely unfinished code paths.
+Every change must:
+ - compile,
+ - pass type checks,
+ - have no broken imports,
+ - preserve invariants,
+ - not rely on future patches.
+
+If the task requires multiple phases:
+ - either implement all required phases,
+ - or explicitly refuse and explain missing dependencies.
+
---
### 8. Decision Process for Complex Changes
@@ -160,6 +182,7 @@ When facing a non-trivial modification, follow this sequence:
- When provided with partial code, assume the rest of the codebase exists and functions correctly unless stated otherwise.
- Reference existing types, functions, and module structures by their actual names as shown in the provided code.
- When the provided context is insufficient to make a safe change, request the missing context explicitly.
+- Spawn multiple agents for SEARCHING for information, code, and functions
---
@@ -167,14 +190,14 @@ When facing a non-trivial modification, follow this sequence:
#### Language Policy
-- Code, comments, commit messages, documentation: **English**.
-- Reasoning and explanations in response text: **Russian**.
+- Code, comments, commit messages, documentation: ONLY in **English**!
+- Reasoning and explanations in response text: in the language of the prompt
#### Response Structure
Your response MUST consist of two sections:
-**Section 1: `## Reasoning` (in Russian)**
+**Section 1: `## Reasoning`**
- What needs to be done and why.
- Which files and modules are affected.
@@ -205,3 +228,183 @@ If the response exceeds the output limit:
2. List the files that will be provided in subsequent parts.
3. Wait for user confirmation before continuing.
4. No single file may be split across parts.
+
+## 11. Anti-LLM Degeneration Safeguards (Principal-Paranoid, Visionary)
+
+This section exists to prevent common LLM failure modes: scope creep, semantic drift, cargo-cult refactors, performance regressions, contract breakage, and hidden behavior changes.
+
+### 11.1 Non-Negotiable Invariants
+
+- **No semantic drift:** Do not reinterpret requirements, rename concepts, or change meaning of existing terms.
+- **No “helpful refactors”:** Any refactor not explicitly requested is forbidden.
+- **No architectural drift:** Do not introduce new layers, patterns, abstractions, or “clean architecture” migrations unless requested.
+- **No dependency drift:** Do not add crates, features, or versions unless explicitly requested.
+- **No behavior drift:** If a change could alter runtime behavior, you MUST call it out explicitly in `## Reasoning` and justify it.
+
+### 11.2 Minimal Surface Area Rule
+
+- Touch the smallest number of files possible.
+- Prefer local changes over cross-cutting edits.
+- Do not “align style” across a file/module—only adjust the modified region.
+- Do not reorder items, imports, or code unless required for correctness.
+
+### 11.3 No Implicit Contract Changes
+
+Contracts include:
+- public APIs, trait bounds, visibility, error types, timeouts/retries, logging semantics, metrics semantics,
+- protocol formats, framing, padding, keepalive cadence, state machine transitions,
+- concurrency guarantees, cancellation behavior, backpressure behavior.
+
+Rule:
+- If you change a contract, you MUST update all dependents in the same patch AND document the contract delta explicitly.
+
+### 11.4 Hot-Path Preservation (Performance Paranoia)
+
+- Do not introduce extra allocations, cloning, or formatting in hot paths.
+- Do not add logging/metrics on hot paths unless requested.
+- Do not add new locks or broaden lock scope.
+- Prefer `&str` / slices / borrowed data where the codebase already does so.
+- Avoid `String` building for errors/logs if it changes current patterns.
+
+If you cannot prove performance neutrality, label it as risk in `## Reasoning`.
+
+### 11.5 Async / Concurrency Safety (Cancellation & Backpressure)
+
+- No blocking calls inside async contexts.
+- Preserve cancellation safety: do not introduce `await` between lock acquisition and critical invariants unless already present.
+- Preserve backpressure: do not replace bounded channels with unbounded, do not remove flow control.
+- Do not change task lifecycle semantics (spawn patterns, join handles, shutdown order) unless requested.
+- Do not introduce `tokio::spawn` / background tasks unless explicitly requested.
+
+### 11.6 Error Semantics Integrity
+
+- Do not replace structured errors with generic strings.
+- Do not widen/narrow error types or change error categories without explicit approval.
+- Avoid introducing panics in production paths (`unwrap`, `expect`) unless the codebase already treats that path as impossible and documented.
+
+### 11.7 “No New Abstractions” Default
+
+Default stance:
+- No new traits, generics, macros, builder patterns, type-level cleverness, or “frameworking”.
+- If abstraction is necessary, prefer the smallest possible local helper (private function) and justify it.
+
+### 11.8 Negative-Diff Protection
+
+Avoid “diff inflation” patterns:
+- mass edits,
+- moving code between files,
+- rewrapping long lines,
+- rearranging module order,
+- renaming for aesthetics.
+
+If a diff becomes large, STOP and ask before proceeding.
+
+### 11.9 Consistency with Existing Style (But Not Style Refactors)
+
+- Follow existing conventions of the touched module (naming, error style, return patterns).
+- Do not enforce global “best practices” that the codebase does not already use.
+
+### 11.10 Two-Phase Safety Gate (Plan → Patch)
+
+For non-trivial changes:
+1) Provide a micro-plan (1–5 bullets): what files, what functions, what invariants, what risks.
+2) Implement exactly that plan—no extra improvements.
+
+### 11.11 Pre-Response Checklist (Hard Gate)
+
+Before final output, verify internally:
+
+- No unresolved symbols / broken imports.
+- No partially updated call sites.
+- No new public surface changes unless requested.
+- No transitional states / TODO placeholders replacing working code.
+- Changes are atomic: the repository remains buildable and runnable.
+- Any behavior change is explicitly stated.
+
+If any check fails: fix it before responding.
+
+### 11.12 Truthfulness Policy (No Hallucinated Claims)
+
+- Do not claim “this compiles” or “tests pass” unless you actually verified with the available tooling/context.
+- If verification is not possible, state: “Not executed; reasoning-based consistency check only.”
+
+### 11.13 Visionary Guardrail: Preserve Optionality
+
+When multiple valid designs exist, prefer the one that:
+- minimally constrains future evolution,
+- preserves existing extension points,
+- avoids locking the project into a new paradigm,
+- keeps interfaces stable and implementation local.
+
+Default to reversible changes.
+
+### 11.14 Stop Conditions
+
+STOP and ask targeted questions if:
+- required context is missing,
+- a change would cross module boundaries,
+- a contract might change,
+- concurrency/protocol invariants are unclear,
+- the diff is growing beyond a minimal patch.
+
+No guessing.
+
+### 12. Invariant Preservation
+
+You MUST explicitly preserve:
+- Thread-safety guarantees (`Send` / `Sync` expectations).
+- Memory safety assumptions (no hidden `unsafe` expansions).
+- Lock ordering and deadlock invariants.
+- State machine correctness (no new invalid transitions).
+- Backward compatibility of serialized formats (if applicable).
+
+If a change touches concurrency, networking, protocol logic, or state machines,
+you MUST explain why existing invariants remain valid.
+
+### 13. Error Handling Policy
+
+- Do not replace structured errors with generic strings.
+- Preserve existing error propagation semantics.
+- Do not widen or narrow error types without approval.
+- Avoid introducing panics in production paths.
+- Prefer explicit error mapping over implicit conversions.
+
+### 14. Test Safety
+
+- Do not modify existing tests unless the task explicitly requires it.
+- Do not weaken assertions.
+- Preserve determinism in testable components.
+
+### 15. Security Constraints
+
+- Do not weaken cryptographic assumptions.
+- Do not modify key derivation logic without explicit request.
+- Do not change constant-time behavior.
+- Do not introduce logging of secrets.
+- Preserve TLS/MTProto protocol correctness.
+
+### 16. Logging Policy
+
+- Do not introduce excessive logging in hot paths.
+- Do not log sensitive data.
+- Preserve existing log levels and style.
+
+### 17. Pre-Response Verification Checklist
+
+Before producing the final answer, verify internally:
+
+- The change compiles conceptually.
+- No unresolved symbols exist.
+- All modified call sites are updated.
+- No accidental behavioral changes were introduced.
+- Architectural boundaries remain intact.
+
+### 18. Atomic Change Principle
+Every patch must be **atomic and production-safe**.
+* **Self-contained** — no dependency on future patches or unimplemented components.
+* **Build-safe** — the project must compile successfully after the change.
+* **Contract-consistent** — no partial interface or behavioral changes; all dependent code must be updated within the same patch.
+* **No transitional states** — no placeholders, incomplete refactors, or temporary inconsistencies.
+
+**Invariant:** After any single patch, the repository remains fully functional and buildable.
+
diff --git a/Cargo.toml b/Cargo.toml
index f8ee25f..15563dc 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -1,6 +1,6 @@
[package]
name = "telemt"
-version = "3.0.8"
+version = "3.0.11"
edition = "2024"
[dependencies]
@@ -20,6 +20,7 @@ sha1 = "0.10"
md-5 = "0.10"
hmac = "0.12"
crc32fast = "1.4"
+crc32c = "0.6"
zeroize = { version = "1.8", features = ["derive"] }
# Network
diff --git a/README.md b/README.md
index 3705856..a88b8df 100644
--- a/README.md
+++ b/README.md
@@ -10,41 +10,77 @@
### 🇷🇺 RU
-18 февраля мы опубликовали `telemt 3.0.3`, он имеет:
+#### Драфтинг LTS и текущие улучшения
-- улучшенный механизм Middle-End Health Check
-- высокоскоростное восстановление инициализации Middle-End
-- меньше задержек на hot-path
-- более корректную работу в Dualstack, а именно - IPv6 Middle-End
-- аккуратное переподключение клиента без дрифта сессий между Middle-End
-- автоматическая деградация на Direct-DC при массовой (>2 ME-DC-групп) недоступности Middle-End
-- автодетект IP за NAT, при возможности - будет выполнен хендшейк с ME, при неудаче - автодеградация
-- единственный известный специальный DC=203 уже добавлен в код: медиа загружаются с CDN в Direct-DC режиме
+С 21 февраля мы начали подготовку LTS-версии.
-[Здесь вы можете найти релиз](https://github.com/telemt/telemt/releases/tag/3.0.3)
+Мы внимательно анализируем весь доступный фидбек.
+Наша цель — сделать LTS-кандидаты максимально стабильными, тщательно отлаженными и готовыми к long-run и highload production-сценариям.
-Если у вас есть компетенции в асинхронных сетевых приложениях, анализе трафика, реверс-инжиниринге или сетевых расследованиях - мы открыты к идеям и pull requests!
+---
+#### Улучшения от 23 февраля
+
+23 февраля были внесены улучшения производительности в режимах **DC** и **Middle-End (ME)**, с акцентом на обратный канал (путь клиент → DC / ME).
+
+Дополнительно реализован ряд изменений, направленных на повышение устойчивости системы:
+
+- Смягчение сетевой нестабильности
+- Повышение устойчивости к десинхронизации криптографии
+- Снижение дрейфа сессий при неблагоприятных условиях
+- Улучшение обработки ошибок в edge-case транспортных сценариях
+
+Релиз:
+[3.0.9](https://github.com/telemt/telemt/releases/tag/3.0.9)
+
+---
+
+Если у вас есть компетенции в:
+
+- Асинхронных сетевых приложениях
+- Анализе трафика
+- Реверс-инжиниринге
+- Сетевых расследованиях
+
+Мы открыты к архитектурным предложениям, идеям и pull requests
### 🇬🇧 EN
-On February 18, we released `telemt 3.0.3`. This version introduces:
+#### LTS Drafting and Ongoing Improvements
-- improved Middle-End Health Check method
-- high-speed recovery of Middle-End init
-- reduced latency on the hot path
-- correct Dualstack support: proper handling of IPv6 Middle-End
-- *clean* client reconnection without session "drift" between Middle-End
-- automatic degradation to Direct-DC mode in case of large-scale (>2 ME-DC groups) Middle-End unavailability
-- automatic public IP detection behind NAT; first - Middle-End handshake is performed, otherwise automatic degradation is applied
-- known special DC=203 is now handled natively: media is delivered from the CDN via Direct-DC mode
+Starting February 21, we began drafting the upcoming LTS version.
-[Release is available here](https://github.com/telemt/telemt/releases/tag/3.0.3)
+We are carefully reviewing and analyzing all available feedback.
+The goal is to ensure that LTS candidates are maximally stable, thoroughly debugged, and ready for long-run and high-load production scenarios.
-If you have expertise in asynchronous network applications, traffic analysis, reverse engineering, or network forensics - we welcome ideas and pull requests!
+---
+#### February 23 Improvements
+
+On February 23, we introduced performance improvements for both **DC** and **Middle-End (ME)** modes, specifically optimizing the reverse channel (client → DC / ME data path).
+
+Additionally, we implemented a set of robustness enhancements designed to:
+
+- Mitigate network-related instability
+- Improve resilience against cryptographic desynchronization
+- Reduce session drift under adverse conditions
+- Improve error handling in edge-case transport scenarios
+
+Release:
+[3.0.9](https://github.com/telemt/telemt/releases/tag/3.0.9)
+
+---
+
+If you have expertise in:
+
+- Asynchronous network applications
+- Traffic analysis
+- Reverse engineering
+- Network forensics
+
+We welcome ideas, architectural feedback, and pull requests.
@@ -178,147 +214,21 @@ then Ctrl+X -> Y -> Enter to save
```toml
# === General Settings ===
[general]
-fast_mode = true
-use_middle_proxy = true
# ad_tag = "00000000000000000000000000000000"
-# Path to proxy-secret binary (auto-downloaded if missing).
-proxy_secret_path = "proxy-secret"
-# disable_colors = false # Disable colored output in logs (useful for files/systemd)
-
-# === Log Level ===
-# Log level: debug | verbose | normal | silent
-# Can be overridden with --silent or --log-level CLI flags
-# RUST_LOG env var takes absolute priority over all of these
-log_level = "normal"
-
-# === Middle Proxy - ME ===
-# Public IP override for ME KDF when behind NAT; leave unset to auto-detect.
-# middle_proxy_nat_ip = "203.0.113.10"
-# Enable STUN probing to discover public IP:port for ME.
-middle_proxy_nat_probe = true
-# Primary STUN server (host:port); defaults to Telegram STUN when empty.
-middle_proxy_nat_stun = "stun.l.google.com:19302"
-# Optional fallback STUN servers list.
-middle_proxy_nat_stun_servers = ["stun1.l.google.com:19302", "stun2.l.google.com:19302"]
-# Desired number of concurrent ME writers in pool.
-middle_proxy_pool_size = 16
-# Pre-initialized warm-standby ME connections kept idle.
-middle_proxy_warm_standby = 8
-# Ignore STUN/interface mismatch and keep ME enabled even if IP differs.
-stun_iface_mismatch_ignore = false
-# Keepalive padding frames - fl==4
-me_keepalive_enabled = true
-me_keepalive_interval_secs = 25 # Period between keepalives
-me_keepalive_jitter_secs = 5 # Jitter added to interval
-me_keepalive_payload_random = true # Randomize 4-byte payload (vs zeros)
-# Stagger extra ME connections on warmup to de-phase lifecycles.
-me_warmup_stagger_enabled = true
-me_warmup_step_delay_ms = 500 # Base delay between extra connects
-me_warmup_step_jitter_ms = 300 # Jitter for warmup delay
-# Reconnect policy knobs.
-me_reconnect_max_concurrent_per_dc = 1 # Parallel reconnects per DC - EXPERIMENTAL! UNSTABLE!
-me_reconnect_backoff_base_ms = 500 # Backoff start
-me_reconnect_backoff_cap_ms = 30000 # Backoff cap
-me_reconnect_fast_retry_count = 11 # Quick retries before backoff
[general.modes]
classic = false
secure = false
tls = true
-[general.links]
-show = "*"
-# show = ["alice", "bob"] # Only show links for alice and bob
-# show = "*" # Show links for all users
-# public_host = "proxy.example.com" # Host (IP or domain) for tg:// links
-# public_port = 443 # Port for tg:// links (default: server.port)
-
-# === Network Parameters ===
-[network]
-# Enable/disable families: true/false/auto(None)
-ipv4 = true
-ipv6 = false # UNSTABLE WITH ME
-# prefer = 4 or 6
-prefer = 4
-multipath = false # EXPERIMENTAL!
-
-# === Server Binding ===
-[server]
-port = 443
-listen_addr_ipv4 = "0.0.0.0"
-listen_addr_ipv6 = "::"
-# listen_unix_sock = "/var/run/telemt.sock" # Unix socket
-# listen_unix_sock_perm = "0666" # Socket file permissions
-# metrics_port = 9090
-# metrics_whitelist = [
-# "192.168.0.0/24",
-# "172.16.0.0/12",
-# "127.0.0.1/32",
-# "::1/128"
-#]
-
-# Listen on multiple interfaces/IPs - IPv4
-[[server.listeners]]
-ip = "0.0.0.0"
-
-# Listen on multiple interfaces/IPs - IPv6
-[[server.listeners]]
-ip = "::"
-
-# === Timeouts (in seconds) ===
-[timeouts]
-client_handshake = 30
-tg_connect = 10
-client_keepalive = 60
-client_ack = 300
-# Quick ME reconnects for single-address DCs (count and per-attempt timeout, ms).
-me_one_retry = 12
-me_one_timeout_ms = 1200
-
# === Anti-Censorship & Masking ===
[censorship]
tls_domain = "petrovich.ru"
-mask = true
-mask_port = 443
-# mask_host = "petrovich.ru" # Defaults to tls_domain if not set
-# mask_unix_sock = "/var/run/nginx.sock" # Unix socket (mutually exclusive with mask_host)
-fake_cert_len = 2048
-
-# === Access Control & Users ===
-[access]
-replay_check_len = 65536
-replay_window_secs = 1800
-ignore_time_skew = false
[access.users]
# format: "username" = "32_hex_chars_secret"
hello = "00000000000000000000000000000000"
-# [access.user_max_tcp_conns]
-# hello = 50
-
-# [access.user_max_unique_ips]
-# hello = 5
-
-# [access.user_data_quota]
-# hello = 1073741824 # 1 GB
-
-# === Upstreams & Routing ===
-[[upstreams]]
-type = "direct"
-enabled = true
-weight = 10
-
-# [[upstreams]]
-# type = "socks5"
-# address = "127.0.0.1:1080"
-# enabled = false
-# weight = 1
-
-# === DC Address Overrides ===
-# [dc_overrides]
-# "203" = "91.105.192.100:443"
-
```
### Advanced
#### Adtag
diff --git a/src/cli.rs b/src/cli.rs
index 25d14f0..cf98121 100644
--- a/src/cli.rs
+++ b/src/cli.rs
@@ -213,6 +213,7 @@ listen_addr_ipv6 = "::"
[[server.listeners]]
ip = "0.0.0.0"
+# reuse_allow = false # Set true only when intentionally running multiple telemt instances on same port
[[server.listeners]]
ip = "::"
@@ -228,6 +229,7 @@ tls_domain = "{domain}"
mask = true
mask_port = 443
fake_cert_len = 2048
+tls_full_cert_ttl_secs = 90
[access]
replay_check_len = 65536
diff --git a/src/config/defaults.rs b/src/config/defaults.rs
index 2dee3e0..90dd6f9 100644
--- a/src/config/defaults.rs
+++ b/src/config/defaults.rs
@@ -122,6 +122,10 @@ pub(crate) fn default_tls_new_session_tickets() -> u8 {
0
}
+pub(crate) fn default_tls_full_cert_ttl_secs() -> u64 {
+ 90
+}
+
pub(crate) fn default_server_hello_delay_min_ms() -> u64 {
0
}
diff --git a/src/config/load.rs b/src/config/load.rs
index ec8011a..827687a 100644
--- a/src/config/load.rs
+++ b/src/config/load.rs
@@ -227,6 +227,7 @@ impl ProxyConfig {
announce: None,
announce_ip: None,
proxy_protocol: None,
+ reuse_allow: false,
});
}
if let Some(ipv6_str) = &config.server.listen_addr_ipv6 {
@@ -236,6 +237,7 @@ impl ProxyConfig {
announce: None,
announce_ip: None,
proxy_protocol: None,
+ reuse_allow: false,
});
}
}
diff --git a/src/config/types.rs b/src/config/types.rs
index 6c54598..a303db8 100644
--- a/src/config/types.rs
+++ b/src/config/types.rs
@@ -74,8 +74,8 @@ pub struct ProxyModes {
impl Default for ProxyModes {
fn default() -> Self {
Self {
- classic: true,
- secure: true,
+ classic: false,
+ secure: false,
tls: true,
}
}
@@ -118,7 +118,7 @@ impl Default for NetworkConfig {
fn default() -> Self {
Self {
ipv4: true,
- ipv6: None,
+ ipv6: Some(false),
prefer: 4,
multipath: false,
stun_servers: default_stun_servers(),
@@ -291,7 +291,7 @@ impl Default for GeneralConfig {
middle_proxy_nat_stun: None,
middle_proxy_nat_stun_servers: Vec::new(),
middle_proxy_pool_size: default_pool_size(),
- middle_proxy_warm_standby: 0,
+ middle_proxy_warm_standby: 8,
me_keepalive_enabled: true,
me_keepalive_interval_secs: default_keepalive_interval(),
me_keepalive_jitter_secs: default_keepalive_jitter(),
@@ -299,10 +299,10 @@ impl Default for GeneralConfig {
me_warmup_stagger_enabled: true,
me_warmup_step_delay_ms: default_warmup_step_delay_ms(),
me_warmup_step_jitter_ms: default_warmup_step_jitter_ms(),
- me_reconnect_max_concurrent_per_dc: 1,
+ me_reconnect_max_concurrent_per_dc: 4,
me_reconnect_backoff_base_ms: default_reconnect_backoff_base_ms(),
me_reconnect_backoff_cap_ms: default_reconnect_backoff_cap_ms(),
- me_reconnect_fast_retry_count: 1,
+ me_reconnect_fast_retry_count: 8,
stun_iface_mismatch_ignore: false,
unknown_dc_log_path: default_unknown_dc_log_path(),
log_level: LogLevel::Normal,
@@ -474,6 +474,12 @@ pub struct AntiCensorshipConfig {
#[serde(default = "default_tls_new_session_tickets")]
pub tls_new_session_tickets: u8,
+ /// TTL in seconds for sending full certificate payload per client IP.
+ /// First client connection per (SNI domain, client IP) gets full cert payload.
+ /// Subsequent handshakes within TTL use compact cert metadata payload.
+ #[serde(default = "default_tls_full_cert_ttl_secs")]
+ pub tls_full_cert_ttl_secs: u64,
+
/// Enforce ALPN echo of client preference.
#[serde(default = "default_alpn_enforce")]
pub alpn_enforce: bool,
@@ -494,6 +500,7 @@ impl Default for AntiCensorshipConfig {
server_hello_delay_min_ms: default_server_hello_delay_min_ms(),
server_hello_delay_max_ms: default_server_hello_delay_max_ms(),
tls_new_session_tickets: default_tls_new_session_tickets(),
+ tls_full_cert_ttl_secs: default_tls_full_cert_ttl_secs(),
alpn_enforce: default_alpn_enforce(),
}
}
@@ -603,6 +610,10 @@ pub struct ListenerConfig {
/// Per-listener PROXY protocol override. When set, overrides global server.proxy_protocol.
#[serde(default)]
pub proxy_protocol: Option,
+ /// Allow multiple telemt instances to listen on the same IP:port (SO_REUSEPORT).
+ /// Default is false for safety.
+ #[serde(default)]
+ pub reuse_allow: bool,
}
// ============= ShowLink =============
diff --git a/src/crypto/hash.rs b/src/crypto/hash.rs
index 1586e50..d3f6f55 100644
--- a/src/crypto/hash.rs
+++ b/src/crypto/hash.rs
@@ -55,6 +55,11 @@ pub fn crc32(data: &[u8]) -> u32 {
crc32fast::hash(data)
}
+/// CRC32C (Castagnoli)
+pub fn crc32c(data: &[u8]) -> u32 {
+ crc32c::crc32c(data)
+}
+
/// Build the exact prekey buffer used by Telegram Middle Proxy KDF.
///
/// Returned buffer layout (IPv4):
diff --git a/src/crypto/mod.rs b/src/crypto/mod.rs
index 40951c6..266a3cb 100644
--- a/src/crypto/mod.rs
+++ b/src/crypto/mod.rs
@@ -5,5 +5,8 @@ pub mod hash;
pub mod random;
pub use aes::{AesCtr, AesCbc};
-pub use hash::{sha256, sha256_hmac, sha1, md5, crc32, derive_middleproxy_keys, build_middleproxy_prekey};
+pub use hash::{
+ build_middleproxy_prekey, crc32, crc32c, derive_middleproxy_keys, md5, sha1, sha256,
+ sha256_hmac,
+};
pub use random::SecureRandom;
diff --git a/src/crypto/random.rs b/src/crypto/random.rs
index 99aa5f3..f3432e0 100644
--- a/src/crypto/random.rs
+++ b/src/crypto/random.rs
@@ -49,19 +49,32 @@ impl SecureRandom {
}
}
- /// Generate random bytes
- pub fn bytes(&self, len: usize) -> Vec {
+ /// Fill a caller-provided buffer with random bytes.
+ pub fn fill(&self, out: &mut [u8]) {
let mut inner = self.inner.lock();
const CHUNK_SIZE: usize = 512;
-
- while inner.buffer.len() < len {
- let mut chunk = vec![0u8; CHUNK_SIZE];
- inner.rng.fill_bytes(&mut chunk);
- inner.cipher.apply(&mut chunk);
- inner.buffer.extend_from_slice(&chunk);
+
+ let mut written = 0usize;
+ while written < out.len() {
+ if inner.buffer.is_empty() {
+ let mut chunk = vec![0u8; CHUNK_SIZE];
+ inner.rng.fill_bytes(&mut chunk);
+ inner.cipher.apply(&mut chunk);
+ inner.buffer.extend_from_slice(&chunk);
+ }
+
+ let take = (out.len() - written).min(inner.buffer.len());
+ out[written..written + take].copy_from_slice(&inner.buffer[..take]);
+ inner.buffer.drain(..take);
+ written += take;
}
-
- inner.buffer.drain(..len).collect()
+ }
+
+ /// Generate random bytes
+ pub fn bytes(&self, len: usize) -> Vec {
+ let mut out = vec![0u8; len];
+ self.fill(&mut out);
+ out
}
/// Generate random number in range [0, max)
diff --git a/src/main.rs b/src/main.rs
index a9b0e0a..61debb9 100644
--- a/src/main.rs
+++ b/src/main.rs
@@ -38,7 +38,7 @@ use crate::stream::BufferPool;
use crate::transport::middle_proxy::{
MePool, fetch_proxy_config, run_me_ping, MePingFamily, MePingSample, format_sample_line,
};
-use crate::transport::{ListenOptions, UpstreamManager, create_listener};
+use crate::transport::{ListenOptions, UpstreamManager, create_listener, find_listener_processes};
use crate::tls_front::TlsFrontCache;
fn parse_cli() -> (String, bool, Option) {
@@ -265,7 +265,7 @@ async fn main() -> std::result::Result<(), Box> {
}
// Connection concurrency limit
- let _max_connections = Arc::new(Semaphore::new(10_000));
+ let max_connections = Arc::new(Semaphore::new(10_000));
if use_middle_proxy && !decision.ipv4_me && !decision.ipv6_me {
warn!("No usable IP family for Middle Proxy detected; falling back to direct DC");
@@ -715,6 +715,7 @@ match crate::transport::middle_proxy::fetch_proxy_secret(proxy_secret_path).awai
continue;
}
let options = ListenOptions {
+ reuse_port: listener_conf.reuse_allow,
ipv6_only: listener_conf.ip.is_ipv6(),
..Default::default()
};
@@ -753,7 +754,33 @@ match crate::transport::middle_proxy::fetch_proxy_secret(proxy_secret_path).awai
listeners.push((listener, listener_proxy_protocol));
}
Err(e) => {
- error!("Failed to bind to {}: {}", addr, e);
+ if e.kind() == std::io::ErrorKind::AddrInUse {
+ let owners = find_listener_processes(addr);
+ if owners.is_empty() {
+ error!(
+ %addr,
+ "Failed to bind: address already in use (owner process unresolved)"
+ );
+ } else {
+ for owner in owners {
+ error!(
+ %addr,
+ pid = owner.pid,
+ process = %owner.process,
+ "Failed to bind: address already in use"
+ );
+ }
+ }
+
+ if !listener_conf.reuse_allow {
+ error!(
+ %addr,
+ "reuse_allow=false; set [[server.listeners]].reuse_allow=true to allow multi-instance listening"
+ );
+ }
+ } else {
+ error!("Failed to bind to {}: {}", addr, e);
+ }
}
}
}
@@ -817,6 +844,7 @@ match crate::transport::middle_proxy::fetch_proxy_secret(proxy_secret_path).awai
let me_pool = me_pool.clone();
let tls_cache = tls_cache.clone();
let ip_tracker = ip_tracker.clone();
+ let max_connections_unix = max_connections.clone();
tokio::spawn(async move {
let unix_conn_counter = std::sync::Arc::new(std::sync::atomic::AtomicU64::new(1));
@@ -824,6 +852,13 @@ match crate::transport::middle_proxy::fetch_proxy_secret(proxy_secret_path).awai
loop {
match unix_listener.accept().await {
Ok((stream, _)) => {
+ let permit = match max_connections_unix.clone().acquire_owned().await {
+ Ok(permit) => permit,
+ Err(_) => {
+ error!("Connection limiter is closed");
+ break;
+ }
+ };
let conn_id = unix_conn_counter.fetch_add(1, std::sync::atomic::Ordering::Relaxed);
let fake_peer = SocketAddr::from(([127, 0, 0, 1], (conn_id % 65535) as u16));
@@ -839,6 +874,7 @@ match crate::transport::middle_proxy::fetch_proxy_secret(proxy_secret_path).awai
let proxy_protocol_enabled = config.server.proxy_protocol;
tokio::spawn(async move {
+ let _permit = permit;
if let Err(e) = crate::proxy::client::handle_client_stream(
stream, fake_peer, config, stats,
upstream_manager, replay_checker, buffer_pool, rng,
@@ -906,11 +942,19 @@ match crate::transport::middle_proxy::fetch_proxy_secret(proxy_secret_path).awai
let me_pool = me_pool.clone();
let tls_cache = tls_cache.clone();
let ip_tracker = ip_tracker.clone();
+ let max_connections_tcp = max_connections.clone();
tokio::spawn(async move {
loop {
match listener.accept().await {
Ok((stream, peer_addr)) => {
+ let permit = match max_connections_tcp.clone().acquire_owned().await {
+ Ok(permit) => permit,
+ Err(_) => {
+ error!("Connection limiter is closed");
+ break;
+ }
+ };
let config = config_rx.borrow_and_update().clone();
let stats = stats.clone();
let upstream_manager = upstream_manager.clone();
@@ -923,6 +967,7 @@ match crate::transport::middle_proxy::fetch_proxy_secret(proxy_secret_path).awai
let proxy_protocol_enabled = listener_proxy_protocol;
tokio::spawn(async move {
+ let _permit = permit;
if let Err(e) = ClientHandler::new(
stream,
peer_addr,
diff --git a/src/metrics.rs b/src/metrics.rs
index 940a0d8..e00091f 100644
--- a/src/metrics.rs
+++ b/src/metrics.rs
@@ -100,6 +100,14 @@ fn render_metrics(stats: &Stats) -> String {
let _ = writeln!(out, "# TYPE telemt_me_keepalive_failed_total counter");
let _ = writeln!(out, "telemt_me_keepalive_failed_total {}", stats.get_me_keepalive_failed());
+ let _ = writeln!(out, "# HELP telemt_me_keepalive_pong_total ME keepalive pong replies");
+ let _ = writeln!(out, "# TYPE telemt_me_keepalive_pong_total counter");
+ let _ = writeln!(out, "telemt_me_keepalive_pong_total {}", stats.get_me_keepalive_pong());
+
+ let _ = writeln!(out, "# HELP telemt_me_keepalive_timeout_total ME keepalive ping timeouts");
+ let _ = writeln!(out, "# TYPE telemt_me_keepalive_timeout_total counter");
+ let _ = writeln!(out, "telemt_me_keepalive_timeout_total {}", stats.get_me_keepalive_timeout());
+
let _ = writeln!(out, "# HELP telemt_me_reconnect_attempts_total ME reconnect attempts");
let _ = writeln!(out, "# TYPE telemt_me_reconnect_attempts_total counter");
let _ = writeln!(out, "telemt_me_reconnect_attempts_total {}", stats.get_me_reconnect_attempts());
@@ -108,6 +116,30 @@ fn render_metrics(stats: &Stats) -> String {
let _ = writeln!(out, "# TYPE telemt_me_reconnect_success_total counter");
let _ = writeln!(out, "telemt_me_reconnect_success_total {}", stats.get_me_reconnect_success());
+ let _ = writeln!(out, "# HELP telemt_me_crc_mismatch_total ME CRC mismatches");
+ let _ = writeln!(out, "# TYPE telemt_me_crc_mismatch_total counter");
+ let _ = writeln!(out, "telemt_me_crc_mismatch_total {}", stats.get_me_crc_mismatch());
+
+ let _ = writeln!(out, "# HELP telemt_me_seq_mismatch_total ME sequence mismatches");
+ let _ = writeln!(out, "# TYPE telemt_me_seq_mismatch_total counter");
+ let _ = writeln!(out, "telemt_me_seq_mismatch_total {}", stats.get_me_seq_mismatch());
+
+ let _ = writeln!(out, "# HELP telemt_me_route_drop_no_conn_total ME route drops: no conn");
+ let _ = writeln!(out, "# TYPE telemt_me_route_drop_no_conn_total counter");
+ let _ = writeln!(out, "telemt_me_route_drop_no_conn_total {}", stats.get_me_route_drop_no_conn());
+
+ let _ = writeln!(out, "# HELP telemt_me_route_drop_channel_closed_total ME route drops: channel closed");
+ let _ = writeln!(out, "# TYPE telemt_me_route_drop_channel_closed_total counter");
+ let _ = writeln!(out, "telemt_me_route_drop_channel_closed_total {}", stats.get_me_route_drop_channel_closed());
+
+ let _ = writeln!(out, "# HELP telemt_me_route_drop_queue_full_total ME route drops: queue full");
+ let _ = writeln!(out, "# TYPE telemt_me_route_drop_queue_full_total counter");
+ let _ = writeln!(out, "telemt_me_route_drop_queue_full_total {}", stats.get_me_route_drop_queue_full());
+
+ let _ = writeln!(out, "# HELP telemt_secure_padding_invalid_total Invalid secure frame lengths");
+ let _ = writeln!(out, "# TYPE telemt_secure_padding_invalid_total counter");
+ let _ = writeln!(out, "telemt_secure_padding_invalid_total {}", stats.get_secure_padding_invalid());
+
let _ = writeln!(out, "# HELP telemt_user_connections_total Per-user total connections");
let _ = writeln!(out, "# TYPE telemt_user_connections_total counter");
let _ = writeln!(out, "# HELP telemt_user_connections_current Per-user active connections");
diff --git a/src/protocol/constants.rs b/src/protocol/constants.rs
index 826f2b2..c930a1b 100644
--- a/src/protocol/constants.rs
+++ b/src/protocol/constants.rs
@@ -156,14 +156,28 @@ pub const MAX_TLS_RECORD_SIZE: usize = 16384;
/// RFC 8446 §5.2 allows up to 16384 + 256 bytes of ciphertext
pub const MAX_TLS_CHUNK_SIZE: usize = 16384 + 256;
-/// Generate padding length for Secure Intermediate protocol.
-/// Total (data + padding) must not be divisible by 4 per MTProto spec.
-pub fn secure_padding_len(data_len: usize, rng: &SecureRandom) -> usize {
- if data_len % 4 == 0 {
- (rng.range(3) + 1) as usize // 1-3
- } else {
- rng.range(4) as usize // 0-3
+/// Secure Intermediate payload is expected to be 4-byte aligned.
+pub fn is_valid_secure_payload_len(data_len: usize) -> bool {
+ data_len % 4 == 0
+}
+
+/// Compute Secure Intermediate payload length from wire length.
+/// Secure mode strips up to 3 random tail bytes by truncating to 4-byte boundary.
+pub fn secure_payload_len_from_wire_len(wire_len: usize) -> Option<usize> {
+ if wire_len < 4 {
+ return None;
}
+ Some(wire_len - (wire_len % 4))
+}
+
+/// Generate padding length for Secure Intermediate protocol.
+/// Data must be 4-byte aligned; padding is 1..=3 so total is never divisible by 4.
+pub fn secure_padding_len(data_len: usize, rng: &SecureRandom) -> usize {
+ debug_assert!(
+ is_valid_secure_payload_len(data_len),
+ "Secure payload must be 4-byte aligned, got {data_len}"
+ );
+ (rng.range(3) + 1) as usize
}
// ============= Timeouts =============
@@ -297,6 +311,10 @@ pub mod rpc_flags {
pub const FLAG_ABRIDGED: u32 = 0x40000000;
pub const FLAG_QUICKACK: u32 = 0x80000000;
}
+
+ pub mod rpc_crypto_flags {
+ pub const USE_CRC32C: u32 = 0x800;
+ }
pub const ME_CONNECT_TIMEOUT_SECS: u64 = 5;
pub const ME_HANDSHAKE_TIMEOUT_SECS: u64 = 10;
@@ -332,4 +350,43 @@ mod tests {
assert_eq!(TG_DATACENTERS_V4.len(), 5);
assert_eq!(TG_DATACENTERS_V6.len(), 5);
}
+
+ #[test]
+ fn secure_padding_never_produces_aligned_total() {
+ let rng = SecureRandom::new();
+ for data_len in (0..1000).step_by(4) {
+ for _ in 0..100 {
+ let padding = secure_padding_len(data_len, &rng);
+ assert!(
+ padding <= 3,
+ "padding out of range: data_len={data_len}, padding={padding}"
+ );
+ assert_ne!(
+ (data_len + padding) % 4,
+ 0,
+ "invariant violated: data_len={data_len}, padding={padding}, total={}",
+ data_len + padding
+ );
+ }
+ }
+ }
+
+ #[test]
+ fn secure_wire_len_roundtrip_for_aligned_payload() {
+ for payload_len in (4..4096).step_by(4) {
+ for padding in 0..=3usize {
+ let wire_len = payload_len + padding;
+ let recovered = secure_payload_len_from_wire_len(wire_len);
+ assert_eq!(recovered, Some(payload_len));
+ }
+ }
+ }
+
+ #[test]
+ fn secure_wire_len_rejects_too_short_frames() {
+ assert_eq!(secure_payload_len_from_wire_len(0), None);
+ assert_eq!(secure_payload_len_from_wire_len(1), None);
+ assert_eq!(secure_payload_len_from_wire_len(2), None);
+ assert_eq!(secure_payload_len_from_wire_len(3), None);
+ }
}
diff --git a/src/proxy/handshake.rs b/src/proxy/handshake.rs
index 8d48c8b..750d839 100644
--- a/src/proxy/handshake.rs
+++ b/src/proxy/handshake.rs
@@ -2,6 +2,7 @@
use std::net::SocketAddr;
use std::sync::Arc;
+use std::time::Duration;
use tokio::io::{AsyncRead, AsyncWrite, AsyncWriteExt};
use tracing::{debug, warn, trace, info};
use zeroize::Zeroize;
@@ -108,11 +109,23 @@ where
let cached = if config.censorship.tls_emulation {
if let Some(cache) = tls_cache.as_ref() {
- if let Some(sni) = tls::extract_sni_from_client_hello(handshake) {
- Some(cache.get(&sni).await)
+ let selected_domain = if let Some(sni) = tls::extract_sni_from_client_hello(handshake) {
+ if cache.contains_domain(&sni).await {
+ sni
+ } else {
+ config.censorship.tls_domain.clone()
+ }
} else {
- Some(cache.get(&config.censorship.tls_domain).await)
- }
+ config.censorship.tls_domain.clone()
+ };
+ let cached_entry = cache.get(&selected_domain).await;
+ let use_full_cert_payload = cache
+ .take_full_cert_budget_for_ip(
+ peer.ip(),
+ Duration::from_secs(config.censorship.tls_full_cert_ttl_secs),
+ )
+ .await;
+ Some((cached_entry, use_full_cert_payload))
} else {
None
}
@@ -137,12 +150,13 @@ where
None
};
- let response = if let Some(cached_entry) = cached {
+ let response = if let Some((cached_entry, use_full_cert_payload)) = cached {
emulator::build_emulated_server_hello(
secret,
&validation.digest,
&validation.session_id,
&cached_entry,
+ use_full_cert_payload,
rng,
selected_alpn.clone(),
config.censorship.tls_new_session_tickets,
@@ -253,7 +267,11 @@ where
let mode_ok = match proto_tag {
ProtoTag::Secure => {
- if is_tls { config.general.modes.tls } else { config.general.modes.secure }
+ if is_tls {
+ config.general.modes.tls || config.general.modes.secure
+ } else {
+ config.general.modes.secure || config.general.modes.tls
+ }
}
ProtoTag::Intermediate | ProtoTag::Abridged => config.general.modes.classic,
};
diff --git a/src/proxy/middle_relay.rs b/src/proxy/middle_relay.rs
index 0735d01..3b98112 100644
--- a/src/proxy/middle_relay.rs
+++ b/src/proxy/middle_relay.rs
@@ -2,7 +2,7 @@ use std::net::SocketAddr;
use std::sync::Arc;
use tokio::io::{AsyncRead, AsyncReadExt, AsyncWrite, AsyncWriteExt};
-use tokio::sync::oneshot;
+use tokio::sync::{mpsc, oneshot};
use tracing::{debug, info, trace, warn};
use crate::config::ProxyConfig;
@@ -14,6 +14,11 @@ use crate::stats::Stats;
use crate::stream::{BufferPool, CryptoReader, CryptoWriter};
use crate::transport::middle_proxy::{MePool, MeResponse, proto_flags_for_tag};
+enum C2MeCommand {
+ Data { payload: Vec<u8>, flags: u32 },
+ Close,
+}
+
pub(crate) async fn handle_via_middle_proxy(
mut crypto_reader: CryptoReader,
crypto_writer: CryptoWriter,
@@ -59,6 +64,30 @@ where
let frame_limit = config.general.max_client_frame;
+ let (c2me_tx, mut c2me_rx) = mpsc::channel::<C2MeCommand>(1024);
+ let me_pool_c2me = me_pool.clone();
+ let c2me_sender = tokio::spawn(async move {
+ while let Some(cmd) = c2me_rx.recv().await {
+ match cmd {
+ C2MeCommand::Data { payload, flags } => {
+ me_pool_c2me.send_proxy_req(
+ conn_id,
+ success.dc_idx,
+ peer,
+ translated_local_addr,
+ &payload,
+ flags,
+ ).await?;
+ }
+ C2MeCommand::Close => {
+ let _ = me_pool_c2me.send_close(conn_id).await;
+ return Ok(());
+ }
+ }
+ }
+ Ok(())
+ });
+
let (stop_tx, mut stop_rx) = oneshot::channel::<()>();
let mut me_rx_task = me_rx;
let stats_clone = stats.clone();
@@ -66,6 +95,7 @@ where
let user_clone = user.clone();
let me_writer = tokio::spawn(async move {
let mut writer = crypto_writer;
+ let mut frame_buf = Vec::with_capacity(16 * 1024);
loop {
tokio::select! {
msg = me_rx_task.recv() => {
@@ -73,7 +103,44 @@ where
Some(MeResponse::Data { flags, data }) => {
trace!(conn_id, bytes = data.len(), flags, "ME->C data");
stats_clone.add_user_octets_to(&user_clone, data.len() as u64);
- write_client_payload(&mut writer, proto_tag, flags, &data, rng_clone.as_ref()).await?;
+ write_client_payload(
+ &mut writer,
+ proto_tag,
+ flags,
+ &data,
+ rng_clone.as_ref(),
+ &mut frame_buf,
+ )
+ .await?;
+
+ // Drain all immediately queued ME responses and flush once.
+ while let Ok(next) = me_rx_task.try_recv() {
+ match next {
+ MeResponse::Data { flags, data } => {
+ trace!(conn_id, bytes = data.len(), flags, "ME->C data (batched)");
+ stats_clone.add_user_octets_to(&user_clone, data.len() as u64);
+ write_client_payload(
+ &mut writer,
+ proto_tag,
+ flags,
+ &data,
+ rng_clone.as_ref(),
+ &mut frame_buf,
+ ).await?;
+ }
+ MeResponse::Ack(confirm) => {
+ trace!(conn_id, confirm, "ME->C quickack (batched)");
+ write_client_ack(&mut writer, proto_tag, confirm).await?;
+ }
+ MeResponse::Close => {
+ debug!(conn_id, "ME sent close (batched)");
+ let _ = writer.flush().await;
+ return Ok(());
+ }
+ }
+ }
+
+ writer.flush().await.map_err(ProxyError::Io)?;
}
Some(MeResponse::Ack(confirm)) => {
trace!(conn_id, confirm, "ME->C quickack");
@@ -81,6 +148,7 @@ where
}
Some(MeResponse::Close) => {
debug!(conn_id, "ME sent close");
+ let _ = writer.flush().await;
return Ok(());
}
None => {
@@ -99,8 +167,16 @@ where
let mut main_result: Result<()> = Ok(());
let mut client_closed = false;
+ let mut frame_counter: u64 = 0;
loop {
- match read_client_payload(&mut crypto_reader, proto_tag, frame_limit, &user).await {
+ match read_client_payload(
+ &mut crypto_reader,
+ proto_tag,
+ frame_limit,
+ &user,
+ &mut frame_counter,
+ &stats,
+ ).await {
Ok(Some((payload, quickack))) => {
trace!(conn_id, bytes = payload.len(), "C->ME frame");
stats.add_user_octets_from(&user, payload.len() as u64);
@@ -111,22 +187,20 @@ where
if payload.len() >= 8 && payload[..8].iter().all(|b| *b == 0) {
flags |= RPC_FLAG_NOT_ENCRYPTED;
}
- if let Err(e) = me_pool.send_proxy_req(
- conn_id,
- success.dc_idx,
- peer,
- translated_local_addr,
- &payload,
- flags,
- ).await {
- main_result = Err(e);
+ // Keep client read loop lightweight: route heavy ME send path via a dedicated task.
+ if c2me_tx
+ .send(C2MeCommand::Data { payload, flags })
+ .await
+ .is_err()
+ {
+ main_result = Err(ProxyError::Proxy("ME sender channel closed".into()));
break;
}
}
Ok(None) => {
debug!(conn_id, "Client EOF");
client_closed = true;
- let _ = me_pool.send_close(conn_id).await;
+ let _ = c2me_tx.send(C2MeCommand::Close).await;
break;
}
Err(e) => {
@@ -136,6 +210,11 @@ where
}
}
+ drop(c2me_tx);
+ let c2me_result = c2me_sender
+ .await
+ .unwrap_or_else(|e| Err(ProxyError::Proxy(format!("ME sender join error: {e}"))));
+
let _ = stop_tx.send(());
let mut writer_result = me_writer
.await
@@ -151,10 +230,11 @@ where
}
}
- let result = match (main_result, writer_result) {
- (Ok(()), Ok(())) => Ok(()),
- (Err(e), _) => Err(e),
- (_, Err(e)) => Err(e),
+ let result = match (main_result, c2me_result, writer_result) {
+ (Ok(()), Ok(()), Ok(())) => Ok(()),
+ (Err(e), _, _) => Err(e),
+ (_, Err(e), _) => Err(e),
+ (_, _, Err(e)) => Err(e),
};
debug!(user = %user, conn_id, "ME relay cleanup");
@@ -168,73 +248,123 @@ async fn read_client_payload(
proto_tag: ProtoTag,
max_frame: usize,
user: &str,
+ frame_counter: &mut u64,
+ stats: &Stats,
) -> Result<Option<(Vec<u8>, bool)>>