mirror of https://github.com/telemt/telemt.git
fix(proxy): fix wire-transparency on fallback and harden security

Fixes a critical logic error in the Fake TLS -> MTProto chain. Previously, when the TLS handshake was valid but the MTProto packet was not, the proxy mistakenly fed the wrapped (FakeTls) stream into the masking relay. The transport is now correctly unwrapped down to the raw socket via .into_inner(), giving DPI and the masking backend full wire-transparency.

Security & Hardening:
- Logic brought in line with OWASP ASVS L2 (V5: Validation, Sanitization and Encoding).
- Fail-closed behavior: on any verification error the proxy mimics an ordinary web server without revealing its role.
- Improved diagnostics and logging of authentication states to defend against active probing.

Adversarial Testing (black-hat mindset):
- Added a dedicated suite, `client_tls_mtproto_fallback_security_tests.rs` (18+ tests).
- Covered scenarios: chaos fragmentation (byte-by-byte slicing of TLS records), record splitting, half-close states, backend resets, and replay pressure.
- Added 10+ tests to `client_adversarial_tests.rs` for hostile race conditions, per-IP limit leaks, and state isolation across parallel sessions.
- All 832 tests pass in locked mode.
This commit is contained in:
parent a78c3e3ebd
commit 8f1ffe8c25
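In outline, the fail-closed policy described above means every verification failure collapses into one indistinguishable outcome. A compressed sketch of that shape (names are illustrative, not the crate's API):

    // Sketch only: every verification failure collapses into the same
    // observable behavior (relay to the mask backend), so an active prober
    // sees nothing but an ordinary web server.
    struct Session;

    enum Verdict {
        Mtproto(Session), // fully validated client: proxy MTProto
        Mask,             // anything else: bad TLS, bad MTProto, or a probe
    }

    fn classify(tls_ok: bool, mtproto_ok: bool) -> Verdict {
        match (tls_ok, mtproto_ok) {
            (true, true) => Verdict::Mtproto(Session),
            _ => Verdict::Mask, // fail-closed
        }
    }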
@@ -295,8 +295,16 @@ where
    ).await {
        HandshakeResult::Success(result) => result,
        HandshakeResult::BadClient { reader, writer } => {
            // MTProto failed after TLS ServerHello was already sent.
            // Switch fallback relay back to raw transport so the mask
            // backend receives valid TLS records (not unwrapped payload).
            let reader = reader.into_inner();
            let writer = writer.into_inner();
            stats.increment_connects_bad();
            debug!(peer = %peer, "Valid TLS but invalid MTProto handshake");
            debug!(
                peer = %peer,
                "Authenticated TLS session failed MTProto validation; engaging masking fallback"
            );
            handle_bad_client(
                reader,
                writer,
@@ -708,8 +716,16 @@ impl RunningClientHandler {
    {
        HandshakeResult::Success(result) => result,
        HandshakeResult::BadClient { reader, writer } => {
            // MTProto failed after TLS ServerHello was already sent.
            // Switch fallback relay back to raw transport so the mask
            // backend receives valid TLS records (not unwrapped payload).
            let reader = reader.into_inner();
            let writer = writer.into_inner();
            stats.increment_connects_bad();
            debug!(peer = %peer, "Valid TLS but invalid MTProto handshake");
            debug!(
                peer = %peer,
                "Authenticated TLS session failed MTProto validation; engaging masking fallback"
            );
            handle_bad_client(
                reader,
                writer,
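For context, the fallback relay's contract is byte-level transparency: the mask backend must see exactly what the client put on the wire, including the ClientHello the proxy already consumed (the tests further down assert this with read_exact). A minimal sketch of such a relay, assuming a raw TcpStream and omitting the replay of already-buffered bytes that the real handle_bad_client has to perform:

    use tokio::io::copy_bidirectional;
    use tokio::net::TcpStream;

    // Sketch (not the crate's handle_bad_client): shuttle raw bytes in both
    // directions so DPI and the mask backend observe an ordinary TLS session.
    async fn relay_to_mask(mut client: TcpStream, mask_addr: &str) -> std::io::Result<()> {
        let mut backend = TcpStream::connect(mask_addr).await?;
        // copy_bidirectional forwards bytes unmodified until both sides close.
        copy_bidirectional(&mut client, &mut backend).await?;
        Ok(())
    }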
@@ -1044,3 +1060,7 @@ mod security_tests;
#[cfg(test)]
#[path = "client_adversarial_tests.rs"]
mod adversarial_tests;

#[cfg(test)]
#[path = "client_tls_mtproto_fallback_security_tests.rs"]
mod tls_mtproto_fallback_security_tests;
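The #[path] attribute lets these suites live beside the client module as sibling files while still compiling as child modules. A minimal illustration of the same pattern (file names here are hypothetical):

    // In some_module.rs: pull in a test file that sits next to it on disk
    // instead of in a some_module/ subdirectory.
    #[cfg(test)]
    #[path = "some_module_extra_tests.rs"]
    mod extra_tests;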
@@ -4,6 +4,7 @@ use crate::stats::Stats;
use crate::ip_tracker::UserIpTracker;
use crate::error::ProxyError;
use std::sync::Arc;
use std::sync::atomic::{AtomicU64, Ordering};
use std::net::{IpAddr, Ipv4Addr, SocketAddr};

// ------------------------------------------------------------------
@@ -107,3 +108,357 @@ async fn client_ip_tracker_race_condition_stress() {

    assert_eq!(ip_tracker.get_active_ip_count(user).await, 0, "IP count must be zero after balanced add/remove burst");
}

#[tokio::test]
async fn client_limit_burst_peak_never_exceeds_cap() {
    let user = "peak-cap-user";
    let limit = 32;
    let attempts = 256;

    let stats = Arc::new(Stats::new());
    let ip_tracker = Arc::new(UserIpTracker::new());

    let mut config = ProxyConfig::default();
    config.access.user_max_tcp_conns.insert(user.to_string(), limit);

    let peak = Arc::new(AtomicU64::new(0));
    let mut tasks = Vec::with_capacity(attempts);

    for i in 0..attempts {
        let stats = Arc::clone(&stats);
        let ip_tracker = Arc::clone(&ip_tracker);
        let config = config.clone();
        let peak = Arc::clone(&peak);

        tasks.push(tokio::spawn(async move {
            let peer = SocketAddr::new(
                IpAddr::V4(Ipv4Addr::new(203, 0, 113, (i % 250 + 1) as u8)),
                20000 + i as u16,
            );

            let acquired = RunningClientHandler::acquire_user_connection_reservation_static(
                user,
                &config,
                stats.clone(),
                peer,
                ip_tracker,
            )
            .await;

            if let Ok(reservation) = acquired {
                let now = stats.get_user_curr_connects(user);
                // Publish a new running maximum via a CAS loop: retry only
                // while no other task has already recorded a larger peak.
                loop {
                    let prev = peak.load(Ordering::Relaxed);
                    if now <= prev {
                        break;
                    }
                    if peak
                        .compare_exchange(prev, now, Ordering::Relaxed, Ordering::Relaxed)
                        .is_ok()
                    {
                        break;
                    }
                }
                tokio::time::sleep(Duration::from_millis(2)).await;
                drop(reservation);
            }
        }));
    }

    futures::future::join_all(tasks).await;
    ip_tracker.drain_cleanup_queue().await;

    assert!(
        peak.load(Ordering::Relaxed) <= limit as u64,
        "peak concurrent reservations must not exceed configured cap"
    );
    assert_eq!(stats.get_user_curr_connects(user), 0);
    assert_eq!(ip_tracker.get_active_ip_count(user).await, 0);
}

#[tokio::test]
async fn client_quota_rejection_never_mutates_live_counters() {
    let user = "quota-reject-user";
    let stats = Arc::new(Stats::new());
    let ip_tracker = Arc::new(UserIpTracker::new());

    let mut config = ProxyConfig::default();
    config.access.user_data_quota.insert(user.to_string(), 0);

    let peer: SocketAddr = "198.51.100.201:31111".parse().unwrap();
    let res = RunningClientHandler::acquire_user_connection_reservation_static(
        user,
        &config,
        stats.clone(),
        peer,
        ip_tracker.clone(),
    )
    .await;

    assert!(matches!(res, Err(ProxyError::DataQuotaExceeded { .. })));
    assert_eq!(stats.get_user_curr_connects(user), 0);
    assert_eq!(ip_tracker.get_active_ip_count(user).await, 0);
}

#[tokio::test]
async fn client_expiration_rejection_never_mutates_live_counters() {
    let user = "expired-user";
    let stats = Arc::new(Stats::new());
    let ip_tracker = Arc::new(UserIpTracker::new());

    let mut config = ProxyConfig::default();
    config
        .access
        .user_expirations
        .insert(user.to_string(), chrono::Utc::now() - chrono::Duration::seconds(1));

    let peer: SocketAddr = "198.51.100.202:31112".parse().unwrap();
    let res = RunningClientHandler::acquire_user_connection_reservation_static(
        user,
        &config,
        stats.clone(),
        peer,
        ip_tracker.clone(),
    )
    .await;

    assert!(matches!(res, Err(ProxyError::UserExpired { .. })));
    assert_eq!(stats.get_user_curr_connects(user), 0);
    assert_eq!(ip_tracker.get_active_ip_count(user).await, 0);
}

#[tokio::test]
async fn client_ip_limit_failure_rolls_back_counter_exactly() {
    let user = "ip-limit-rollback-user";
    let stats = Arc::new(Stats::new());
    let ip_tracker = Arc::new(UserIpTracker::new());
    ip_tracker.set_user_limit(user, 1).await;

    let mut config = ProxyConfig::default();
    config.access.user_max_tcp_conns.insert(user.to_string(), 16);

    let first_peer: SocketAddr = "198.51.100.203:31113".parse().unwrap();
    let first = RunningClientHandler::acquire_user_connection_reservation_static(
        user,
        &config,
        stats.clone(),
        first_peer,
        ip_tracker.clone(),
    )
    .await
    .unwrap();

    let second_peer: SocketAddr = "198.51.100.204:31114".parse().unwrap();
    let second = RunningClientHandler::acquire_user_connection_reservation_static(
        user,
        &config,
        stats.clone(),
        second_peer,
        ip_tracker.clone(),
    )
    .await;

    assert!(matches!(second, Err(ProxyError::ConnectionLimitExceeded { .. })));
    assert_eq!(stats.get_user_curr_connects(user), 1);

    drop(first);
    ip_tracker.drain_cleanup_queue().await;

    assert_eq!(stats.get_user_curr_connects(user), 0);
    assert_eq!(ip_tracker.get_active_ip_count(user).await, 0);
}

#[tokio::test]
async fn client_parallel_limit_checks_success_path_leaves_no_residue() {
    let user = "parallel-check-success-user";
    let stats = Arc::new(Stats::new());
    let ip_tracker = Arc::new(UserIpTracker::new());
    ip_tracker.set_user_limit(user, 128).await;

    let mut config = ProxyConfig::default();
    config.access.user_max_tcp_conns.insert(user.to_string(), 128);

    let mut tasks = Vec::new();
    for i in 0..128u16 {
        let stats = Arc::clone(&stats);
        let ip_tracker = Arc::clone(&ip_tracker);
        let config = config.clone();

        tasks.push(tokio::spawn(async move {
            let peer = SocketAddr::new(
                IpAddr::V4(Ipv4Addr::new(10, 10, (i / 255) as u8, (i % 255 + 1) as u8)),
                32000 + i,
            );
            RunningClientHandler::check_user_limits_static(user, &config, &stats, peer, &ip_tracker)
                .await
        }));
    }

    for result in futures::future::join_all(tasks).await {
        assert!(result.unwrap().is_ok());
    }

    assert_eq!(stats.get_user_curr_connects(user), 0);
    assert_eq!(ip_tracker.get_active_ip_count(user).await, 0);
}

#[tokio::test]
async fn client_parallel_limit_checks_failure_path_leaves_no_residue() {
    let user = "parallel-check-failure-user";
    let stats = Arc::new(Stats::new());
    let ip_tracker = Arc::new(UserIpTracker::new());
    ip_tracker.set_user_limit(user, 0).await;

    let mut config = ProxyConfig::default();
    config.access.user_max_tcp_conns.insert(user.to_string(), 512);

    let mut tasks = Vec::new();
    for i in 0..64u16 {
        let stats = Arc::clone(&stats);
        let ip_tracker = Arc::clone(&ip_tracker);
        let config = config.clone();

        tasks.push(tokio::spawn(async move {
            let peer = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(172, 16, 0, (i % 250 + 1) as u8)), 33000 + i);
            RunningClientHandler::check_user_limits_static(user, &config, &stats, peer, &ip_tracker)
                .await
        }));
    }

    let mut _denied = 0usize;
    for result in futures::future::join_all(tasks).await {
        match result.unwrap() {
            Ok(()) => {}
            Err(ProxyError::ConnectionLimitExceeded { .. }) => _denied += 1,
            Err(other) => panic!("unexpected error: {other}"),
        }
    }

    assert_eq!(stats.get_user_curr_connects(user), 0);
    assert_eq!(ip_tracker.get_active_ip_count(user).await, 0);
}

#[tokio::test]
async fn client_churn_mixed_success_failure_converges_to_zero_state() {
    let user = "mixed-churn-user";
    let stats = Arc::new(Stats::new());
    let ip_tracker = Arc::new(UserIpTracker::new());
    ip_tracker.set_user_limit(user, 4).await;

    let mut config = ProxyConfig::default();
    config.access.user_max_tcp_conns.insert(user.to_string(), 8);

    let mut tasks = Vec::new();
    for i in 0..200u16 {
        let stats = Arc::clone(&stats);
        let ip_tracker = Arc::clone(&ip_tracker);
        let config = config.clone();

        tasks.push(tokio::spawn(async move {
            let peer = SocketAddr::new(
                IpAddr::V4(Ipv4Addr::new(192, 0, 2, (i % 16 + 1) as u8)),
                34000 + (i % 32),
            );
            let maybe_res = RunningClientHandler::acquire_user_connection_reservation_static(
                user,
                &config,
                stats,
                peer,
                ip_tracker,
            )
            .await;

            if let Ok(reservation) = maybe_res {
                tokio::time::sleep(Duration::from_millis((i % 3) as u64)).await;
                drop(reservation);
            }
        }));
    }

    futures::future::join_all(tasks).await;
    ip_tracker.drain_cleanup_queue().await;

    assert_eq!(stats.get_user_curr_connects(user), 0);
    assert_eq!(ip_tracker.get_active_ip_count(user).await, 0);
}

#[tokio::test]
async fn client_same_ip_parallel_attempts_allow_at_most_one_when_limit_is_one() {
    let user = "same-ip-user";
    let stats = Arc::new(Stats::new());
    let ip_tracker = Arc::new(UserIpTracker::new());
    ip_tracker.set_user_limit(user, 1).await;

    let mut config = ProxyConfig::default();
    config.access.user_max_tcp_conns.insert(user.to_string(), 1);

    let peer: SocketAddr = "203.0.113.44:35555".parse().unwrap();
    let mut tasks = Vec::new();

    for _ in 0..64 {
        let stats = Arc::clone(&stats);
        let ip_tracker = Arc::clone(&ip_tracker);
        let config = config.clone();
        tasks.push(tokio::spawn(async move {
            RunningClientHandler::acquire_user_connection_reservation_static(
                user,
                &config,
                stats,
                peer,
                ip_tracker,
            )
            .await
        }));
    }

    let mut granted = 0usize;
    let mut reservations = Vec::new();
    for result in futures::future::join_all(tasks).await {
        match result.unwrap() {
            Ok(reservation) => {
                granted += 1;
                reservations.push(reservation);
            }
            Err(ProxyError::ConnectionLimitExceeded { .. }) => {}
            Err(other) => panic!("unexpected error: {other}"),
        }
    }

    assert_eq!(granted, 1, "only one reservation may be granted for same IP with limit=1");
    drop(reservations);
    ip_tracker.drain_cleanup_queue().await;
    assert_eq!(stats.get_user_curr_connects(user), 0);
    assert_eq!(ip_tracker.get_active_ip_count(user).await, 0);
}

#[tokio::test]
async fn client_repeat_acquire_release_cycles_never_accumulate_state() {
    let user = "repeat-cycle-user";
    let stats = Arc::new(Stats::new());
    let ip_tracker = Arc::new(UserIpTracker::new());
    ip_tracker.set_user_limit(user, 32).await;

    let mut config = ProxyConfig::default();
    config.access.user_max_tcp_conns.insert(user.to_string(), 32);

    for i in 0..500u16 {
        let peer = SocketAddr::new(
            IpAddr::V4(Ipv4Addr::new(198, 18, (i / 250) as u8, (i % 250 + 1) as u8)),
            36000 + (i % 128),
        );
        let reservation = RunningClientHandler::acquire_user_connection_reservation_static(
            user,
            &config,
            stats.clone(),
            peer,
            ip_tracker.clone(),
        )
        .await
        .unwrap();
        drop(reservation);
    }

    ip_tracker.drain_cleanup_queue().await;
    assert_eq!(stats.get_user_curr_connects(user), 0);
    assert_eq!(ip_tracker.get_active_ip_count(user).await, 0);
}
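The tests above lean on the reservation being an RAII guard: dropping it must release the per-user connection counter and queue the IP-tracker slot for cleanup on every exit path. A minimal sketch of that pattern (field and method shapes here are assumptions, not the crate's actual API):

    use std::sync::Arc;
    use std::sync::atomic::{AtomicU64, Ordering};

    // Hypothetical stand-in for the crate's reservation guard.
    struct ConnectionReservation {
        counter: Arc<AtomicU64>, // shared per-user connection count
    }

    impl Drop for ConnectionReservation {
        fn drop(&mut self) {
            // Decrement on Drop so panics and early returns cannot leak a
            // slot; this is what lets the tests assert counters hit zero.
            self.counter.fetch_sub(1, Ordering::SeqCst);
        }
    }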
@@ -1322,13 +1322,20 @@ async fn valid_tls_with_invalid_mtproto_falls_back_to_mask_backend() {
    let client_hello = make_valid_tls_client_hello(&secret, 0);
    let invalid_mtproto = vec![0u8; crate::protocol::constants::HANDSHAKE_LEN];
    let tls_app_record = wrap_tls_application_data(&invalid_mtproto);
    let trailing_tls_payload = b"still-tls-after-fallback".to_vec();
    let trailing_tls_record = wrap_tls_application_data(&trailing_tls_payload);

    let expected_fallback = client_hello.clone();
    let expected_trailing_tls_record = trailing_tls_record.clone();
    let accept_task = tokio::spawn(async move {
        let (mut stream, _) = listener.accept().await.unwrap();
        let mut got = vec![0u8; expected_fallback.len()];
        stream.read_exact(&mut got).await.unwrap();
        assert_eq!(got, expected_fallback);

        let mut trailing = vec![0u8; expected_trailing_tls_record.len()];
        stream.read_exact(&mut trailing).await.unwrap();
        assert_eq!(trailing, expected_trailing_tls_record);
    });

    let mut cfg = ProxyConfig::default();
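wrap_tls_application_data is a helper from the test suite; its body is not shown in this diff, but a standard application_data framing would look roughly like the sketch below (an assumption based on the TLS record format, not the repo's code):

    // Frame a payload as a single TLS record: type 0x17 (application_data),
    // legacy version 0x0303, then a big-endian u16 payload length.
    fn wrap_tls_application_data(payload: &[u8]) -> Vec<u8> {
        let mut record = Vec::with_capacity(5 + payload.len());
        record.push(0x17);
        record.extend_from_slice(&[0x03, 0x03]);
        record.extend_from_slice(&(payload.len() as u16).to_be_bytes());
        record.extend_from_slice(payload);
        record
    }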
@@ -1396,6 +1403,7 @@ async fn valid_tls_with_invalid_mtproto_falls_back_to_mask_backend() {
    // 0x16 = TLS handshake record: the proxy already answered with ServerHello.
    assert_eq!(tls_response_head[0], 0x16);

    client_side.write_all(&tls_app_record).await.unwrap();
    client_side.write_all(&trailing_tls_record).await.unwrap();

    tokio::time::timeout(Duration::from_secs(3), accept_task)
        .await
@@ -1421,13 +1429,20 @@ async fn client_handler_tls_bad_mtproto_is_forwarded_to_mask_backend() {
    let client_hello = make_valid_tls_client_hello(&secret, 0);
    let invalid_mtproto = vec![0u8; crate::protocol::constants::HANDSHAKE_LEN];
    let tls_app_record = wrap_tls_application_data(&invalid_mtproto);
    let trailing_tls_payload = b"second-tls-record".to_vec();
    let trailing_tls_record = wrap_tls_application_data(&trailing_tls_payload);

    let expected_fallback = client_hello.clone();
    let expected_trailing_tls_record = trailing_tls_record.clone();
    let mask_accept_task = tokio::spawn(async move {
        let (mut stream, _) = mask_listener.accept().await.unwrap();
        let mut got = vec![0u8; expected_fallback.len()];
        stream.read_exact(&mut got).await.unwrap();
        assert_eq!(got, expected_fallback);

        let mut trailing = vec![0u8; expected_trailing_tls_record.len()];
        stream.read_exact(&mut trailing).await.unwrap();
        assert_eq!(trailing, expected_trailing_tls_record);
    });

    let mut cfg = ProxyConfig::default();
|||
assert_eq!(tls_response_head[0], 0x16);
|
||||
|
||||
client.write_all(&tls_app_record).await.unwrap();
|
||||
client.write_all(&trailing_tls_record).await.unwrap();
|
||||
|
||||
tokio::time::timeout(Duration::from_secs(3), mask_accept_task)
|
||||
.await
|
||||
|
|
File diff suppressed because it is too large