moved tests to subdirs

This commit is contained in:
David Osipov
2026-03-20 22:55:19 +04:00
parent 0eca535955
commit 43d7e6e991
54 changed files with 40 additions and 33 deletions

View File

@@ -0,0 +1,669 @@
use super::*;
use crate::config::ProxyConfig;
use crate::stats::Stats;
use crate::ip_tracker::UserIpTracker;
use crate::error::ProxyError;
use std::sync::Arc;
use std::sync::atomic::{AtomicU64, Ordering};
use std::net::{IpAddr, Ipv4Addr, SocketAddr};
// ------------------------------------------------------------------
// Priority 3: Massive Concurrency Stress (OWASP ASVS 5.1.6)
// ------------------------------------------------------------------
// Stress test: fire 1000 concurrent reservation attempts at a per-user TCP cap
// of 512 and require *exact* enforcement — precisely `limit` grants, every
// other attempt rejected with ConnectionLimitExceeded, and full counter
// convergence once all reservations are dropped.
// NOTE(review): the name says "10k connections" but `iterations` is 1000 —
// align the name or the count.
#[tokio::test]
async fn client_stress_10k_connections_limit_strict() {
let user = "stress-user";
let limit = 512;
let stats = Arc::new(Stats::new());
let ip_tracker = Arc::new(UserIpTracker::new());
let mut config = ProxyConfig::default();
config.access.user_max_tcp_conns.insert(user.to_string(), limit);
let iterations = 1000;
let mut tasks = Vec::new();
for i in 0..iterations {
let stats = Arc::clone(&stats);
let ip_tracker = Arc::clone(&ip_tracker);
let config = config.clone();
let user_str = user.to_string();
tasks.push(tokio::spawn(async move {
// Spread attempts over distinct (IP, port) pairs so per-IP bookkeeping is
// exercised alongside the per-user cap.
let peer = SocketAddr::new(
IpAddr::V4(Ipv4Addr::new(127, 0, 0, (i % 254 + 1) as u8)),
10000 + (i % 1000) as u16,
);
match RunningClientHandler::acquire_user_connection_reservation_static(
&user_str,
&config,
stats,
peer,
ip_tracker,
).await {
Ok(res) => Ok(res),
// The limit rejection is the only acceptable failure mode here.
Err(ProxyError::ConnectionLimitExceeded { .. }) => Err(()),
Err(e) => panic!("Unexpected error: {:?}", e),
}
}));
}
let results = futures::future::join_all(tasks).await;
let mut successes = 0;
let mut failures = 0;
// Keep every granted reservation alive so the cap stays saturated while
// the counters are inspected below.
let mut reservations = Vec::new();
for res in results {
match res.unwrap() {
Ok(r) => {
successes += 1;
reservations.push(r);
}
Err(_) => failures += 1,
}
}
assert_eq!(successes, limit, "Should allow exactly 'limit' connections");
assert_eq!(failures, iterations - limit, "Should fail the rest with LimitExceeded");
assert_eq!(stats.get_user_curr_connects(user), limit as u64);
// Dropping every reservation must roll both live counters back to zero.
drop(reservations);
ip_tracker.drain_cleanup_queue().await;
assert_eq!(stats.get_user_curr_connects(user), 0, "Stats must converge to 0 after all drops");
assert_eq!(ip_tracker.get_active_ip_count(user).await, 0, "IP tracker must converge to 0")
}
// ------------------------------------------------------------------
// Priority 3: IP Tracker Race Stress
// ------------------------------------------------------------------
// Hammers the IP tracker with balanced add/remove pairs from 1000 concurrent
// tasks (10 cycles each) and verifies no IP entry survives the burst.
#[tokio::test]
async fn client_ip_tracker_race_condition_stress() {
    let user = "race-user";
    let ip_tracker = Arc::new(UserIpTracker::new());
    ip_tracker.set_user_limit(user, 100).await;
    let iterations = 1000;
    let mut tasks = Vec::with_capacity(iterations);
    for i in 0..iterations {
        let tracker = Arc::clone(&ip_tracker);
        let ip = IpAddr::V4(Ipv4Addr::new(10, 0, 0, (i % 254 + 1) as u8));
        tasks.push(tokio::spawn(async move {
            for _ in 0..10 {
                // Every successful add is immediately undone, so the net
                // effect of the whole burst must be zero tracked IPs.
                if tracker.check_and_add("race-user", ip).await.is_ok() {
                    tracker.remove_ip("race-user", ip).await;
                }
            }
        }));
    }
    futures::future::join_all(tasks).await;
    assert_eq!(
        ip_tracker.get_active_ip_count(user).await,
        0,
        "IP count must be zero after balanced add/remove burst"
    );
}
// Burst test: 256 concurrent acquisition attempts against a cap of 32; the
// *peak* concurrent-connection count any task ever observes must never exceed
// the cap, and all counters must converge to zero once the burst settles.
#[tokio::test]
async fn client_limit_burst_peak_never_exceeds_cap() {
    let user = "peak-cap-user";
    let limit = 32;
    let attempts = 256;
    let stats = Arc::new(Stats::new());
    let ip_tracker = Arc::new(UserIpTracker::new());
    let mut config = ProxyConfig::default();
    config.access.user_max_tcp_conns.insert(user.to_string(), limit);
    // Highest concurrent-connection count observed by any holder.
    let peak = Arc::new(AtomicU64::new(0));
    let mut tasks = Vec::with_capacity(attempts);
    for i in 0..attempts {
        let stats = Arc::clone(&stats);
        let ip_tracker = Arc::clone(&ip_tracker);
        let config = config.clone();
        let peak = Arc::clone(&peak);
        tasks.push(tokio::spawn(async move {
            let peer = SocketAddr::new(
                IpAddr::V4(Ipv4Addr::new(203, 0, 113, (i % 250 + 1) as u8)),
                20000 + i as u16,
            );
            let acquired = RunningClientHandler::acquire_user_connection_reservation_static(
                user,
                &config,
                stats.clone(),
                peer,
                ip_tracker,
            )
            .await;
            if let Ok(reservation) = acquired {
                let now = stats.get_user_curr_connects(user);
                // fetch_max replaces the original hand-rolled load/compare_exchange
                // loop with the equivalent single atomic read-modify-write.
                peak.fetch_max(now, Ordering::Relaxed);
                // Hold the reservation briefly so concurrent holders overlap
                // and the peak is actually contested.
                tokio::time::sleep(Duration::from_millis(2)).await;
                drop(reservation);
            }
        }));
    }
    futures::future::join_all(tasks).await;
    ip_tracker.drain_cleanup_queue().await;
    assert!(
        peak.load(Ordering::Relaxed) <= limit as u64,
        "peak concurrent reservations must not exceed configured cap"
    );
    // All reservations dropped: both live counters must converge to zero.
    assert_eq!(stats.get_user_curr_connects(user), 0);
    assert_eq!(ip_tracker.get_active_ip_count(user).await, 0);
}
// A user whose data quota is already exhausted (quota = 0) must be rejected
// with DataQuotaExceeded, and the rejection must not touch any live counter.
#[tokio::test]
async fn client_quota_rejection_never_mutates_live_counters() {
    let user = "quota-reject-user";
    let stats = Arc::new(Stats::new());
    let ip_tracker = Arc::new(UserIpTracker::new());
    let config = {
        let mut c = ProxyConfig::default();
        c.access.user_data_quota.insert(user.to_string(), 0);
        c
    };
    let peer: SocketAddr = "198.51.100.201:31111".parse().unwrap();
    let outcome = RunningClientHandler::acquire_user_connection_reservation_static(
        user,
        &config,
        Arc::clone(&stats),
        peer,
        Arc::clone(&ip_tracker),
    )
    .await;
    assert!(matches!(outcome, Err(ProxyError::DataQuotaExceeded { .. })));
    // A pre-admission rejection must leave both counters untouched.
    assert_eq!(stats.get_user_curr_connects(user), 0);
    assert_eq!(ip_tracker.get_active_ip_count(user).await, 0);
}
// A user whose expiration timestamp is already in the past must be rejected
// with UserExpired, and the rejection must leave live counters untouched.
#[tokio::test]
async fn client_expiration_rejection_never_mutates_live_counters() {
    let user = "expired-user";
    let stats = Arc::new(Stats::new());
    let ip_tracker = Arc::new(UserIpTracker::new());
    let mut config = ProxyConfig::default();
    // One second in the past — unambiguously expired.
    let just_expired = chrono::Utc::now() - chrono::Duration::seconds(1);
    config.access.user_expirations.insert(user.to_string(), just_expired);
    let peer: SocketAddr = "198.51.100.202:31112".parse().unwrap();
    let outcome = RunningClientHandler::acquire_user_connection_reservation_static(
        user,
        &config,
        Arc::clone(&stats),
        peer,
        Arc::clone(&ip_tracker),
    )
    .await;
    assert!(matches!(outcome, Err(ProxyError::UserExpired { .. })));
    // A pre-admission rejection must leave both counters untouched.
    assert_eq!(stats.get_user_curr_connects(user), 0);
    assert_eq!(ip_tracker.get_active_ip_count(user).await, 0);
}
// With the per-user IP limit at 1 but a generous connection cap (16), a second
// connection from a *different* IP must be denied, and — critically — that
// denial must roll the connection counter back so it still reads exactly 1.
#[tokio::test]
async fn client_ip_limit_failure_rolls_back_counter_exactly() {
let user = "ip-limit-rollback-user";
let stats = Arc::new(Stats::new());
let ip_tracker = Arc::new(UserIpTracker::new());
ip_tracker.set_user_limit(user, 1).await;
let mut config = ProxyConfig::default();
config.access.user_max_tcp_conns.insert(user.to_string(), 16);
// First acquisition occupies the single allowed IP slot.
let first_peer: SocketAddr = "198.51.100.203:31113".parse().unwrap();
let first = RunningClientHandler::acquire_user_connection_reservation_static(
user,
&config,
stats.clone(),
first_peer,
ip_tracker.clone(),
)
.await
.unwrap();
// Second attempt from a different IP must hit the IP limit, not the TCP cap.
let second_peer: SocketAddr = "198.51.100.204:31114".parse().unwrap();
let second = RunningClientHandler::acquire_user_connection_reservation_static(
user,
&config,
stats.clone(),
second_peer,
ip_tracker.clone(),
)
.await;
assert!(matches!(second, Err(ProxyError::ConnectionLimitExceeded { .. })));
// Exactly-one rollback: the failed attempt must not have incremented stats.
assert_eq!(stats.get_user_curr_connects(user), 1);
drop(first);
ip_tracker.drain_cleanup_queue().await;
assert_eq!(stats.get_user_curr_connects(user), 0);
assert_eq!(ip_tracker.get_active_ip_count(user).await, 0);
}
// 128 concurrent limit *checks* (no reservation held) against caps of exactly
// 128 must all succeed and must leave no residual state behind.
#[tokio::test]
async fn client_parallel_limit_checks_success_path_leaves_no_residue() {
    let user = "parallel-check-success-user";
    let stats = Arc::new(Stats::new());
    let ip_tracker = Arc::new(UserIpTracker::new());
    ip_tracker.set_user_limit(user, 128).await;
    let mut config = ProxyConfig::default();
    config.access.user_max_tcp_conns.insert(user.to_string(), 128);
    let handles: Vec<_> = (0..128u16)
        .map(|i| {
            let stats = Arc::clone(&stats);
            let ip_tracker = Arc::clone(&ip_tracker);
            let config = config.clone();
            tokio::spawn(async move {
                let peer = SocketAddr::new(
                    IpAddr::V4(Ipv4Addr::new(10, 10, (i / 255) as u8, (i % 255 + 1) as u8)),
                    32000 + i,
                );
                RunningClientHandler::check_user_limits_static(user, &config, &stats, peer, &ip_tracker)
                    .await
            })
        })
        .collect();
    for outcome in futures::future::join_all(handles).await {
        assert!(outcome.unwrap().is_ok());
    }
    // A pure check path must not mutate either counter.
    assert_eq!(stats.get_user_curr_connects(user), 0);
    assert_eq!(ip_tracker.get_active_ip_count(user).await, 0);
}
// Failure-path counterpart of the previous test: with the per-user IP limit
// pinned to 0, concurrent limit checks that get denied must still leave no
// residue in the counters.
// NOTE(review): `_denied` is tallied but never asserted — presumably because
// the semantics of set_user_limit(user, 0) (deny-all vs. unlimited) were not
// pinned down here; confirm and assert the expected count if deny-all holds.
#[tokio::test]
async fn client_parallel_limit_checks_failure_path_leaves_no_residue() {
let user = "parallel-check-failure-user";
let stats = Arc::new(Stats::new());
let ip_tracker = Arc::new(UserIpTracker::new());
ip_tracker.set_user_limit(user, 0).await;
let mut config = ProxyConfig::default();
config.access.user_max_tcp_conns.insert(user.to_string(), 512);
let mut tasks = Vec::new();
for i in 0..64u16 {
let stats = Arc::clone(&stats);
let ip_tracker = Arc::clone(&ip_tracker);
let config = config.clone();
tasks.push(tokio::spawn(async move {
let peer = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(172, 16, 0, (i % 250 + 1) as u8)), 33000 + i);
RunningClientHandler::check_user_limits_static(user, &config, &stats, peer, &ip_tracker)
.await
}));
}
let mut _denied = 0usize;
for result in futures::future::join_all(tasks).await {
match result.unwrap() {
Ok(()) => {}
// Denials are expected; anything else is a genuine test failure.
Err(ProxyError::ConnectionLimitExceeded { .. }) => _denied += 1,
Err(other) => panic!("unexpected error: {other}"),
}
}
assert_eq!(stats.get_user_curr_connects(user), 0);
assert_eq!(ip_tracker.get_active_ip_count(user).await, 0);
}
// Mixed churn: 200 concurrent attempts against small caps (IP limit 4, TCP cap
// 8) from a deliberately colliding pool of 16 IPs and 32 ports. Some succeed,
// some fail; after everything drops, both counters must converge to zero.
#[tokio::test]
async fn client_churn_mixed_success_failure_converges_to_zero_state() {
let user = "mixed-churn-user";
let stats = Arc::new(Stats::new());
let ip_tracker = Arc::new(UserIpTracker::new());
ip_tracker.set_user_limit(user, 4).await;
let mut config = ProxyConfig::default();
config.access.user_max_tcp_conns.insert(user.to_string(), 8);
let mut tasks = Vec::new();
for i in 0..200u16 {
let stats = Arc::clone(&stats);
let ip_tracker = Arc::clone(&ip_tracker);
let config = config.clone();
tasks.push(tokio::spawn(async move {
let peer = SocketAddr::new(
IpAddr::V4(Ipv4Addr::new(192, 0, 2, (i % 16 + 1) as u8)),
34000 + (i % 32),
);
let maybe_res = RunningClientHandler::acquire_user_connection_reservation_static(
user,
&config,
stats,
peer,
ip_tracker,
)
.await;
// Winners hold their slot for a jittered 0-2ms so releases interleave
// with later acquisition attempts.
if let Ok(reservation) = maybe_res {
tokio::time::sleep(Duration::from_millis((i % 3) as u64)).await;
drop(reservation);
}
}));
}
futures::future::join_all(tasks).await;
ip_tracker.drain_cleanup_queue().await;
assert_eq!(stats.get_user_curr_connects(user), 0);
assert_eq!(ip_tracker.get_active_ip_count(user).await, 0);
}
// 64 parallel attempts from the *same* peer address with both limits at 1:
// exactly one reservation may win the race; everyone else must see
// ConnectionLimitExceeded. Counters must converge after release.
#[tokio::test]
async fn client_same_ip_parallel_attempts_allow_at_most_one_when_limit_is_one() {
let user = "same-ip-user";
let stats = Arc::new(Stats::new());
let ip_tracker = Arc::new(UserIpTracker::new());
ip_tracker.set_user_limit(user, 1).await;
let mut config = ProxyConfig::default();
config.access.user_max_tcp_conns.insert(user.to_string(), 1);
let peer: SocketAddr = "203.0.113.44:35555".parse().unwrap();
let mut tasks = Vec::new();
for _ in 0..64 {
let stats = Arc::clone(&stats);
let ip_tracker = Arc::clone(&ip_tracker);
let config = config.clone();
tasks.push(tokio::spawn(async move {
RunningClientHandler::acquire_user_connection_reservation_static(
user,
&config,
stats,
peer,
ip_tracker,
)
.await
}));
}
let mut granted = 0usize;
// Hold all winners until counting is done so none release early.
let mut reservations = Vec::new();
for result in futures::future::join_all(tasks).await {
match result.unwrap() {
Ok(reservation) => {
granted += 1;
reservations.push(reservation);
}
Err(ProxyError::ConnectionLimitExceeded { .. }) => {}
Err(other) => panic!("unexpected error: {other}"),
}
}
assert_eq!(granted, 1, "only one reservation may be granted for same IP with limit=1");
drop(reservations);
ip_tracker.drain_cleanup_queue().await;
assert_eq!(stats.get_user_curr_connects(user), 0);
assert_eq!(ip_tracker.get_active_ip_count(user).await, 0);
}
// 500 strictly sequential acquire→release cycles: each cycle must fully undo
// itself, leaving zero residual state in the counters or the IP tracker.
#[tokio::test]
async fn client_repeat_acquire_release_cycles_never_accumulate_state() {
    let user = "repeat-cycle-user";
    let stats = Arc::new(Stats::new());
    let ip_tracker = Arc::new(UserIpTracker::new());
    ip_tracker.set_user_limit(user, 32).await;
    let mut config = ProxyConfig::default();
    config.access.user_max_tcp_conns.insert(user.to_string(), 32);
    for i in 0..500u16 {
        let octet3 = (i / 250) as u8;
        let octet4 = (i % 250 + 1) as u8;
        let peer = SocketAddr::new(
            IpAddr::V4(Ipv4Addr::new(198, 18, octet3, octet4)),
            36000 + (i % 128),
        );
        // The reservation is an unbound temporary, so it is dropped (released)
        // as soon as this statement completes — same as the explicit drop().
        RunningClientHandler::acquire_user_connection_reservation_static(
            user,
            &config,
            Arc::clone(&stats),
            peer,
            Arc::clone(&ip_tracker),
        )
        .await
        .unwrap();
    }
    ip_tracker.drain_cleanup_queue().await;
    assert_eq!(stats.get_user_curr_connects(user), 0);
    assert_eq!(ip_tracker.get_active_ip_count(user).await, 0);
}
// Two users with independent caps of 8 each, interleaved across 128 parallel
// attempts: each user must receive exactly its own cap — one user exhausting
// its limit must never eat into the other's budget.
#[tokio::test]
async fn client_multi_user_isolation_under_parallel_limit_exhaustion() {
let stats = Arc::new(Stats::new());
let ip_tracker = Arc::new(UserIpTracker::new());
let mut config = ProxyConfig::default();
config.access.user_max_tcp_conns.insert("u1".to_string(), 8);
config.access.user_max_tcp_conns.insert("u2".to_string(), 8);
let mut tasks = Vec::new();
for i in 0..128u16 {
let stats = Arc::clone(&stats);
let ip_tracker = Arc::clone(&ip_tracker);
let config = config.clone();
tasks.push(tokio::spawn(async move {
// Alternate users by index parity; join_all preserves spawn order, so
// the same parity rule recovers the user when tallying below.
let user = if i % 2 == 0 { "u1" } else { "u2" };
let peer = SocketAddr::new(
IpAddr::V4(Ipv4Addr::new(100, 64, (i / 64) as u8, (i % 64 + 1) as u8)),
37000 + i,
);
RunningClientHandler::acquire_user_connection_reservation_static(
user,
&config,
stats,
peer,
ip_tracker,
)
.await
}));
}
let mut u1_success = 0usize;
let mut u2_success = 0usize;
let mut reservations = Vec::new();
for (idx, result) in futures::future::join_all(tasks).await.into_iter().enumerate() {
let user = if idx % 2 == 0 { "u1" } else { "u2" };
match result.unwrap() {
Ok(reservation) => {
if user == "u1" {
u1_success += 1;
} else {
u2_success += 1;
}
reservations.push(reservation);
}
Err(ProxyError::ConnectionLimitExceeded { .. }) => {}
Err(other) => panic!("unexpected error: {other}"),
}
}
assert_eq!(u1_success, 8, "u1 must get exactly its own configured cap");
assert_eq!(u2_success, 8, "u2 must get exactly its own configured cap");
drop(reservations);
ip_tracker.drain_cleanup_queue().await;
assert_eq!(stats.get_user_curr_connects("u1"), 0);
assert_eq!(stats.get_user_curr_connects("u2"), 0);
}
// Recovery test: one holder saturates a cap of 1, a wave of 64 attempts is
// rejected, then the holder drops — after which a fresh attempt must succeed,
// proving the rejection wave caused no permanent capacity loss.
#[tokio::test]
async fn client_limit_recovery_after_full_rejection_wave() {
let user = "recover-user";
let stats = Arc::new(Stats::new());
let ip_tracker = Arc::new(UserIpTracker::new());
ip_tracker.set_user_limit(user, 1).await;
let mut config = ProxyConfig::default();
config.access.user_max_tcp_conns.insert(user.to_string(), 1);
let first_peer: SocketAddr = "198.51.100.50:38001".parse().unwrap();
let reservation = RunningClientHandler::acquire_user_connection_reservation_static(
user,
&config,
stats.clone(),
first_peer,
ip_tracker.clone(),
)
.await
.unwrap();
// Sequential rejection wave while the single slot is occupied.
for i in 0..64u16 {
let peer = SocketAddr::new(
IpAddr::V4(Ipv4Addr::new(198, 51, 100, (i % 60 + 1) as u8)),
38002 + i,
);
let denied = RunningClientHandler::acquire_user_connection_reservation_static(
user,
&config,
stats.clone(),
peer,
ip_tracker.clone(),
)
.await;
assert!(matches!(denied, Err(ProxyError::ConnectionLimitExceeded { .. })));
}
// Release the sole holder and flush deferred IP-tracker cleanup so the
// slot is genuinely free before the recovery attempt.
drop(reservation);
ip_tracker.drain_cleanup_queue().await;
assert_eq!(stats.get_user_curr_connects(user), 0);
let recovery_peer: SocketAddr = "198.51.100.200:38999".parse().unwrap();
let recovered = RunningClientHandler::acquire_user_connection_reservation_static(
user,
&config,
stats.clone(),
recovery_peer,
ip_tracker.clone(),
)
.await;
assert!(recovered.is_ok(), "capacity must recover after prior holder drops");
}
// Dual-limit test: both the IP limit and the TCP cap are 2 and both are
// saturated. 32 further rejections must not perturb the live counter (still
// exactly 2), and releasing both holders must converge everything to zero.
#[tokio::test]
async fn client_dual_limit_cross_product_never_leaks_on_reject() {
let user = "dual-limit-user";
let stats = Arc::new(Stats::new());
let ip_tracker = Arc::new(UserIpTracker::new());
ip_tracker.set_user_limit(user, 2).await;
let mut config = ProxyConfig::default();
config.access.user_max_tcp_conns.insert(user.to_string(), 2);
// Fill both slots from two distinct IPs.
let p1: SocketAddr = "203.0.113.10:39001".parse().unwrap();
let p2: SocketAddr = "203.0.113.11:39002".parse().unwrap();
let r1 = RunningClientHandler::acquire_user_connection_reservation_static(
user,
&config,
stats.clone(),
p1,
ip_tracker.clone(),
)
.await
.unwrap();
let r2 = RunningClientHandler::acquire_user_connection_reservation_static(
user,
&config,
stats.clone(),
p2,
ip_tracker.clone(),
)
.await
.unwrap();
// Every further attempt (fresh IPs 203.0.113.50..81) must be denied.
for i in 0..32u16 {
let peer = SocketAddr::new(
IpAddr::V4(Ipv4Addr::new(203, 0, 113, (50 + i) as u8)),
39010 + i,
);
let denied = RunningClientHandler::acquire_user_connection_reservation_static(
user,
&config,
stats.clone(),
peer,
ip_tracker.clone(),
)
.await;
assert!(matches!(denied, Err(ProxyError::ConnectionLimitExceeded { .. })));
}
// No leak: the 32 rejections must not have changed the live count.
assert_eq!(stats.get_user_curr_connects(user), 2);
drop((r1, r2));
ip_tracker.drain_cleanup_queue().await;
assert_eq!(stats.get_user_curr_connects(user), 0);
assert_eq!(ip_tracker.get_active_ip_count(user).await, 0);
}
// 512 concurrent pure limit *checks* (no reservation ever held) must produce
// zero drift in the connection counter and IP tracker, regardless of outcome.
#[tokio::test]
async fn client_check_user_limits_concurrent_churn_no_counter_drift() {
    let user = "check-drift-user";
    let stats = Arc::new(Stats::new());
    let ip_tracker = Arc::new(UserIpTracker::new());
    ip_tracker.set_user_limit(user, 64).await;
    let mut config = ProxyConfig::default();
    config.access.user_max_tcp_conns.insert(user.to_string(), 64);
    let handles: Vec<_> = (0..512u16)
        .map(|i| {
            let stats = Arc::clone(&stats);
            let ip_tracker = Arc::clone(&ip_tracker);
            let config = config.clone();
            tokio::spawn(async move {
                let peer = SocketAddr::new(
                    IpAddr::V4(Ipv4Addr::new(172, 20, (i / 255) as u8, (i % 255 + 1) as u8)),
                    40000 + (i % 500),
                );
                // The check's verdict is irrelevant; only counter hygiene matters.
                let _ = RunningClientHandler::check_user_limits_static(
                    user,
                    &config,
                    &stats,
                    peer,
                    &ip_tracker,
                )
                .await;
            })
        })
        .collect();
    for handle in futures::future::join_all(handles).await {
        handle.unwrap();
    }
    assert_eq!(stats.get_user_curr_connects(user), 0);
    assert_eq!(ip_tracker.get_active_ip_count(user).await, 0);
}

View File

@@ -0,0 +1,893 @@
use super::*;
use crate::config::{UpstreamConfig, UpstreamType};
use crate::crypto::sha256_hmac;
use crate::protocol::constants::{
HANDSHAKE_LEN,
MAX_TLS_PLAINTEXT_SIZE,
MIN_TLS_CLIENT_HELLO_SIZE,
TLS_RECORD_APPLICATION,
TLS_VERSION,
};
use crate::protocol::tls;
use std::collections::HashSet;
use std::net::SocketAddr;
use std::sync::Arc;
use tokio::io::{duplex, AsyncReadExt, AsyncWriteExt};
use tokio::net::TcpListener;
use tokio::time::{Duration, Instant};
// Bundles every shared dependency a mask-mode client-handler test needs, so
// individual campaign tests thread a single value around instead of nine Arcs.
struct CampaignHarness {
// Full proxy configuration (mask settings, user secrets, timeouts).
config: Arc<ProxyConfig>,
// Live per-user counters inspected by the assertions.
stats: Arc<Stats>,
upstream_manager: Arc<UpstreamManager>,
// Detects replayed ClientHellos (exercised by campaign 06).
replay_checker: Arc<ReplayChecker>,
buffer_pool: Arc<BufferPool>,
rng: Arc<SecureRandom>,
route_runtime: Arc<RouteRuntimeController>,
ip_tracker: Arc<UserIpTracker>,
beobachten: Arc<BeobachtenStore>,
}
// Builds an UpstreamManager with a single enabled Direct upstream (no
// interface/address binding) for use in tests.
// NOTE(review): the four bare `1` arguments and the `false` are positional and
// opaque from this file — confirm their meaning against UpstreamManager::new
// before changing them.
fn new_upstream_manager(stats: Arc<Stats>) -> Arc<UpstreamManager> {
Arc::new(UpstreamManager::new(
vec![UpstreamConfig {
upstream_type: UpstreamType::Direct {
interface: None,
bind_addresses: None,
},
weight: 1,
enabled: true,
scopes: String::new(),
selected_scope: String::new(),
}],
1,
1,
1,
1,
false,
stats,
))
}
// Builds a CampaignHarness configured for mask mode: TCP mask backend at
// 127.0.0.1:`mask_port`, a single user "user" with the given hex secret, and
// time-skew checks disabled so forged timestamps validate.
// Note: several campaigns pass a placeholder port here and re-point
// `mask_port` at a real listener afterwards.
fn build_mask_harness(secret_hex: &str, mask_port: u16) -> CampaignHarness {
let mut cfg = ProxyConfig::default();
cfg.general.beobachten = false;
cfg.censorship.mask = true;
// TCP mask target, not a unix socket, and no PROXY protocol header.
cfg.censorship.mask_unix_sock = None;
cfg.censorship.mask_host = Some("127.0.0.1".to_string());
cfg.censorship.mask_port = mask_port;
cfg.censorship.mask_proxy_protocol = 0;
cfg.access.ignore_time_skew = true;
cfg.access
.users
.insert("user".to_string(), secret_hex.to_string());
let config = Arc::new(cfg);
let stats = Arc::new(Stats::new());
CampaignHarness {
config,
stats: stats.clone(),
upstream_manager: new_upstream_manager(stats),
replay_checker: Arc::new(ReplayChecker::new(1024, Duration::from_secs(60))),
buffer_pool: Arc::new(BufferPool::new()),
rng: Arc::new(SecureRandom::new()),
route_runtime: Arc::new(RouteRuntimeController::new(RelayRouteMode::Direct)),
ip_tracker: Arc::new(UserIpTracker::new()),
beobachten: Arc::new(BeobachtenStore::new()),
}
}
// Forges a fake-TLS ClientHello the proxy should accept: a 0x16/0x03/0x01
// record header with a `tls_len`-byte body filled with `fill`, whose digest
// field carries HMAC-SHA256(secret, record-with-zeroed-digest) with the
// little-endian `timestamp` XORed into digest bytes 28..32 — presumably the
// inverse of what the server's validator undoes (confirm against
// protocol::tls).
fn make_valid_tls_client_hello(secret: &[u8], timestamp: u32, tls_len: usize, fill: u8) -> Vec<u8> {
assert!(tls_len <= u16::MAX as usize, "TLS length must fit into record header");
// 5-byte record header + body.
let total_len = 5 + tls_len;
let mut handshake = vec![fill; total_len];
handshake[0] = 0x16;
handshake[1] = 0x03;
handshake[2] = 0x01;
handshake[3..5].copy_from_slice(&(tls_len as u16).to_be_bytes());
let session_id_len: usize = 32;
// Byte immediately after the digest field holds the session-id length.
handshake[tls::TLS_DIGEST_POS + tls::TLS_DIGEST_LEN] = session_id_len as u8;
// HMAC is computed over the record with the digest field zeroed...
handshake[tls::TLS_DIGEST_POS..tls::TLS_DIGEST_POS + tls::TLS_DIGEST_LEN].fill(0);
let computed = sha256_hmac(secret, &handshake);
let mut digest = computed;
// ...then the timestamp is folded into the digest's last 4 bytes.
let ts = timestamp.to_le_bytes();
for i in 0..4 {
digest[28 + i] ^= ts[i];
}
handshake[tls::TLS_DIGEST_POS..tls::TLS_DIGEST_POS + tls::TLS_DIGEST_LEN]
.copy_from_slice(&digest);
handshake
}
/// Frames `payload` as one TLS record: 1-byte record type, 2-byte version
/// (TLS_VERSION), 2-byte big-endian length, then the payload bytes.
fn wrap_tls_record(record_type: u8, payload: &[u8]) -> Vec<u8> {
    let mut framed = Vec::with_capacity(5 + payload.len());
    framed.push(record_type);
    framed.extend_from_slice(&TLS_VERSION);
    let length_be = (payload.len() as u16).to_be_bytes();
    framed.extend_from_slice(&length_be);
    framed.extend_from_slice(payload);
    framed
}
// Convenience wrapper: frames `payload` as a TLS application-data record.
fn wrap_tls_application_data(payload: &[u8]) -> Vec<u8> {
wrap_tls_record(TLS_RECORD_APPLICATION, payload)
}
/// Consumes and discards the body of a TLS record whose 5-byte header has
/// already been read; the body length sits in header bytes 3..5 (big-endian).
async fn read_and_discard_tls_record_body<T>(stream: &mut T, header: [u8; 5])
where
    T: tokio::io::AsyncRead + Unpin,
{
    let body_len = usize::from(u16::from_be_bytes([header[3], header[4]]));
    let mut scratch = vec![0u8; body_len];
    stream.read_exact(&mut scratch).await.unwrap();
}
// Drives the "fake-TLS succeeds, MTProto auth fails" fallback path end to end:
// starts a TCP listener standing in for the mask backend, spawns the client
// handler over an in-memory duplex, completes the TLS exchange, then sends an
// invalid MTProto record plus `trailing_records` and asserts the backend
// receives exactly `expected_forward`.
async fn run_tls_success_mtproto_fail_capture(
harness: CampaignHarness,
peer: SocketAddr,
client_hello: Vec<u8>,
bad_mtproto_record: Vec<u8>,
trailing_records: Vec<Vec<u8>>,
expected_forward: Vec<u8>,
) {
let listener = TcpListener::bind("127.0.0.1:0").await.unwrap();
let backend_addr = listener.local_addr().unwrap();
// Re-point the harness config's mask target at the just-bound listener.
let mut cfg = (*harness.config).clone();
cfg.censorship.mask_port = backend_addr.port();
let cfg = Arc::new(cfg);
let expected = expected_forward.clone();
// Backend side: accept one connection and read exactly the expected bytes.
let accept_task = tokio::spawn(async move {
let (mut stream, _) = listener.accept().await.unwrap();
let mut got = vec![0u8; expected.len()];
stream.read_exact(&mut got).await.unwrap();
got
});
let (server_side, mut client_side) = duplex(262144);
let handler = tokio::spawn(handle_client_stream(
server_side,
peer,
cfg,
harness.stats,
harness.upstream_manager,
harness.replay_checker,
harness.buffer_pool,
harness.rng,
None,
harness.route_runtime,
None,
harness.ip_tracker,
harness.beobachten,
false,
));
// Step 1: valid-looking ClientHello; expect a handshake (0x16) response.
client_side.write_all(&client_hello).await.unwrap();
let mut tls_response_head = [0u8; 5];
client_side.read_exact(&mut tls_response_head).await.unwrap();
assert_eq!(tls_response_head[0], 0x16);
read_and_discard_tls_record_body(&mut client_side, tls_response_head).await;
// Step 2: invalid MTProto payload triggers the fallback-to-mask path.
client_side.write_all(&bad_mtproto_record).await.unwrap();
for record in trailing_records {
client_side.write_all(&record).await.unwrap();
}
// Step 3: backend must have received exactly the expected forwarded bytes.
let got = tokio::time::timeout(Duration::from_secs(4), accept_task)
.await
.unwrap()
.unwrap();
assert_eq!(got, expected_forward);
client_side.shutdown().await.unwrap();
// The handler itself must terminate cleanly within the timeout.
let result = tokio::time::timeout(Duration::from_secs(4), handler)
.await
.unwrap()
.unwrap();
assert!(result.is_ok());
}
// Drives the "input is not valid fake-TLS at all" path: sends `payload` to a
// fresh handler and asserts the mask backend receives exactly `expected`
// (typically the raw bytes actually seen) with no TLS response to the client.
async fn run_invalid_tls_capture(config: Arc<ProxyConfig>, payload: Vec<u8>, expected: Vec<u8>) {
let listener = TcpListener::bind("127.0.0.1:0").await.unwrap();
let backend_addr = listener.local_addr().unwrap();
// Force mask mode onto the caller's config and aim it at our listener.
let mut cfg = (*config).clone();
cfg.censorship.mask = true;
cfg.censorship.mask_unix_sock = None;
cfg.censorship.mask_host = Some("127.0.0.1".to_string());
cfg.censorship.mask_port = backend_addr.port();
let cfg = Arc::new(cfg);
let expected_probe = expected.clone();
let accept_task = tokio::spawn(async move {
let (mut stream, _) = listener.accept().await.unwrap();
let mut got = vec![0u8; expected_probe.len()];
stream.read_exact(&mut got).await.unwrap();
got
});
let stats = Arc::new(Stats::new());
let (server_side, mut client_side) = duplex(65536);
let handler = tokio::spawn(handle_client_stream(
server_side,
"198.51.100.77:45001".parse().unwrap(),
cfg,
stats,
new_upstream_manager(Arc::new(Stats::new())),
Arc::new(ReplayChecker::new(128, Duration::from_secs(60))),
Arc::new(BufferPool::new()),
Arc::new(SecureRandom::new()),
None,
Arc::new(RouteRuntimeController::new(RelayRouteMode::Direct)),
None,
Arc::new(UserIpTracker::new()),
Arc::new(BeobachtenStore::new()),
false,
));
// Write the probe, then close our side so the handler sees EOF.
client_side.write_all(&payload).await.unwrap();
client_side.shutdown().await.unwrap();
let got = tokio::time::timeout(Duration::from_secs(4), accept_task)
.await
.unwrap()
.unwrap();
assert_eq!(got, expected);
let result = tokio::time::timeout(Duration::from_secs(4), handler)
.await
.unwrap()
.unwrap();
assert!(result.is_ok());
}
// Campaign 01: after TLS succeeds but MTProto auth fails, a single trailing
// application-data record must be forwarded verbatim to the mask backend.
#[tokio::test]
async fn blackhat_campaign_01_tail_only_record_is_forwarded_after_tls_success_mtproto_fail() {
// Secret bytes must match the hex string handed to the harness.
let secret = [0xA1u8; 16];
let harness = build_mask_harness("a1a1a1a1a1a1a1a1a1a1a1a1a1a1a1a1", 1);
let client_hello = make_valid_tls_client_hello(&secret, 11, 600, 0x41);
// A zero-filled handshake-sized record: framed correctly but invalid MTProto.
let bad_record = wrap_tls_application_data(&vec![0u8; HANDSHAKE_LEN]);
let tail = wrap_tls_application_data(b"blackhat-tail-01");
run_tls_success_mtproto_fail_capture(
harness,
"198.51.100.1:55001".parse().unwrap(),
client_hello,
bad_record,
vec![tail.clone()],
tail,
)
.await;
}
// Campaign 02: after TLS success and MTProto failure, two subsequent
// application-data records must reach the mask backend in wire order.
#[tokio::test]
async fn blackhat_campaign_02_two_ordered_records_preserved_after_fallback() {
    let secret = [0xA2u8; 16];
    let harness = build_mask_harness("a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2", 1);
    let hello = make_valid_tls_client_hello(&secret, 12, 600, 0x42);
    // Correctly framed but invalid MTProto payload triggers the fallback.
    let bad_mtproto = wrap_tls_application_data(&vec![0u8; HANDSHAKE_LEN]);
    let first = wrap_tls_application_data(b"first");
    let second = wrap_tls_application_data(b"second");
    let mut expected = first.clone();
    expected.extend_from_slice(&second);
    run_tls_success_mtproto_fail_capture(
        harness,
        "198.51.100.2:55002".parse().unwrap(),
        hello,
        bad_mtproto,
        vec![first, second],
        expected,
    )
    .await;
}
// Campaign 03: a maximum-size (MAX_TLS_PLAINTEXT_SIZE) application-data record
// must survive the fallback path intact — no truncation at buffer boundaries.
#[tokio::test]
async fn blackhat_campaign_03_large_tls_application_record_survives_fallback() {
let secret = [0xA3u8; 16];
let harness = build_mask_harness("a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3", 1);
let client_hello = make_valid_tls_client_hello(&secret, 13, 600, 0x43);
let bad_record = wrap_tls_application_data(&vec![0u8; HANDSHAKE_LEN]);
let big_payload = vec![0x5Au8; MAX_TLS_PLAINTEXT_SIZE];
let big_record = wrap_tls_application_data(&big_payload);
run_tls_success_mtproto_fail_capture(
harness,
"198.51.100.3:55003".parse().unwrap(),
client_hello,
bad_record,
vec![big_record.clone()],
big_record,
)
.await;
}
// Campaign 04: extra bytes coalesced *inside* the failed MTProto record must
// be re-framed as their own application-data record and forwarded.
#[tokio::test]
async fn blackhat_campaign_04_coalesced_tail_in_failed_record_is_reframed_and_forwarded() {
    let secret = [0xA4u8; 16];
    let harness = build_mask_harness("a4a4a4a4a4a4a4a4a4a4a4a4a4a4a4a4", 1);
    let hello = make_valid_tls_client_hello(&secret, 14, 600, 0x44);
    let tail = b"coalesced-tail-blackhat".to_vec();
    // One record carrying a zero-filled fake handshake plus the coalesced tail.
    let mut payload = vec![0u8; HANDSHAKE_LEN];
    payload.extend_from_slice(&tail);
    run_tls_success_mtproto_fail_capture(
        harness,
        "198.51.100.4:55004".parse().unwrap(),
        hello,
        wrap_tls_application_data(&payload),
        Vec::new(),
        wrap_tls_application_data(&tail),
    )
    .await;
}
// Campaign 05: a tail coalesced inside the failed record followed by a second,
// separate record must both be forwarded, preserving wire order.
#[tokio::test]
async fn blackhat_campaign_05_coalesced_tail_plus_next_record_keep_wire_order() {
let secret = [0xA5u8; 16];
let harness = build_mask_harness("a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5", 1);
let client_hello = make_valid_tls_client_hello(&secret, 15, 600, 0x45);
let coalesced_tail = b"inline-tail".to_vec();
// One record: zero-filled fake handshake with the tail appended.
let mut bad_payload = vec![0u8; HANDSHAKE_LEN];
bad_payload.extend_from_slice(&coalesced_tail);
let bad_record = wrap_tls_application_data(&bad_payload);
let next_record = wrap_tls_application_data(b"next-record");
// The coalesced tail is expected re-framed, then the next record verbatim.
let expected = [
wrap_tls_application_data(&coalesced_tail),
next_record.clone(),
]
.concat();
run_tls_success_mtproto_fail_capture(
harness,
"198.51.100.5:55005".parse().unwrap(),
client_hello,
bad_record,
vec![next_record],
expected,
)
.await;
}
// Campaign 06: replay protection. The first connection completes the TLS
// exchange (seeding the shared ReplayChecker) and falls back via a failed
// MTProto record; a second connection replaying the *same* ClientHello must
// get no ServerHello and instead have the raw hello forwarded to the mask
// backend.
#[tokio::test]
async fn blackhat_campaign_06_replayed_tls_hello_is_masked_without_serverhello() {
let listener = TcpListener::bind("127.0.0.1:0").await.unwrap();
let backend_addr = listener.local_addr().unwrap();
let harness = build_mask_harness("a6a6a6a6a6a6a6a6a6a6a6a6a6a6a6a6", backend_addr.port());
// One checker shared across both runs — that is what detects the replay.
let replay_checker = harness.replay_checker.clone();
let client_hello = make_valid_tls_client_hello(&[0xA6; 16], 16, 600, 0x46);
let invalid_mtproto_record = wrap_tls_application_data(&vec![0u8; HANDSHAKE_LEN]);
let first_tail = wrap_tls_application_data(b"seed-tail");
let expected_hello = client_hello.clone();
let expected_tail = first_tail.clone();
// Backend accepts twice: first the seeded tail, then the replayed hello.
let accept_task = tokio::spawn(async move {
let (mut s1, _) = listener.accept().await.unwrap();
let mut got_tail = vec![0u8; expected_tail.len()];
s1.read_exact(&mut got_tail).await.unwrap();
assert_eq!(got_tail, expected_tail);
drop(s1);
let (mut s2, _) = listener.accept().await.unwrap();
let mut got_hello = vec![0u8; expected_hello.len()];
s2.read_exact(&mut got_hello).await.unwrap();
got_hello
});
// Runs one full client session; `send_mtproto` distinguishes the seeding
// run (true) from the replay run (false).
let run_one = |checker: Arc<ReplayChecker>, send_mtproto: bool| {
let mut cfg = (*harness.config).clone();
cfg.censorship.mask_port = backend_addr.port();
let cfg = Arc::new(cfg);
let hello = client_hello.clone();
let invalid_mtproto_record = invalid_mtproto_record.clone();
let first_tail = first_tail.clone();
let stats = harness.stats.clone();
let upstream = harness.upstream_manager.clone();
let pool = harness.buffer_pool.clone();
let rng = harness.rng.clone();
let route = harness.route_runtime.clone();
let ipt = harness.ip_tracker.clone();
let beob = harness.beobachten.clone();
async move {
let (server_side, mut client_side) = duplex(131072);
let handler = tokio::spawn(handle_client_stream(
server_side,
"198.51.100.6:55006".parse().unwrap(),
cfg,
stats,
upstream,
checker,
pool,
rng,
None,
route,
None,
ipt,
beob,
false,
));
client_side.write_all(&hello).await.unwrap();
if send_mtproto {
// Seeding run: expect a ServerHello, then fail MTProto to trigger
// the fallback that forwards `first_tail` to the backend.
let mut head = [0u8; 5];
client_side.read_exact(&mut head).await.unwrap();
assert_eq!(head[0], 0x16);
read_and_discard_tls_record_body(&mut client_side, head).await;
client_side.write_all(&invalid_mtproto_record).await.unwrap();
client_side.write_all(&first_tail).await.unwrap();
} else {
// Replay run: no ServerHello may arrive — either the read times out
// or the stream is already closed.
let mut one = [0u8; 1];
let no_server_hello = tokio::time::timeout(
Duration::from_millis(300),
client_side.read_exact(&mut one),
)
.await;
assert!(no_server_hello.is_err() || no_server_hello.unwrap().is_err());
}
client_side.shutdown().await.unwrap();
let result = tokio::time::timeout(Duration::from_secs(4), handler)
.await
.unwrap()
.unwrap();
assert!(result.is_ok());
}
};
run_one(replay_checker.clone(), true).await;
run_one(replay_checker, false).await;
// The replayed hello must have been forwarded raw to the mask backend.
let got = tokio::time::timeout(Duration::from_secs(4), accept_task)
.await
.unwrap()
.unwrap();
assert_eq!(got, client_hello);
}
// Campaign 07: a record header advertising 600 body bytes followed by only 37
// actual bytes — the proxy must forward exactly the prefix it received.
#[tokio::test]
async fn blackhat_campaign_07_truncated_clienthello_exact_prefix_is_forwarded() {
    let mut probe = Vec::with_capacity(5 + 37);
    // Handshake record, TLS 3.1, declared length 600.
    probe.extend_from_slice(&[0x16, 0x03, 0x01]);
    probe.extend_from_slice(&600u16.to_be_bytes());
    // Only 37 of the declared 600 body bytes actually arrive.
    probe.resize(5 + 37, 0x71);
    run_invalid_tls_capture(Arc::new(ProxyConfig::default()), probe.clone(), probe).await;
}
// Campaign 08: a header declaring a 0xFFFF-byte body that never arrives — the
// proxy must forward exactly the 5 header bytes it actually saw.
#[tokio::test]
async fn blackhat_campaign_08_out_of_bounds_len_forwards_header_only() {
let header = vec![0x16, 0x03, 0x01, 0xFF, 0xFF];
run_invalid_tls_capture(Arc::new(ProxyConfig::default()), header.clone(), header).await;
}
// Campaign 09: the ClientHello arrives in three fragments (2-byte header
// chunk, 3 more header bytes completing a declared 600-byte length, then only
// 11 body bytes). Only the 16 bytes actually seen may be forwarded to the
// mask backend.
#[tokio::test]
async fn blackhat_campaign_09_fragmented_header_then_partial_body_masks_seen_bytes_only() {
let listener = TcpListener::bind("127.0.0.1:0").await.unwrap();
let backend_addr = listener.local_addr().unwrap();
let mut cfg = ProxyConfig::default();
cfg.censorship.mask = true;
cfg.censorship.mask_host = Some("127.0.0.1".to_string());
cfg.censorship.mask_port = backend_addr.port();
cfg.censorship.mask_unix_sock = None;
// Expected forward: the 5-byte header (declared length 600) + 11 body bytes.
let expected = {
let mut x = vec![0u8; 5 + 11];
x[0] = 0x16;
x[1] = 0x03;
x[2] = 0x01;
x[3..5].copy_from_slice(&600u16.to_be_bytes());
x[5..].fill(0xCC);
x
};
let accept_task = tokio::spawn(async move {
let (mut stream, _) = listener.accept().await.unwrap();
let mut got = vec![0u8; expected.len()];
stream.read_exact(&mut got).await.unwrap();
got
});
let (server_side, mut client_side) = duplex(65536);
let handler = tokio::spawn(handle_client_stream(
server_side,
"198.51.100.9:55009".parse().unwrap(),
Arc::new(cfg),
Arc::new(Stats::new()),
new_upstream_manager(Arc::new(Stats::new())),
Arc::new(ReplayChecker::new(128, Duration::from_secs(60))),
Arc::new(BufferPool::new()),
Arc::new(SecureRandom::new()),
None,
Arc::new(RouteRuntimeController::new(RelayRouteMode::Direct)),
None,
Arc::new(UserIpTracker::new()),
Arc::new(BeobachtenStore::new()),
false,
));
// Fragment 1: first two header bytes. Fragment 2: rest of the header —
// 0x02 0x58 is the big-endian length 600. Fragment 3: 11 body bytes only.
client_side.write_all(&[0x16, 0x03]).await.unwrap();
client_side.write_all(&[0x01, 0x02, 0x58]).await.unwrap();
client_side.write_all(&vec![0xCC; 11]).await.unwrap();
client_side.shutdown().await.unwrap();
let got = tokio::time::timeout(Duration::from_secs(4), accept_task)
.await
.unwrap()
.unwrap();
// 16 = 5 header bytes + the 11 body bytes that were actually sent.
assert_eq!(got.len(), 16);
let result = tokio::time::timeout(Duration::from_secs(4), handler)
.await
.unwrap()
.unwrap();
assert!(result.is_ok());
}
#[tokio::test]
async fn blackhat_campaign_10_zero_handshake_timeout_with_delay_still_avoids_timeout_counter() {
    // client_handshake = 0 is the tightest possible budget; the configured
    // 700ms reject delay must still run to completion and the bad-client
    // fallback must never be booked as a handshake timeout.
    let mut cfg = ProxyConfig::default();
    cfg.general.beobachten = false;
    cfg.censorship.mask = true;
    cfg.censorship.mask_unix_sock = None;
    cfg.censorship.mask_host = Some("127.0.0.1".to_string());
    cfg.censorship.mask_port = 1; // no listener here in the test environment
    cfg.timeouts.client_handshake = 0;
    cfg.censorship.server_hello_delay_min_ms = 700;
    cfg.censorship.server_hello_delay_max_ms = 700; // min == max -> deterministic delay
    let stats = Arc::new(Stats::new());
    let (server_side, mut client_side) = duplex(4096);
    let started = Instant::now();
    let handler = tokio::spawn(handle_client_stream(
        server_side,
        "198.51.100.10:55010".parse().unwrap(),
        Arc::new(cfg),
        stats.clone(),
        new_upstream_manager(Arc::new(Stats::new())),
        Arc::new(ReplayChecker::new(128, Duration::from_secs(60))),
        Arc::new(BufferPool::new()),
        Arc::new(SecureRandom::new()),
        None,
        Arc::new(RouteRuntimeController::new(RelayRouteMode::Direct)),
        None,
        Arc::new(UserIpTracker::new()),
        Arc::new(BeobachtenStore::new()),
        false,
    ));
    // Well-formed record header with a garbage 700-byte body: fails the
    // faketls handshake validation.
    let mut invalid = vec![0u8; 5 + 700];
    invalid[0] = 0x16;
    invalid[1] = 0x03;
    invalid[2] = 0x01;
    invalid[3..5].copy_from_slice(&700u16.to_be_bytes());
    invalid[5..].fill(0x66);
    client_side.write_all(&invalid).await.unwrap();
    client_side.shutdown().await.unwrap();
    let result = tokio::time::timeout(Duration::from_secs(4), handler)
        .await
        .unwrap()
        .unwrap();
    assert!(result.is_ok());
    assert_eq!(stats.get_handshake_timeouts(), 0);
    // Elapsed check uses a 650ms floor (700ms configured minus timer slack).
    assert!(started.elapsed() >= Duration::from_millis(650));
}
#[tokio::test]
async fn blackhat_campaign_11_parallel_bad_tls_probes_all_masked_without_timeouts() {
    // 24 concurrent sessions, each sending a unique malformed 5-byte record
    // header; every header must be relayed verbatim to the mask backend and
    // none may be accounted as a handshake timeout.
    let n = 24usize;
    let listener = TcpListener::bind("127.0.0.1:0").await.unwrap();
    let backend_addr = listener.local_addr().unwrap();
    let mut cfg = ProxyConfig::default();
    cfg.censorship.mask = true;
    cfg.censorship.mask_host = Some("127.0.0.1".to_string());
    cfg.censorship.mask_unix_sock = None;
    cfg.censorship.mask_port = backend_addr.port();
    let stats = Arc::new(Stats::new());
    // Sessions can reach the backend in any order, so compare sets of
    // observed headers rather than an ordered list.
    let accept_task = tokio::spawn(async move {
        let mut seen = HashSet::new();
        for _ in 0..n {
            let (mut stream, _) = listener.accept().await.unwrap();
            let mut hdr = [0u8; 5];
            stream.read_exact(&mut hdr).await.unwrap();
            seen.insert(hdr.to_vec());
        }
        seen
    });
    let mut tasks = Vec::new();
    for i in 0..n {
        // Length 0xFF?? is out of bounds; the final byte differs per probe so
        // each session is distinguishable on the backend side.
        let mut hdr = [0u8; 5];
        hdr[0] = 0x16;
        hdr[1] = 0x03;
        hdr[2] = 0x01;
        hdr[3] = 0xFF;
        hdr[4] = i as u8;
        let cfg = Arc::new(cfg.clone());
        let stats = stats.clone();
        tasks.push(tokio::spawn(async move {
            let (server_side, mut client_side) = duplex(4096);
            let handler = tokio::spawn(handle_client_stream(
                server_side,
                format!("198.51.100.11:{}", 56000 + i).parse().unwrap(),
                cfg,
                stats,
                new_upstream_manager(Arc::new(Stats::new())),
                Arc::new(ReplayChecker::new(128, Duration::from_secs(60))),
                Arc::new(BufferPool::new()),
                Arc::new(SecureRandom::new()),
                None,
                Arc::new(RouteRuntimeController::new(RelayRouteMode::Direct)),
                None,
                Arc::new(UserIpTracker::new()),
                Arc::new(BeobachtenStore::new()),
                false,
            ));
            client_side.write_all(&hdr).await.unwrap();
            client_side.shutdown().await.unwrap();
            let result = tokio::time::timeout(Duration::from_secs(4), handler)
                .await
                .unwrap()
                .unwrap();
            assert!(result.is_ok());
            hdr.to_vec()
        }));
    }
    let mut expected = HashSet::new();
    for t in tasks {
        expected.insert(t.await.unwrap());
    }
    let seen = tokio::time::timeout(Duration::from_secs(6), accept_task)
        .await
        .unwrap()
        .unwrap();
    assert_eq!(seen, expected);
    // Masking fallback must not increment the shared timeout counter.
    assert_eq!(stats.get_handshake_timeouts(), 0);
}
#[tokio::test]
async fn blackhat_campaign_12_parallel_tls_success_mtproto_fail_sessions_keep_isolation() {
    // 16 concurrent sessions each complete a valid faketls handshake, then
    // send an invalid MTProto payload followed by a session-unique trailing
    // record. Each session's backend connection must carry exactly its own
    // trailing record — no cross-session leakage.
    let sessions = 16usize;
    let listener = TcpListener::bind("127.0.0.1:0").await.unwrap();
    let backend_addr = listener.local_addr().unwrap();
    // Trailing records are unique per session (length 8+i filled with i).
    let mut expected = HashSet::new();
    for i in 0..sessions {
        let rec = wrap_tls_application_data(&vec![i as u8; 8 + i]);
        expected.insert(rec);
    }
    // For each backend connection, read one complete TLS record (header +
    // body) and collect them as a set since arrival order is unspecified.
    let accept_task = tokio::spawn(async move {
        let mut got_set = HashSet::new();
        for _ in 0..sessions {
            let (mut stream, _) = listener.accept().await.unwrap();
            let mut head = [0u8; 5];
            stream.read_exact(&mut head).await.unwrap();
            let len = u16::from_be_bytes([head[3], head[4]]) as usize;
            let mut rec = vec![0u8; 5 + len];
            rec[..5].copy_from_slice(&head);
            stream.read_exact(&mut rec[5..]).await.unwrap();
            got_set.insert(rec);
        }
        got_set
    });
    let mut tasks = Vec::new();
    for i in 0..sessions {
        let mut harness = build_mask_harness("abababababababababababababababab", backend_addr.port());
        // Re-point the harness config at this test's capture backend.
        let mut cfg = (*harness.config).clone();
        cfg.censorship.mask_port = backend_addr.port();
        harness.config = Arc::new(cfg);
        tasks.push(tokio::spawn(async move {
            let secret = [0xABu8; 16];
            // Valid hello per session (distinct timestamp/fill), then a
            // zeroed HANDSHAKE_LEN payload that fails MTProto validation.
            let hello = make_valid_tls_client_hello(&secret, 100 + i as u32, 600, 0x40 + (i as u8 % 10));
            let bad = wrap_tls_application_data(&vec![0u8; HANDSHAKE_LEN]);
            let tail = wrap_tls_application_data(&vec![i as u8; 8 + i]);
            let (server_side, mut client_side) = duplex(131072);
            let handler = tokio::spawn(handle_client_stream(
                server_side,
                format!("198.51.100.12:{}", 56100 + i).parse().unwrap(),
                harness.config,
                harness.stats,
                harness.upstream_manager,
                harness.replay_checker,
                harness.buffer_pool,
                harness.rng,
                None,
                harness.route_runtime,
                None,
                harness.ip_tracker,
                harness.beobachten,
                false,
            ));
            client_side.write_all(&hello).await.unwrap();
            // Consume the ServerHello response before proceeding.
            let mut head = [0u8; 5];
            client_side.read_exact(&mut head).await.unwrap();
            read_and_discard_tls_record_body(&mut client_side, head).await;
            client_side.write_all(&bad).await.unwrap();
            client_side.write_all(&tail).await.unwrap();
            client_side.shutdown().await.unwrap();
            let result = tokio::time::timeout(Duration::from_secs(5), handler)
                .await
                .unwrap()
                .unwrap();
            assert!(result.is_ok());
            tail
        }));
    }
    let mut produced = HashSet::new();
    for t in tasks {
        produced.insert(t.await.unwrap());
    }
    let observed = tokio::time::timeout(Duration::from_secs(8), accept_task)
        .await
        .unwrap()
        .unwrap();
    // Both what the clients sent and what the backend saw must equal the
    // precomputed per-session set — isolation holds in both directions.
    assert_eq!(produced, expected);
    assert_eq!(observed, expected);
}
#[tokio::test]
async fn blackhat_campaign_13_backend_down_does_not_escalate_to_handshake_timeout() {
    // Mask backend points at port 1 (nothing listening in the test env): the
    // failed backend connect must not be misreported as a handshake timeout.
    let mut cfg = ProxyConfig::default();
    cfg.censorship.mask = true;
    cfg.censorship.mask_unix_sock = None;
    cfg.censorship.mask_host = Some("127.0.0.1".to_string());
    cfg.censorship.mask_port = 1;
    cfg.timeouts.client_handshake = 1;
    let stats = Arc::new(Stats::new());
    let (server_side, mut client_side) = duplex(4096);
    let handler = tokio::spawn(handle_client_stream(
        server_side,
        "198.51.100.13:55013".parse().unwrap(),
        Arc::new(cfg),
        stats.clone(),
        new_upstream_manager(Arc::new(Stats::new())),
        Arc::new(ReplayChecker::new(128, Duration::from_secs(60))),
        Arc::new(BufferPool::new()),
        Arc::new(SecureRandom::new()),
        None,
        Arc::new(RouteRuntimeController::new(RelayRouteMode::Direct)),
        None,
        Arc::new(UserIpTracker::new()),
        Arc::new(BeobachtenStore::new()),
        false,
    ));
    // Malformed header (out-of-range length) triggers the masking fallback.
    let bad = vec![0x16, 0x03, 0x01, 0xFF, 0x00];
    client_side.write_all(&bad).await.unwrap();
    client_side.shutdown().await.unwrap();
    let result = tokio::time::timeout(Duration::from_secs(4), handler)
        .await
        .unwrap()
        .unwrap();
    assert!(result.is_ok());
    assert_eq!(stats.get_handshake_timeouts(), 0);
}
#[tokio::test]
async fn blackhat_campaign_14_masking_disabled_path_finishes_cleanly() {
    // With masking disabled there is no fallback backend; a bad probe must
    // still terminate the handler cleanly and without a timeout count.
    let mut cfg = ProxyConfig::default();
    cfg.censorship.mask = false;
    cfg.timeouts.client_handshake = 1;
    let stats = Arc::new(Stats::new());
    let (server_side, mut client_side) = duplex(4096);
    let handler = tokio::spawn(handle_client_stream(
        server_side,
        "198.51.100.14:55014".parse().unwrap(),
        Arc::new(cfg),
        stats.clone(),
        new_upstream_manager(Arc::new(Stats::new())),
        Arc::new(ReplayChecker::new(128, Duration::from_secs(60))),
        Arc::new(BufferPool::new()),
        Arc::new(SecureRandom::new()),
        None,
        Arc::new(RouteRuntimeController::new(RelayRouteMode::Direct)),
        None,
        Arc::new(UserIpTracker::new()),
        Arc::new(BeobachtenStore::new()),
        false,
    ));
    // Header-only probe with an out-of-range length field.
    let bad = vec![0x16, 0x03, 0x01, 0xFF, 0xF0];
    client_side.write_all(&bad).await.unwrap();
    client_side.shutdown().await.unwrap();
    let result = tokio::time::timeout(Duration::from_secs(4), handler)
        .await
        .unwrap()
        .unwrap();
    assert!(result.is_ok());
    assert_eq!(stats.get_handshake_timeouts(), 0);
}
#[tokio::test]
async fn blackhat_campaign_15_light_fuzz_tls_lengths_and_fragmentation() {
    // Deterministic LCG-driven fuzz: vary the advertised record length and
    // how much of the body is actually delivered, asserting the proxy always
    // forwards exactly the bytes it received.
    let mut seed = 0x9E3779B97F4A7C15u64;
    for idx in 0..20u16 {
        seed = seed.wrapping_mul(6364136223846793005).wrapping_add(1);
        let mut tls_len = (seed as usize) % 20000;
        // Every third case forces the length past the plaintext maximum.
        if idx % 3 == 0 {
            tls_len = MAX_TLS_PLAINTEXT_SIZE + 1 + (tls_len % 1024);
        }
        // Plausible lengths get a small truncated body (never the full
        // record); implausible lengths send the bare 5-byte header only.
        let body_to_send = if (MIN_TLS_CLIENT_HELLO_SIZE..=MAX_TLS_PLAINTEXT_SIZE).contains(&tls_len)
        {
            (seed as usize % 29).min(tls_len.saturating_sub(1))
        } else {
            0
        };
        let mut probe = vec![0u8; 5 + body_to_send];
        probe[0] = 0x16;
        probe[1] = 0x03;
        probe[2] = 0x01;
        probe[3..5].copy_from_slice(&(tls_len as u16).to_be_bytes());
        // Fill the body from a second LCG stream so payloads differ per case.
        for b in &mut probe[5..] {
            seed = seed.wrapping_mul(2862933555777941757).wrapping_add(3037000493);
            *b = (seed >> 24) as u8;
        }
        let expected = probe.clone();
        run_invalid_tls_capture(Arc::new(ProxyConfig::default()), probe, expected).await;
    }
}
#[tokio::test]
async fn blackhat_campaign_16_mixed_probe_burst_stress_finishes_without_panics() {
    // Fire 18 probe sessions concurrently, alternating between truncated
    // ClientHello prefixes and header-only out-of-range probes; the suite
    // passes if every session completes its own capture check.
    let cases = 18usize;
    let mut tasks = Vec::new();
    for i in 0..cases {
        tasks.push(tokio::spawn(async move {
            if i % 2 == 0 {
                // Truncated probe: header claims 600 bytes, body is 0..12
                // bytes of an i-dependent fill pattern.
                let mut probe = vec![0u8; 5 + (i % 13)];
                probe[0] = 0x16;
                probe[1] = 0x03;
                probe[2] = 0x01;
                probe[3..5].copy_from_slice(&600u16.to_be_bytes());
                probe[5..].fill((0x90 + i as u8) ^ 0x5A);
                run_invalid_tls_capture(Arc::new(ProxyConfig::default()), probe.clone(), probe).await;
            } else {
                // Header-only probe with an out-of-range length 0xFF__.
                let hdr = vec![0x16, 0x03, 0x01, 0xFF, i as u8];
                run_invalid_tls_capture(Arc::new(ProxyConfig::default()), hdr.clone(), hdr).await;
            }
        }));
    }
    for task in tasks {
        task.await.unwrap();
    }
}

View File

@@ -0,0 +1,244 @@
use super::*;
use crate::config::{UpstreamConfig, UpstreamType};
use crate::crypto::sha256_hmac;
use crate::protocol::constants::{HANDSHAKE_LEN, TLS_VERSION};
use crate::protocol::tls;
use tokio::io::{duplex, AsyncReadExt, AsyncWriteExt};
use tokio::net::TcpListener;
use tokio::time::{Duration, Instant};
/// Bundle of shared services required to spawn `handle_client_stream`
/// in tests, mirroring the handler's argument list.
struct PipelineHarness {
    config: Arc<ProxyConfig>,
    stats: Arc<Stats>,              // shared counters inspected by assertions
    upstream_manager: Arc<UpstreamManager>,
    replay_checker: Arc<ReplayChecker>, // detects replayed ClientHellos
    buffer_pool: Arc<BufferPool>,
    rng: Arc<SecureRandom>,
    route_runtime: Arc<RouteRuntimeController>,
    ip_tracker: Arc<UserIpTracker>,
    beobachten: Arc<BeobachtenStore>,
}
/// Assemble a `PipelineHarness` around `config` with a single direct
/// (non-proxied) upstream and fresh instances of every shared service.
fn build_harness(config: ProxyConfig) -> PipelineHarness {
    let stats = Arc::new(Stats::new());
    let direct_upstream = UpstreamConfig {
        upstream_type: UpstreamType::Direct {
            interface: None,
            bind_addresses: None,
        },
        weight: 1,
        enabled: true,
        scopes: String::new(),
        selected_scope: String::new(),
    };
    let upstream_manager = Arc::new(UpstreamManager::new(
        vec![direct_upstream],
        1,
        1,
        1,
        1,
        false,
        stats.clone(),
    ));
    PipelineHarness {
        config: Arc::new(config),
        stats,
        upstream_manager,
        replay_checker: Arc::new(ReplayChecker::new(256, Duration::from_secs(60))),
        buffer_pool: Arc::new(BufferPool::new()),
        rng: Arc::new(SecureRandom::new()),
        route_runtime: Arc::new(RouteRuntimeController::new(RelayRouteMode::Direct)),
        ip_tracker: Arc::new(UserIpTracker::new()),
        beobachten: Arc::new(BeobachtenStore::new()),
    }
}
/// Build a faketls ClientHello that passes the proxy's digest check.
///
/// The record header advertises `tls_len` body bytes, the body is padded
/// with `fill`, and the HMAC digest (computed over the hello with the
/// digest field zeroed) is XOR-ed with the little-endian `timestamp` in
/// its final four bytes, matching the protocol's validation scheme.
fn make_valid_tls_client_hello(secret: &[u8], timestamp: u32, tls_len: usize, fill: u8) -> Vec<u8> {
    assert!(tls_len <= u16::MAX as usize, "TLS length must fit into record header");
    let mut hello = vec![fill; 5 + tls_len];
    hello[0] = 0x16;
    hello[1] = 0x03;
    hello[2] = 0x01;
    hello[3..5].copy_from_slice(&(tls_len as u16).to_be_bytes());
    let digest_range = tls::TLS_DIGEST_POS..tls::TLS_DIGEST_POS + tls::TLS_DIGEST_LEN;
    // Declare a 32-byte session id immediately after the digest field.
    hello[digest_range.end] = 32;
    // HMAC is taken with the digest field zeroed out.
    hello[digest_range.clone()].fill(0);
    let mut digest = sha256_hmac(secret, &hello);
    let ts = timestamp.to_le_bytes();
    for (d, t) in digest[28..32].iter_mut().zip(ts.iter()) {
        *d ^= *t;
    }
    hello[digest_range].copy_from_slice(&digest);
    hello
}
/// Wrap `payload` in a single TLS application-data record (type 0x17).
///
/// # Panics
/// Panics if `payload` exceeds `u16::MAX` bytes: the record length field is
/// 16 bits, and the previous bare `as u16` cast would have silently
/// truncated the length and produced a corrupt record. The guard mirrors
/// the assert in `make_valid_tls_client_hello`.
fn wrap_tls_application_data(payload: &[u8]) -> Vec<u8> {
    assert!(payload.len() <= u16::MAX as usize, "TLS record payload must fit into record header");
    let mut record = Vec::with_capacity(5 + payload.len());
    record.push(0x17); // application-data record type
    record.extend_from_slice(&TLS_VERSION);
    record.extend_from_slice(&(payload.len() as u16).to_be_bytes());
    record.extend_from_slice(payload);
    record
}
/// Read and throw away the body of a TLS record whose 5-byte `header`
/// has already been consumed; panics on short reads.
async fn read_and_discard_tls_record_body<T>(stream: &mut T, header: [u8; 5])
where
    T: tokio::io::AsyncRead + Unpin,
{
    let body_len = usize::from(u16::from_be_bytes([header[3], header[4]]));
    let mut discard = vec![0u8; body_len];
    stream.read_exact(&mut discard).await.unwrap();
}
#[tokio::test]
async fn masking_runs_outside_handshake_timeout_budget_with_high_reject_delay() {
    // A zero handshake timeout combined with a 730ms reject delay: the
    // masking fallback must run outside the handshake budget, so it is
    // neither canceled nor counted as a timeout.
    let mut config = ProxyConfig::default();
    config.general.beobachten = false;
    config.censorship.mask = true;
    config.censorship.mask_unix_sock = None;
    config.censorship.mask_host = Some("127.0.0.1".to_string());
    config.censorship.mask_port = 1; // no listener in the test environment
    config.timeouts.client_handshake = 0;
    config.censorship.server_hello_delay_min_ms = 730;
    config.censorship.server_hello_delay_max_ms = 730; // deterministic delay
    let harness = build_harness(config);
    let stats = harness.stats.clone();
    let (server_side, mut client_side) = duplex(4096);
    let peer: SocketAddr = "198.51.100.241:56541".parse().unwrap();
    let handler = tokio::spawn(handle_client_stream(
        server_side,
        peer,
        harness.config,
        harness.stats,
        harness.upstream_manager,
        harness.replay_checker,
        harness.buffer_pool,
        harness.rng,
        None,
        harness.route_runtime,
        None,
        harness.ip_tracker,
        harness.beobachten,
        false,
    ));
    // Valid record header, garbage body: fails faketls validation.
    let mut invalid_hello = vec![0u8; 5 + 600];
    invalid_hello[0] = 0x16;
    invalid_hello[1] = 0x03;
    invalid_hello[2] = 0x01;
    invalid_hello[3..5].copy_from_slice(&600u16.to_be_bytes());
    invalid_hello[5..].fill(0x44);
    let started = Instant::now();
    client_side.write_all(&invalid_hello).await.unwrap();
    client_side.shutdown().await.unwrap();
    let result = tokio::time::timeout(Duration::from_secs(3), handler)
        .await
        .unwrap()
        .unwrap();
    assert!(result.is_ok(), "bad-client fallback must not be canceled by handshake timeout");
    assert_eq!(
        stats.get_handshake_timeouts(),
        0,
        "masking fallback path must not increment handshake timeout counter"
    );
    // 700ms floor allows timer slack below the configured 730ms.
    assert!(
        started.elapsed() >= Duration::from_millis(700),
        "configured reject delay should still be visible before masking"
    );
}
#[tokio::test]
async fn tls_mtproto_bad_client_does_not_reinject_clienthello_into_mask_backend() {
    // A client that passes the faketls handshake but then fails MTProto
    // validation is downgraded to the mask backend; the already-consumed
    // ClientHello must NOT be replayed into that backend — only the TLS
    // records sent after the handshake may appear there.
    let listener = TcpListener::bind("127.0.0.1:0").await.unwrap();
    let backend_addr = listener.local_addr().unwrap();
    let mut config = ProxyConfig::default();
    config.general.beobachten = false;
    config.censorship.mask = true;
    config.censorship.mask_unix_sock = None;
    config.censorship.mask_host = Some("127.0.0.1".to_string());
    config.censorship.mask_port = backend_addr.port();
    config.censorship.mask_proxy_protocol = 0;
    config.access.ignore_time_skew = true;
    config
        .access
        .users
        .insert("user".to_string(), "d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0".to_string());
    let harness = build_harness(config);
    // Secret bytes match the hex-encoded user secret above.
    let secret = [0xD0u8; 16];
    let client_hello = make_valid_tls_client_hello(&secret, 0, 600, 0x41);
    // Zeroed HANDSHAKE_LEN payload fails MTProto validation.
    let invalid_mtproto_record = wrap_tls_application_data(&vec![0u8; HANDSHAKE_LEN]);
    let trailing_record = wrap_tls_application_data(b"no-clienthello-reinject");
    let expected_trailing = trailing_record.clone();
    // Backend must see the trailing record as its very first bytes.
    let accept_task = tokio::spawn(async move {
        let (mut stream, _) = listener.accept().await.unwrap();
        let mut got = vec![0u8; expected_trailing.len()];
        stream.read_exact(&mut got).await.unwrap();
        assert_eq!(
            got,
            expected_trailing,
            "mask backend must receive only post-handshake trailing TLS records"
        );
    });
    let (server_side, mut client_side) = duplex(131072);
    let peer: SocketAddr = "198.51.100.242:56542".parse().unwrap();
    let handler = tokio::spawn(handle_client_stream(
        server_side,
        peer,
        harness.config,
        harness.stats,
        harness.upstream_manager,
        harness.replay_checker,
        harness.buffer_pool,
        harness.rng,
        None,
        harness.route_runtime,
        None,
        harness.ip_tracker,
        harness.beobachten,
        false,
    ));
    client_side.write_all(&client_hello).await.unwrap();
    // Consume the ServerHello (handshake record type 0x16) before sending
    // the invalid MTProto payload.
    let mut tls_response_head = [0u8; 5];
    client_side.read_exact(&mut tls_response_head).await.unwrap();
    assert_eq!(tls_response_head[0], 0x16);
    read_and_discard_tls_record_body(&mut client_side, tls_response_head).await;
    client_side.write_all(&invalid_mtproto_record).await.unwrap();
    client_side.write_all(&trailing_record).await.unwrap();
    tokio::time::timeout(Duration::from_secs(3), accept_task)
        .await
        .unwrap()
        .unwrap();
    drop(client_side);
    let result = tokio::time::timeout(Duration::from_secs(3), handler)
        .await
        .unwrap()
        .unwrap();
    assert!(result.is_ok());
}

View File

@@ -0,0 +1,192 @@
use super::*;
use crate::config::{UpstreamConfig, UpstreamType};
use std::sync::Arc;
use tokio::io::{duplex, AsyncReadExt, AsyncWriteExt};
use tokio::net::TcpListener;
use tokio::time::{Duration, Instant};
/// Build a single-upstream `UpstreamManager` that connects directly (no
/// relay), weight 1, with all numeric limits set to 1.
fn new_upstream_manager(stats: Arc<Stats>) -> Arc<UpstreamManager> {
    let direct = UpstreamConfig {
        upstream_type: UpstreamType::Direct {
            interface: None,
            bind_addresses: None,
        },
        weight: 1,
        enabled: true,
        scopes: String::new(),
        selected_scope: String::new(),
    };
    Arc::new(UpstreamManager::new(vec![direct], 1, 1, 1, 1, false, stats))
}
/// Nearest-rank percentile (floor interpolation) of `values`, where the
/// percentile is expressed as the fraction `p_num / p_den`.
/// Returns 0 for an empty sample set.
fn percentile_ms(mut values: Vec<u128>, p_num: usize, p_den: usize) -> u128 {
    if values.is_empty() {
        return 0;
    }
    values.sort_unstable();
    let rank = (values.len() - 1) * p_num / p_den;
    values[rank]
}
/// Run one invalid-probe session (header advertising 600 bytes, `body_sent`
/// actual body bytes) against a masking proxy with a fixed 700ms reject
/// delay and a dead backend (port 1), returning the wall-clock milliseconds
/// from spawn to handler completion.
async fn measure_reject_duration_ms(body_sent: usize) -> u128 {
    let mut cfg = ProxyConfig::default();
    cfg.general.beobachten = false;
    cfg.censorship.mask = true;
    cfg.censorship.mask_unix_sock = None;
    cfg.censorship.mask_host = Some("127.0.0.1".to_string());
    cfg.censorship.mask_port = 1; // no listener: backend connect fails fast
    cfg.timeouts.client_handshake = 1;
    cfg.censorship.server_hello_delay_min_ms = 700;
    cfg.censorship.server_hello_delay_max_ms = 700; // deterministic delay
    let (server_side, mut client_side) = duplex(65536);
    let started = Instant::now();
    let handler = tokio::spawn(handle_client_stream(
        server_side,
        "198.51.100.170:56170".parse().unwrap(),
        Arc::new(cfg),
        Arc::new(Stats::new()),
        new_upstream_manager(Arc::new(Stats::new())),
        Arc::new(ReplayChecker::new(256, Duration::from_secs(60))),
        Arc::new(BufferPool::new()),
        Arc::new(SecureRandom::new()),
        None,
        Arc::new(RouteRuntimeController::new(RelayRouteMode::Direct)),
        None,
        Arc::new(UserIpTracker::new()),
        Arc::new(BeobachtenStore::new()),
        false,
    ));
    // Valid header, garbage body of `body_sent` bytes — always rejected.
    let mut probe = vec![0u8; 5 + body_sent];
    probe[0] = 0x16;
    probe[1] = 0x03;
    probe[2] = 0x01;
    probe[3..5].copy_from_slice(&600u16.to_be_bytes());
    probe[5..].fill(0xA7);
    client_side.write_all(&probe).await.unwrap();
    client_side.shutdown().await.unwrap();
    let result = tokio::time::timeout(Duration::from_secs(4), handler)
        .await
        .unwrap()
        .unwrap();
    assert!(result.is_ok());
    started.elapsed().as_millis()
}
/// Run one invalid-probe session against a masking proxy backed by a live
/// capture listener and return how many bytes the backend received in total
/// (expected: 5 header bytes plus the `body_sent` bytes actually delivered).
async fn capture_forwarded_len(body_sent: usize) -> usize {
    let listener = TcpListener::bind("127.0.0.1:0").await.unwrap();
    let backend_addr = listener.local_addr().unwrap();
    let mut cfg = ProxyConfig::default();
    cfg.general.beobachten = false;
    cfg.censorship.mask = true;
    cfg.censorship.mask_unix_sock = None;
    cfg.censorship.mask_host = Some("127.0.0.1".to_string());
    cfg.censorship.mask_port = backend_addr.port();
    cfg.timeouts.client_handshake = 1;
    // Read until EOF (bounded by a 2s timeout) and report the byte count.
    let accept_task = tokio::spawn(async move {
        let (mut stream, _) = listener.accept().await.unwrap();
        let mut got = Vec::new();
        let _ = tokio::time::timeout(Duration::from_secs(2), stream.read_to_end(&mut got)).await;
        got.len()
    });
    let (server_side, mut client_side) = duplex(65536);
    let handler = tokio::spawn(handle_client_stream(
        server_side,
        "198.51.100.171:56171".parse().unwrap(),
        Arc::new(cfg),
        Arc::new(Stats::new()),
        new_upstream_manager(Arc::new(Stats::new())),
        Arc::new(ReplayChecker::new(256, Duration::from_secs(60))),
        Arc::new(BufferPool::new()),
        Arc::new(SecureRandom::new()),
        None,
        Arc::new(RouteRuntimeController::new(RelayRouteMode::Direct)),
        None,
        Arc::new(UserIpTracker::new()),
        Arc::new(BeobachtenStore::new()),
        false,
    ));
    // Header advertises 600 bytes; only `body_sent` bytes follow.
    let mut probe = vec![0u8; 5 + body_sent];
    probe[0] = 0x16;
    probe[1] = 0x03;
    probe[2] = 0x01;
    probe[3..5].copy_from_slice(&600u16.to_be_bytes());
    probe[5..].fill(0xB4);
    client_side.write_all(&probe).await.unwrap();
    client_side.shutdown().await.unwrap();
    let result = tokio::time::timeout(Duration::from_secs(4), handler)
        .await
        .unwrap()
        .unwrap();
    assert!(result.is_ok());
    tokio::time::timeout(Duration::from_secs(4), accept_task)
        .await
        .unwrap()
        .unwrap()
}
#[tokio::test]
async fn diagnostic_timing_profiles_are_within_realistic_guardrails() {
    // For several probe body sizes, sample the reject latency 8 times and
    // check p50/p95/max stay within guardrails around the 700ms reject
    // delay configured by measure_reject_duration_ms.
    let classes = [17usize, 511usize, 1023usize, 4095usize];
    for class in classes {
        let mut samples = Vec::new();
        for _ in 0..8 {
            samples.push(measure_reject_duration_ms(class).await);
        }
        // Compute max before handing samples off: the final percentile_ms
        // call can then consume the Vec, avoiding a redundant clone.
        let max = *samples.iter().max().unwrap();
        let p50 = percentile_ms(samples.clone(), 50, 100);
        let p95 = percentile_ms(samples, 95, 100);
        println!(
            "diagnostic_timing class={} p50={}ms p95={}ms max={}ms",
            class, p50, p95, max
        );
        assert!(p50 >= 650, "p50 too low for delayed reject class={}", class);
        assert!(p95 <= 1200, "p95 too high for delayed reject class={}", class);
        assert!(max <= 1500, "max too high for delayed reject class={}", class);
    }
}
#[tokio::test]
async fn diagnostic_forwarded_size_profiles_by_probe_class() {
    // For each probe body size, verify the mask backend receives exactly
    // 5 + class bytes, then print a distribution summary. Note the sizes
    // are byte counts; percentile_ms is reused purely as a generic
    // nearest-rank percentile over u128 values.
    let classes = [0usize, 1usize, 7usize, 17usize, 63usize, 511usize, 1023usize, 2047usize];
    let mut observed = Vec::new();
    for class in classes {
        let len = capture_forwarded_len(class).await;
        println!("diagnostic_shape class={} forwarded_len={}", class, len);
        observed.push(len as u128);
        assert_eq!(len, 5 + class, "unexpected forwarded len for class={}", class);
    }
    // Compute max first so the final percentile_ms call can consume the
    // Vec, saving one redundant clone.
    let max = *observed.iter().max().unwrap();
    let p50 = percentile_ms(observed.clone(), 50, 100);
    let p95 = percentile_ms(observed, 95, 100);
    println!(
        "diagnostic_shape_summary p50={}bytes p95={}bytes max={}bytes",
        p50, p95, max
    );
    assert!(p95 >= p50);
    assert!(max >= p95);
}

View File

@@ -0,0 +1,701 @@
use super::*;
use crate::config::{UpstreamConfig, UpstreamType};
use crate::crypto::sha256_hmac;
use crate::protocol::constants::{HANDSHAKE_LEN, TLS_RECORD_APPLICATION, TLS_VERSION};
use crate::protocol::tls;
use tokio::io::{duplex, AsyncReadExt, AsyncWriteExt};
use tokio::net::TcpListener;
use tokio::time::{Duration, Instant};
/// Shared services needed to spawn `handle_client_stream` in this test
/// module, mirroring the handler's argument list.
struct Harness {
    config: Arc<ProxyConfig>,
    stats: Arc<Stats>,              // shared counters inspected by assertions
    upstream_manager: Arc<UpstreamManager>,
    replay_checker: Arc<ReplayChecker>, // detects replayed ClientHellos
    buffer_pool: Arc<BufferPool>,
    rng: Arc<SecureRandom>,
    route_runtime: Arc<RouteRuntimeController>,
    ip_tracker: Arc<UserIpTracker>,
    beobachten: Arc<BeobachtenStore>,
}
/// Single direct upstream (no relay), weight 1, all numeric limits 1.
fn new_upstream_manager(stats: Arc<Stats>) -> Arc<UpstreamManager> {
    let direct = UpstreamConfig {
        upstream_type: UpstreamType::Direct {
            interface: None,
            bind_addresses: None,
        },
        weight: 1,
        enabled: true,
        scopes: String::new(),
        selected_scope: String::new(),
    };
    Arc::new(UpstreamManager::new(vec![direct], 1, 1, 1, 1, false, stats))
}
/// Construct a masking-enabled harness with one registered user
/// ("user" -> `secret_hex`) and the mask backend at 127.0.0.1:`mask_port`.
/// Time-skew checks are disabled so tests may use arbitrary timestamps.
fn build_harness(secret_hex: &str, mask_port: u16) -> Harness {
    let mut cfg = ProxyConfig::default();
    cfg.general.beobachten = false;
    cfg.censorship.mask = true;
    cfg.censorship.mask_unix_sock = None;
    cfg.censorship.mask_host = Some("127.0.0.1".to_string());
    cfg.censorship.mask_port = mask_port;
    cfg.censorship.mask_proxy_protocol = 0;
    cfg.access.ignore_time_skew = true;
    cfg.access
        .users
        .insert("user".to_string(), secret_hex.to_string());
    let stats = Arc::new(Stats::new());
    let upstream_manager = new_upstream_manager(stats.clone());
    Harness {
        config: Arc::new(cfg),
        stats,
        upstream_manager,
        replay_checker: Arc::new(ReplayChecker::new(512, Duration::from_secs(60))),
        buffer_pool: Arc::new(BufferPool::new()),
        rng: Arc::new(SecureRandom::new()),
        route_runtime: Arc::new(RouteRuntimeController::new(RelayRouteMode::Direct)),
        ip_tracker: Arc::new(UserIpTracker::new()),
        beobachten: Arc::new(BeobachtenStore::new()),
    }
}
/// Build a faketls ClientHello that passes the proxy's digest check.
///
/// The record header advertises `tls_len` body bytes, the body is padded
/// with `fill`, and the HMAC digest — computed with the digest field
/// zeroed — has the little-endian `timestamp` XOR-ed into its last four
/// bytes, as the validation scheme expects.
fn make_valid_tls_client_hello(secret: &[u8], timestamp: u32, tls_len: usize, fill: u8) -> Vec<u8> {
    assert!(tls_len <= u16::MAX as usize, "TLS length must fit into record header");
    let mut hello = vec![fill; 5 + tls_len];
    hello[0] = 0x16;
    hello[1] = 0x03;
    hello[2] = 0x01;
    hello[3..5].copy_from_slice(&(tls_len as u16).to_be_bytes());
    let digest_range = tls::TLS_DIGEST_POS..tls::TLS_DIGEST_POS + tls::TLS_DIGEST_LEN;
    // Declare a 32-byte session id right after the digest field.
    hello[digest_range.end] = 32;
    // HMAC is computed over the hello with the digest field zeroed.
    hello[digest_range.clone()].fill(0);
    let mut digest = sha256_hmac(secret, &hello);
    let ts = timestamp.to_le_bytes();
    for (d, t) in digest[28..32].iter_mut().zip(ts.iter()) {
        *d ^= *t;
    }
    hello[digest_range].copy_from_slice(&digest);
    hello
}
/// Wrap `payload` in a single TLS application-data record.
///
/// # Panics
/// Panics if `payload` exceeds `u16::MAX` bytes: the record length field is
/// 16 bits, and the previous bare `as u16` cast would have silently
/// truncated the length and produced a corrupt record. The guard mirrors
/// the assert in `make_valid_tls_client_hello`.
fn wrap_tls_application_data(payload: &[u8]) -> Vec<u8> {
    assert!(payload.len() <= u16::MAX as usize, "TLS record payload must fit into record header");
    let mut record = Vec::with_capacity(5 + payload.len());
    record.push(TLS_RECORD_APPLICATION);
    record.extend_from_slice(&TLS_VERSION);
    record.extend_from_slice(&(payload.len() as u16).to_be_bytes());
    record.extend_from_slice(payload);
    record
}
/// Read (and discard) the body of a TLS record whose 5-byte `header` has
/// already been consumed; panics on short reads.
async fn read_tls_record_body<T>(stream: &mut T, header: [u8; 5])
where
    T: tokio::io::AsyncRead + Unpin,
{
    let body_len = usize::from(u16::from_be_bytes([header[3], header[4]]));
    let mut discard = vec![0u8; body_len];
    stream.read_exact(&mut discard).await.unwrap();
}
/// Drive a session that completes the faketls handshake (secret/timestamp
/// given), then fails MTProto validation with a zeroed payload, then sends
/// `trailing_records`. Returns the bytes the mask backend received, which
/// are also asserted to equal the concatenated trailing records — i.e. the
/// ClientHello and the invalid payload must never reach the backend.
async fn run_tls_success_mtproto_fail_capture(
    secret_hex: &str,
    secret: [u8; 16],
    timestamp: u32,
    trailing_records: Vec<Vec<u8>>,
) -> Vec<u8> {
    let listener = TcpListener::bind("127.0.0.1:0").await.unwrap();
    let backend_addr = listener.local_addr().unwrap();
    let expected_len = trailing_records.iter().map(Vec::len).sum::<usize>();
    let expected_concat = trailing_records.concat();
    let accept_task = tokio::spawn(async move {
        let (mut stream, _) = listener.accept().await.unwrap();
        let mut got = vec![0u8; expected_len];
        stream.read_exact(&mut got).await.unwrap();
        got
    });
    let harness = build_harness(secret_hex, backend_addr.port());
    let client_hello = make_valid_tls_client_hello(&secret, timestamp, 600, 0x42);
    // Zeroed HANDSHAKE_LEN payload fails MTProto validation after the
    // faketls handshake succeeded.
    let invalid_mtproto_record = wrap_tls_application_data(&vec![0u8; HANDSHAKE_LEN]);
    let (server_side, mut client_side) = duplex(262144);
    let handler = tokio::spawn(handle_client_stream(
        server_side,
        "198.51.100.210:56010".parse().unwrap(),
        harness.config,
        harness.stats,
        harness.upstream_manager,
        harness.replay_checker,
        harness.buffer_pool,
        harness.rng,
        None,
        harness.route_runtime,
        None,
        harness.ip_tracker,
        harness.beobachten,
        false,
    ));
    client_side.write_all(&client_hello).await.unwrap();
    // Consume the ServerHello (handshake record, type 0x16).
    let mut tls_response_head = [0u8; 5];
    client_side.read_exact(&mut tls_response_head).await.unwrap();
    assert_eq!(tls_response_head[0], 0x16);
    read_tls_record_body(&mut client_side, tls_response_head).await;
    client_side.write_all(&invalid_mtproto_record).await.unwrap();
    for record in trailing_records {
        client_side.write_all(&record).await.unwrap();
    }
    let got = tokio::time::timeout(Duration::from_secs(3), accept_task)
        .await
        .unwrap()
        .unwrap();
    assert_eq!(got, expected_concat);
    drop(client_side);
    let _ = tokio::time::timeout(Duration::from_secs(3), handler)
        .await
        .unwrap()
        .unwrap();
    got
}
#[tokio::test]
async fn masking_budget_survives_zero_handshake_timeout_with_delay() {
    // Zero handshake timeout plus a 720ms reject delay: the invalid probe
    // (all 605 bytes of it) must still be relayed to the mask backend, with
    // no handshake-timeout accounting and the delay still observable.
    let listener = TcpListener::bind("127.0.0.1:0").await.unwrap();
    let backend_addr = listener.local_addr().unwrap();
    let mut cfg = ProxyConfig::default();
    cfg.general.beobachten = false;
    cfg.censorship.mask = true;
    cfg.censorship.mask_unix_sock = None;
    cfg.censorship.mask_host = Some("127.0.0.1".to_string());
    cfg.censorship.mask_port = backend_addr.port();
    cfg.timeouts.client_handshake = 0;
    cfg.censorship.server_hello_delay_min_ms = 720;
    cfg.censorship.server_hello_delay_max_ms = 720; // deterministic delay
    let config = Arc::new(cfg);
    let stats = Arc::new(Stats::new());
    // Backend expects the full probe: 5 header + 600 body bytes.
    let accept_task = tokio::spawn(async move {
        let (mut stream, _) = listener.accept().await.unwrap();
        let mut got = vec![0u8; 605];
        stream.read_exact(&mut got).await.unwrap();
        got
    });
    let (server_side, mut client_side) = duplex(65536);
    let handler = tokio::spawn(handle_client_stream(
        server_side,
        "198.51.100.211:56011".parse().unwrap(),
        config,
        stats.clone(),
        new_upstream_manager(stats.clone()),
        Arc::new(ReplayChecker::new(128, Duration::from_secs(60))),
        Arc::new(BufferPool::new()),
        Arc::new(SecureRandom::new()),
        None,
        Arc::new(RouteRuntimeController::new(RelayRouteMode::Direct)),
        None,
        Arc::new(UserIpTracker::new()),
        Arc::new(BeobachtenStore::new()),
        false,
    ));
    // Valid header, garbage body — fails faketls validation.
    let mut invalid_hello = vec![0u8; 605];
    invalid_hello[0] = 0x16;
    invalid_hello[1] = 0x03;
    invalid_hello[2] = 0x01;
    invalid_hello[3..5].copy_from_slice(&600u16.to_be_bytes());
    invalid_hello[5..].fill(0xA1);
    let started = Instant::now();
    client_side.write_all(&invalid_hello).await.unwrap();
    let _ = tokio::time::timeout(Duration::from_secs(3), accept_task)
        .await
        .unwrap()
        .unwrap();
    client_side.shutdown().await.unwrap();
    let result = tokio::time::timeout(Duration::from_secs(3), handler)
        .await
        .unwrap()
        .unwrap();
    assert!(result.is_ok());
    assert_eq!(stats.get_handshake_timeouts(), 0);
    // 680ms floor allows timer slack below the configured 720ms.
    assert!(started.elapsed() >= Duration::from_millis(680));
}
#[tokio::test]
async fn tls_mtproto_fail_forwards_only_trailing_record() {
    // After a valid faketls handshake followed by an invalid MTProto
    // payload, only the trailing record may reach the mask backend.
    let tail = wrap_tls_application_data(b"tail-only");
    let secret_hex = "c1c1c1c1c1c1c1c1c1c1c1c1c1c1c1c1";
    let forwarded =
        run_tls_success_mtproto_fail_capture(secret_hex, [0xC1; 16], 1, vec![tail.clone()]).await;
    assert_eq!(forwarded, tail);
}
#[tokio::test]
async fn replayed_tls_hello_gets_no_serverhello_and_is_masked() {
    // Two sessions send the byte-identical ClientHello. The first completes
    // the handshake (then fails MTProto, so its tail is masked); the second
    // is a replay: it must receive no ServerHello at all, and its entire
    // hello must be forwarded to the mask backend instead.
    let listener = TcpListener::bind("127.0.0.1:0").await.unwrap();
    let backend_addr = listener.local_addr().unwrap();
    let harness = build_harness("c2c2c2c2c2c2c2c2c2c2c2c2c2c2c2c2", backend_addr.port());
    let secret = [0xC2u8; 16];
    let hello = make_valid_tls_client_hello(&secret, 2, 600, 0x41);
    let invalid_mtproto_record = wrap_tls_application_data(&vec![0u8; HANDSHAKE_LEN]);
    let first_tail = wrap_tls_application_data(b"seed");
    let expected_hello = hello.clone();
    let expected_tail = first_tail.clone();
    // First backend connection: the first session's trailing record.
    // Second backend connection: the replayed session's full ClientHello.
    let accept_task = tokio::spawn(async move {
        let (mut s1, _) = listener.accept().await.unwrap();
        let mut got_tail = vec![0u8; expected_tail.len()];
        s1.read_exact(&mut got_tail).await.unwrap();
        assert_eq!(got_tail, expected_tail);
        drop(s1);
        let (mut s2, _) = listener.accept().await.unwrap();
        let mut got_hello = vec![0u8; expected_hello.len()];
        s2.read_exact(&mut got_hello).await.unwrap();
        assert_eq!(got_hello, expected_hello);
    });
    // Closure that runs one session; `send_mtproto` distinguishes the
    // first (handshake completes) from the replayed (expects silence) run.
    let run_session = |send_mtproto: bool| {
        let (server_side, mut client_side) = duplex(131072);
        let config = harness.config.clone();
        let stats = harness.stats.clone();
        let upstream = harness.upstream_manager.clone();
        let replay = harness.replay_checker.clone();
        let pool = harness.buffer_pool.clone();
        let rng = harness.rng.clone();
        let route = harness.route_runtime.clone();
        let ipt = harness.ip_tracker.clone();
        let beob = harness.beobachten.clone();
        let hello = hello.clone();
        let invalid_mtproto_record = invalid_mtproto_record.clone();
        let first_tail = first_tail.clone();
        async move {
            let handler = tokio::spawn(handle_client_stream(
                server_side,
                "198.51.100.212:56012".parse().unwrap(),
                config,
                stats,
                upstream,
                replay,
                pool,
                rng,
                None,
                route,
                None,
                ipt,
                beob,
                false,
            ));
            client_side.write_all(&hello).await.unwrap();
            if send_mtproto {
                // Fresh hello: expect a ServerHello, then fail MTProto and
                // send the tail so it lands on the mask backend.
                let mut head = [0u8; 5];
                client_side.read_exact(&mut head).await.unwrap();
                assert_eq!(head[0], 0x16);
                read_tls_record_body(&mut client_side, head).await;
                client_side.write_all(&invalid_mtproto_record).await.unwrap();
                client_side.write_all(&first_tail).await.unwrap();
            } else {
                // Replay: no ServerHello byte may arrive within 300ms —
                // either the read times out or the stream reports an error.
                let mut one = [0u8; 1];
                let no_server_hello = tokio::time::timeout(
                    Duration::from_millis(300),
                    client_side.read_exact(&mut one),
                )
                .await;
                assert!(no_server_hello.is_err() || no_server_hello.unwrap().is_err());
            }
            client_side.shutdown().await.unwrap();
            let _ = tokio::time::timeout(Duration::from_secs(3), handler)
                .await
                .unwrap()
                .unwrap();
        }
    };
    // Sequential runs: the first seeds the replay checker, the second replays.
    run_session(true).await;
    run_session(false).await;
    tokio::time::timeout(Duration::from_secs(5), accept_task)
        .await
        .unwrap()
        .unwrap();
}
/// After a valid TLS ClientHello, a single invalid MTProto record must bump
/// the `connects_bad` counter exactly once, while the fallback path still
/// forwards the trailing record to the masked backend.
#[tokio::test]
async fn connects_bad_increments_once_per_invalid_mtproto() {
    let listener = TcpListener::bind("127.0.0.1:0").await.unwrap();
    let backend_addr = listener.local_addr().unwrap();
    let harness = build_harness("c3c3c3c3c3c3c3c3c3c3c3c3c3c3c3c3", backend_addr.port());
    let stats = harness.stats.clone();
    // Delta-based check: snapshot the counter before the session runs.
    let bad_before = stats.get_connects_bad();
    let tail = wrap_tls_application_data(b"accounting");
    let expected_tail = tail.clone();
    // Masked backend: must receive exactly the trailing record.
    let accept_task = tokio::spawn(async move {
        let (mut stream, _) = listener.accept().await.unwrap();
        let mut got = vec![0u8; expected_tail.len()];
        stream.read_exact(&mut got).await.unwrap();
        assert_eq!(got, expected_tail);
    });
    let hello = make_valid_tls_client_hello(&[0xC3; 16], 3, 600, 0x42);
    // Correct TLS framing, but an all-zero MTProto handshake payload => invalid.
    let invalid_mtproto_record = wrap_tls_application_data(&vec![0u8; HANDSHAKE_LEN]);
    let (server_side, mut client_side) = duplex(131072);
    let handler = tokio::spawn(handle_client_stream(
        server_side,
        "198.51.100.213:56013".parse().unwrap(),
        harness.config,
        harness.stats,
        harness.upstream_manager,
        harness.replay_checker,
        harness.buffer_pool,
        harness.rng,
        None,
        harness.route_runtime,
        None,
        harness.ip_tracker,
        harness.beobachten,
        false,
    ));
    client_side.write_all(&hello).await.unwrap();
    // Drain the proxy's ServerHello record before sending application data.
    let mut head = [0u8; 5];
    client_side.read_exact(&mut head).await.unwrap();
    read_tls_record_body(&mut client_side, head).await;
    client_side.write_all(&invalid_mtproto_record).await.unwrap();
    client_side.write_all(&tail).await.unwrap();
    tokio::time::timeout(Duration::from_secs(3), accept_task)
        .await
        .unwrap()
        .unwrap();
    client_side.shutdown().await.unwrap();
    let _ = tokio::time::timeout(Duration::from_secs(3), handler)
        .await
        .unwrap()
        .unwrap();
    // Exactly one increment for the single invalid MTProto attempt.
    assert_eq!(stats.get_connects_bad(), bad_before + 1);
}
/// A ClientHello whose record header advertises 600 bytes but whose body is
/// cut short (only 17 bytes arrive before shutdown) must be masked: the
/// backend receives exactly the prefix the proxy saw, nothing synthesized.
#[tokio::test]
async fn truncated_clienthello_forwards_only_seen_prefix() {
    let listener = TcpListener::bind("127.0.0.1:0").await.unwrap();
    let backend_addr = listener.local_addr().unwrap();
    let mut cfg = ProxyConfig::default();
    cfg.general.beobachten = false;
    cfg.censorship.mask = true;
    cfg.censorship.mask_host = Some("127.0.0.1".to_string());
    cfg.censorship.mask_port = backend_addr.port();
    cfg.censorship.mask_unix_sock = None;
    let config = Arc::new(cfg);
    let stats = Arc::new(Stats::new());
    // 5-byte record header + the 17 body bytes actually sent.
    let expected_prefix_len = 5 + 17;
    let accept_task = tokio::spawn(async move {
        let (mut stream, _) = listener.accept().await.unwrap();
        let mut got = vec![0u8; expected_prefix_len];
        stream.read_exact(&mut got).await.unwrap();
        got
    });
    let (server_side, mut client_side) = duplex(65536);
    let handler = tokio::spawn(handle_client_stream(
        server_side,
        "198.51.100.214:56014".parse().unwrap(),
        config,
        stats,
        new_upstream_manager(Arc::new(Stats::new())),
        Arc::new(ReplayChecker::new(128, Duration::from_secs(60))),
        Arc::new(BufferPool::new()),
        Arc::new(SecureRandom::new()),
        None,
        Arc::new(RouteRuntimeController::new(RelayRouteMode::Direct)),
        None,
        Arc::new(UserIpTracker::new()),
        Arc::new(BeobachtenStore::new()),
        false,
    ));
    // Header claims 600 bytes; only 17 filler bytes follow.
    let mut hello = vec![0u8; 5 + 17];
    hello[0] = 0x16;
    hello[1] = 0x03;
    hello[2] = 0x01;
    hello[3..5].copy_from_slice(&600u16.to_be_bytes());
    hello[5..].fill(0x55);
    client_side.write_all(&hello).await.unwrap();
    client_side.shutdown().await.unwrap();
    let got = tokio::time::timeout(Duration::from_secs(3), accept_task)
        .await
        .unwrap()
        .unwrap();
    assert_eq!(got, hello);
    let _ = tokio::time::timeout(Duration::from_secs(3), handler)
        .await
        .unwrap()
        .unwrap();
}
/// A bare 5-byte record header advertising a large body (0x4269) that never
/// arrives must be masked: the backend receives only those header bytes.
#[tokio::test]
async fn out_of_bounds_tls_len_forwards_header_only() {
    let listener = TcpListener::bind("127.0.0.1:0").await.unwrap();
    let backend_addr = listener.local_addr().unwrap();
    let mut cfg = ProxyConfig::default();
    cfg.general.beobachten = false;
    cfg.censorship.mask = true;
    cfg.censorship.mask_host = Some("127.0.0.1".to_string());
    cfg.censorship.mask_port = backend_addr.port();
    cfg.censorship.mask_unix_sock = None;
    let config = Arc::new(cfg);
    let accept_task = tokio::spawn(async move {
        let (mut stream, _) = listener.accept().await.unwrap();
        let mut got = [0u8; 5];
        stream.read_exact(&mut got).await.unwrap();
        got
    });
    let (server_side, mut client_side) = duplex(8192);
    let handler = tokio::spawn(handle_client_stream(
        server_side,
        "198.51.100.215:56015".parse().unwrap(),
        config,
        Arc::new(Stats::new()),
        new_upstream_manager(Arc::new(Stats::new())),
        Arc::new(ReplayChecker::new(128, Duration::from_secs(60))),
        Arc::new(BufferPool::new()),
        Arc::new(SecureRandom::new()),
        None,
        Arc::new(RouteRuntimeController::new(RelayRouteMode::Direct)),
        None,
        Arc::new(UserIpTracker::new()),
        Arc::new(BeobachtenStore::new()),
        false,
    ));
    // Handshake header only: advertised length 0x4269, zero body bytes sent.
    let hdr = [0x16, 0x03, 0x01, 0x42, 0x69];
    client_side.write_all(&hdr).await.unwrap();
    client_side.shutdown().await.unwrap();
    let got = tokio::time::timeout(Duration::from_secs(3), accept_task)
        .await
        .unwrap()
        .unwrap();
    assert_eq!(got, hdr);
    let _ = tokio::time::timeout(Duration::from_secs(3), handler)
        .await
        .unwrap()
        .unwrap();
}
/// With both classic and secure MTProto modes disabled, a non-TLS probe must
/// still be masked to the backend (forwarded verbatim) rather than dropped.
#[tokio::test]
async fn non_tls_with_modes_disabled_is_masked() {
    let listener = TcpListener::bind("127.0.0.1:0").await.unwrap();
    let backend_addr = listener.local_addr().unwrap();
    let mut cfg = ProxyConfig::default();
    cfg.general.beobachten = false;
    cfg.censorship.mask = true;
    cfg.censorship.mask_host = Some("127.0.0.1".to_string());
    cfg.censorship.mask_port = backend_addr.port();
    cfg.censorship.mask_unix_sock = None;
    // Disable every non-TLS transport so masking is the only remaining path.
    cfg.general.modes.classic = false;
    cfg.general.modes.secure = false;
    let config = Arc::new(cfg);
    let accept_task = tokio::spawn(async move {
        let (mut stream, _) = listener.accept().await.unwrap();
        let mut got = [0u8; 5];
        stream.read_exact(&mut got).await.unwrap();
        got
    });
    let (server_side, mut client_side) = duplex(8192);
    let handler = tokio::spawn(handle_client_stream(
        server_side,
        "198.51.100.216:56016".parse().unwrap(),
        config,
        Arc::new(Stats::new()),
        new_upstream_manager(Arc::new(Stats::new())),
        Arc::new(ReplayChecker::new(128, Duration::from_secs(60))),
        Arc::new(BufferPool::new()),
        Arc::new(SecureRandom::new()),
        None,
        Arc::new(RouteRuntimeController::new(RelayRouteMode::Direct)),
        None,
        Arc::new(UserIpTracker::new()),
        Arc::new(BeobachtenStore::new()),
        false,
    ));
    // Plainly non-TLS bytes (first byte is not a TLS record type).
    let probe = *b"HELLO";
    client_side.write_all(&probe).await.unwrap();
    client_side.shutdown().await.unwrap();
    let got = tokio::time::timeout(Duration::from_secs(3), accept_task)
        .await
        .unwrap()
        .unwrap();
    assert_eq!(got, probe);
    let _ = tokio::time::timeout(Duration::from_secs(3), handler)
        .await
        .unwrap()
        .unwrap();
}
/// Runs 12 concurrent sessions (valid TLS, failing MTProto) against one masked
/// backend and verifies each session's distinct trailing record arrives intact
/// — i.e. no cross-session mixing of fallback payloads.
#[tokio::test]
async fn concurrent_tls_mtproto_fail_sessions_are_isolated() {
    let sessions = 12usize;
    let listener = TcpListener::bind("127.0.0.1:0").await.unwrap();
    let backend_addr = listener.local_addr().unwrap();
    // Each session's trailing record is unique (fill byte and length vary),
    // so the set membership check below detects any corruption or mixing.
    let mut expected = std::collections::HashSet::new();
    for idx in 0..sessions {
        let payload = vec![idx as u8; 32 + idx];
        expected.insert(wrap_tls_application_data(&payload));
    }
    let accept_task = tokio::spawn(async move {
        let mut remaining = expected;
        for _ in 0..sessions {
            let (mut stream, _) = listener.accept().await.unwrap();
            let mut header = [0u8; 5];
            stream.read_exact(&mut header).await.unwrap();
            assert_eq!(header[0], TLS_RECORD_APPLICATION);
            // Re-frame the full record so it can be matched against the set.
            let len = u16::from_be_bytes([header[3], header[4]]) as usize;
            let mut record = vec![0u8; 5 + len];
            record[..5].copy_from_slice(&header);
            stream.read_exact(&mut record[5..]).await.unwrap();
            assert!(remaining.remove(&record));
        }
        assert!(remaining.is_empty());
    });
    let mut tasks = Vec::with_capacity(sessions);
    for idx in 0..sessions {
        let secret_hex = "c4c4c4c4c4c4c4c4c4c4c4c4c4c4c4c4";
        let harness = build_harness(secret_hex, backend_addr.port());
        let hello = make_valid_tls_client_hello(&[0xC4; 16], 20 + idx as u32, 600, 0x40 + idx as u8);
        let invalid_mtproto = wrap_tls_application_data(&vec![0u8; HANDSHAKE_LEN]);
        let trailing = wrap_tls_application_data(&vec![idx as u8; 32 + idx]);
        let peer: SocketAddr = format!("198.51.100.217:{}", 56100 + idx as u16)
            .parse()
            .unwrap();
        tasks.push(tokio::spawn(async move {
            let (server_side, mut client_side) = duplex(131072);
            let handler = tokio::spawn(handle_client_stream(
                server_side,
                peer,
                harness.config,
                harness.stats,
                harness.upstream_manager,
                harness.replay_checker,
                harness.buffer_pool,
                harness.rng,
                None,
                harness.route_runtime,
                None,
                harness.ip_tracker,
                harness.beobachten,
                false,
            ));
            client_side.write_all(&hello).await.unwrap();
            // Drain the ServerHello before pushing application data.
            let mut head = [0u8; 5];
            client_side.read_exact(&mut head).await.unwrap();
            read_tls_record_body(&mut client_side, head).await;
            client_side.write_all(&invalid_mtproto).await.unwrap();
            client_side.write_all(&trailing).await.unwrap();
            client_side.shutdown().await.unwrap();
            let _ = tokio::time::timeout(Duration::from_secs(3), handler)
                .await
                .unwrap()
                .unwrap();
        }));
    }
    for task in tasks {
        task.await.unwrap();
    }
    tokio::time::timeout(Duration::from_secs(6), accept_task)
        .await
        .unwrap()
        .unwrap();
}
/// Generates a test asserting that a trailing TLS application-data record of
/// `$len` payload bytes survives the TLS-success/MTProto-fail fallback path
/// byte-for-byte.
macro_rules! tail_length_case {
    ($name:ident, $hex:expr, $secret:expr, $ts:expr, $len:expr) => {
        #[tokio::test]
        async fn $name() {
            // Deterministic pseudo-random payload of $len bytes.
            let payload: Vec<u8> = (0..$len)
                .map(|i| (i as u8).wrapping_mul(17).wrapping_add(5))
                .collect();
            let record = wrap_tls_application_data(&payload);
            let got = run_tls_success_mtproto_fail_capture($hex, $secret, $ts, vec![record.clone()]).await;
            assert_eq!(got, record);
        }
    };
}
// Tail lengths from 1 byte up to ~1 KiB: the fallback path must preserve the
// trailing record byte-for-byte regardless of its size.
tail_length_case!(tail_len_1_preserved, "d1d1d1d1d1d1d1d1d1d1d1d1d1d1d1d1", [0xD1; 16], 30, 1);
tail_length_case!(tail_len_2_preserved, "d2d2d2d2d2d2d2d2d2d2d2d2d2d2d2d2", [0xD2; 16], 31, 2);
tail_length_case!(tail_len_3_preserved, "d3d3d3d3d3d3d3d3d3d3d3d3d3d3d3d3", [0xD3; 16], 32, 3);
tail_length_case!(tail_len_7_preserved, "d4d4d4d4d4d4d4d4d4d4d4d4d4d4d4d4", [0xD4; 16], 33, 7);
tail_length_case!(tail_len_31_preserved, "d5d5d5d5d5d5d5d5d5d5d5d5d5d5d5d5", [0xD5; 16], 34, 31);
tail_length_case!(tail_len_127_preserved, "d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6", [0xD6; 16], 35, 127);
tail_length_case!(tail_len_511_preserved, "d7d7d7d7d7d7d7d7d7d7d7d7d7d7d7d7", [0xD7; 16], 36, 511);
tail_length_case!(tail_len_1023_preserved, "d8d8d8d8d8d8d8d8d8d8d8d8d8d8d8d8", [0xD8; 16], 37, 1023);

// ===================================================================
// Next test file (extraction artifact "View File" / @@ -0,0 +1,556 @@
// replaced by this boundary comment).
// ===================================================================
use super::*;
use crate::config::{UpstreamConfig, UpstreamType};
use crate::crypto::sha256_hmac;
use crate::protocol::constants::{HANDSHAKE_LEN, TLS_VERSION};
use crate::protocol::tls;
use tokio::io::{duplex, AsyncReadExt, AsyncWriteExt};
use tokio::net::TcpListener;
use tokio::time::{Duration, Instant};
/// Shared fixture for the red-team tests: every dependency that
/// `handle_client_stream` takes, pre-built around a masked backend.
struct RedTeamHarness {
    config: Arc<ProxyConfig>,
    stats: Arc<Stats>,
    upstream_manager: Arc<UpstreamManager>,
    replay_checker: Arc<ReplayChecker>,
    buffer_pool: Arc<BufferPool>,
    rng: Arc<SecureRandom>,
    route_runtime: Arc<RouteRuntimeController>,
    ip_tracker: Arc<UserIpTracker>,
    beobachten: Arc<BeobachtenStore>,
}
/// Builds a [`RedTeamHarness`] with masking enabled toward `127.0.0.1:mask_port`,
/// time-skew checks disabled, and a single user keyed by `secret_hex`.
fn build_harness(secret_hex: &str, mask_port: u16) -> RedTeamHarness {
    let mut cfg = ProxyConfig::default();
    cfg.general.beobachten = false;
    cfg.censorship.mask = true;
    cfg.censorship.mask_unix_sock = None;
    cfg.censorship.mask_host = Some("127.0.0.1".to_string());
    cfg.censorship.mask_port = mask_port;
    cfg.censorship.mask_proxy_protocol = 0;
    // Fixed test timestamps would otherwise be rejected as skewed.
    cfg.access.ignore_time_skew = true;
    cfg.access
        .users
        .insert("user".to_string(), secret_hex.to_string());
    let config = Arc::new(cfg);
    let stats = Arc::new(Stats::new());
    // Single direct upstream, no SOCKS/relay indirection.
    let upstream_manager = Arc::new(UpstreamManager::new(
        vec![UpstreamConfig {
            upstream_type: UpstreamType::Direct {
                interface: None,
                bind_addresses: None,
            },
            weight: 1,
            enabled: true,
            scopes: String::new(),
            selected_scope: String::new(),
        }],
        // NOTE(review): the four `1` positional limits below are undocumented
        // here — confirm their meaning against `UpstreamManager::new`.
        1,
        1,
        1,
        1,
        false,
        stats.clone(),
    ));
    RedTeamHarness {
        config,
        stats,
        upstream_manager,
        replay_checker: Arc::new(ReplayChecker::new(256, Duration::from_secs(60))),
        buffer_pool: Arc::new(BufferPool::new()),
        rng: Arc::new(SecureRandom::new()),
        route_runtime: Arc::new(RouteRuntimeController::new(RelayRouteMode::Direct)),
        ip_tracker: Arc::new(UserIpTracker::new()),
        beobachten: Arc::new(BeobachtenStore::new()),
    }
}
/// Forges a fake-TLS ClientHello record that passes the proxy's HMAC check.
///
/// The digest field is zeroed, an HMAC-SHA256 over the whole record is
/// computed with `secret`, the little-endian `timestamp` is XORed into the
/// digest's last four bytes, and the result is written back into the record.
fn make_valid_tls_client_hello(secret: &[u8], timestamp: u32, tls_len: usize, fill: u8) -> Vec<u8> {
    assert!(tls_len <= u16::MAX as usize, "TLS length must fit into record header");
    // Record header: Handshake (0x16), legacy version 3.1, payload length.
    let mut hello = vec![fill; 5 + tls_len];
    hello[0] = 0x16;
    hello[1] = 0x03;
    hello[2] = 0x01;
    hello[3..5].copy_from_slice(&(tls_len as u16).to_be_bytes());
    // Declare a 32-byte session id immediately after the digest field.
    hello[tls::TLS_DIGEST_POS + tls::TLS_DIGEST_LEN] = 32u8;
    let digest_span = tls::TLS_DIGEST_POS..tls::TLS_DIGEST_POS + tls::TLS_DIGEST_LEN;
    // The HMAC is taken over the record with a zeroed digest field.
    hello[digest_span.clone()].fill(0);
    let mut digest = sha256_hmac(secret, &hello);
    for (d, t) in digest[28..32].iter_mut().zip(timestamp.to_le_bytes().iter()) {
        *d ^= *t;
    }
    hello[digest_span].copy_from_slice(&digest);
    hello
}
fn wrap_tls_application_data(payload: &[u8]) -> Vec<u8> {
let mut record = Vec::with_capacity(5 + payload.len());
record.push(0x17);
record.extend_from_slice(&TLS_VERSION);
record.extend_from_slice(&(payload.len() as u16).to_be_bytes());
record.extend_from_slice(payload);
record
}
/// Drives a full session in which the TLS handshake succeeds but the first
/// MTProto record is invalid, and returns the bytes the masked backend saw.
///
/// Flow: valid ClientHello -> proxy ServerHello -> invalid MTProto record ->
/// trailing application-data record built from `tail`. Fix: the trailing
/// record was previously built twice (once for the expected value, once for
/// the write); it is now built once and cloned, so expectation and write can
/// never drift apart.
async fn run_tls_success_mtproto_fail_session(
    secret_hex: &str,
    secret: [u8; 16],
    timestamp: u32,
    tail: Vec<u8>,
) -> Vec<u8> {
    let listener = TcpListener::bind("127.0.0.1:0").await.unwrap();
    let backend_addr = listener.local_addr().unwrap();
    let harness = build_harness(secret_hex, backend_addr.port());
    let client_hello = make_valid_tls_client_hello(&secret, timestamp, 600, 0x42);
    // Correct TLS framing with an all-zero MTProto payload => invalid.
    let invalid_mtproto_record = wrap_tls_application_data(&vec![0u8; HANDSHAKE_LEN]);
    // Build the trailing record once; clone for the expectation side.
    let trailing_record = wrap_tls_application_data(&tail);
    let expected_trailing = trailing_record.clone();
    let accept_task = tokio::spawn(async move {
        let (mut stream, _) = listener.accept().await.unwrap();
        let mut got = vec![0u8; expected_trailing.len()];
        stream.read_exact(&mut got).await.unwrap();
        got
    });
    let (server_side, mut client_side) = duplex(262144);
    let handler = tokio::spawn(handle_client_stream(
        server_side,
        "198.51.100.250:56900".parse().unwrap(),
        harness.config,
        harness.stats,
        harness.upstream_manager,
        harness.replay_checker,
        harness.buffer_pool,
        harness.rng,
        None,
        harness.route_runtime,
        None,
        harness.ip_tracker,
        harness.beobachten,
        false,
    ));
    client_side.write_all(&client_hello).await.unwrap();
    // Drain the proxy's ServerHello handshake record before application data.
    let mut head = [0u8; 5];
    client_side.read_exact(&mut head).await.unwrap();
    assert_eq!(head[0], 0x16);
    let body_len = u16::from_be_bytes([head[3], head[4]]) as usize;
    let mut body = vec![0u8; body_len];
    client_side.read_exact(&mut body).await.unwrap();
    client_side.write_all(&invalid_mtproto_record).await.unwrap();
    client_side.write_all(&trailing_record).await.unwrap();
    let forwarded = tokio::time::timeout(Duration::from_secs(3), accept_task)
        .await
        .unwrap()
        .unwrap();
    drop(client_side);
    let _ = tokio::time::timeout(Duration::from_secs(3), handler)
        .await
        .unwrap()
        .unwrap();
    forwarded
}
/// Red-team hypothesis (known to fail): after an MTProto failure the backend
/// should receive nothing at all.
#[tokio::test]
#[ignore = "red-team expected-fail: demonstrates that post-TLS fallback still forwards data to backend"]
async fn redteam_01_backend_receives_no_data_after_mtproto_fail() {
    let tail = b"probe-a".to_vec();
    let forwarded =
        run_tls_success_mtproto_fail_session("aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa", [0xAA; 16], 1, tail)
            .await;
    assert!(forwarded.is_empty(), "backend unexpectedly received fallback bytes");
}
/// Red-team hypothesis (known to fail): under a strict no-fallback policy the
/// backend must never see a TLS application-data record after MTProto fails.
///
/// Fix: uses `first()` instead of `forwarded[0]` — with an empty forwarded
/// buffer (the strict-policy success case this test hypothesizes) the old
/// indexing panicked out-of-bounds, so the test could never pass even if the
/// policy were implemented.
#[tokio::test]
#[ignore = "red-team expected-fail: strict no-fallback policy hypothesis"]
async fn redteam_02_backend_must_never_receive_tls_records_after_mtproto_fail() {
    let forwarded = run_tls_success_mtproto_fail_session(
        "abababababababababababababababab",
        [0xAB; 16],
        2,
        b"probe-b".to_vec(),
    )
    .await;
    assert_ne!(
        forwarded.first(),
        Some(&0x17),
        "received TLS application record despite strict policy"
    );
}
/// Red-team hypothesis (known to fail): even with the mask backend unreachable
/// (port 1), the fallback path should complete in under 1ms — an intentionally
/// impossible timing-uniformity target used to document current behavior.
#[tokio::test]
#[ignore = "red-team expected-fail: impossible timing uniformity target"]
async fn redteam_03_masking_duration_must_be_less_than_1ms_when_backend_down() {
    let mut cfg = ProxyConfig::default();
    cfg.general.beobachten = false;
    cfg.censorship.mask = true;
    cfg.censorship.mask_host = Some("127.0.0.1".to_string());
    // Port 1 is assumed closed locally, forcing the connect to fail.
    cfg.censorship.mask_port = 1;
    cfg.access.ignore_time_skew = true;
    cfg.access
        .users
        .insert("user".to_string(), "acacacacacacacacacacacacacacacac".to_string());
    // Built inline (not via build_harness) because mask_port must point at a
    // dead backend rather than a live listener.
    let harness = RedTeamHarness {
        config: Arc::new(cfg),
        stats: Arc::new(Stats::new()),
        upstream_manager: Arc::new(UpstreamManager::new(
            vec![UpstreamConfig {
                upstream_type: UpstreamType::Direct {
                    interface: None,
                    bind_addresses: None,
                },
                weight: 1,
                enabled: true,
                scopes: String::new(),
                selected_scope: String::new(),
            }],
            1,
            1,
            1,
            1,
            false,
            Arc::new(Stats::new()),
        )),
        replay_checker: Arc::new(ReplayChecker::new(256, Duration::from_secs(60))),
        buffer_pool: Arc::new(BufferPool::new()),
        rng: Arc::new(SecureRandom::new()),
        route_runtime: Arc::new(RouteRuntimeController::new(RelayRouteMode::Direct)),
        ip_tracker: Arc::new(UserIpTracker::new()),
        beobachten: Arc::new(BeobachtenStore::new()),
    };
    let hello = make_valid_tls_client_hello(&[0xAC; 16], 3, 600, 0x42);
    let (server_side, mut client_side) = duplex(131072);
    let started = Instant::now();
    let handler = tokio::spawn(handle_client_stream(
        server_side,
        "198.51.100.251:56901".parse().unwrap(),
        harness.config,
        harness.stats,
        harness.upstream_manager,
        harness.replay_checker,
        harness.buffer_pool,
        harness.rng,
        None,
        harness.route_runtime,
        None,
        harness.ip_tracker,
        harness.beobachten,
        false,
    ));
    client_side.write_all(&hello).await.unwrap();
    client_side.shutdown().await.unwrap();
    let _ = tokio::time::timeout(Duration::from_secs(3), handler)
        .await
        .unwrap()
        .unwrap();
    assert!(started.elapsed() < Duration::from_millis(1), "fallback path took longer than 1ms");
}
/// Generates a red-team expected-fail test asserting that a `$len`-byte tail
/// is *not* forwarded to the backend after an MTProto failure (strict
/// no-forwarding hypothesis, known not to hold today).
macro_rules! redteam_tail_must_not_forward_case {
    ($name:ident, $hex:expr, $secret:expr, $ts:expr, $len:expr) => {
        #[tokio::test]
        #[ignore = "red-team expected-fail: strict no-forwarding hypothesis"]
        async fn $name() {
            // Deterministic pseudo-random tail of $len bytes.
            let tail: Vec<u8> = (0..$len)
                .map(|i| (i as u8).wrapping_mul(31).wrapping_add(7))
                .collect();
            let forwarded = run_tls_success_mtproto_fail_session($hex, $secret, $ts, tail).await;
            assert!(
                forwarded.is_empty(),
                "strict model expects zero forwarded bytes, got {}",
                forwarded.len()
            );
        }
    };
}
// Tail sizes from 1 byte to ~4 KiB: all currently forwarded, so every case is
// an expected failure documenting the fallback behavior.
redteam_tail_must_not_forward_case!(redteam_04_tail_len_1_not_forwarded, "adadadadadadadadadadadadadadadad", [0xAD; 16], 4, 1);
redteam_tail_must_not_forward_case!(redteam_05_tail_len_2_not_forwarded, "aeaeaeaeaeaeaeaeaeaeaeaeaeaeaeae", [0xAE; 16], 5, 2);
redteam_tail_must_not_forward_case!(redteam_06_tail_len_3_not_forwarded, "afafafafafafafafafafafafafafafaf", [0xAF; 16], 6, 3);
redteam_tail_must_not_forward_case!(redteam_07_tail_len_7_not_forwarded, "b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0", [0xB0; 16], 7, 7);
redteam_tail_must_not_forward_case!(redteam_08_tail_len_15_not_forwarded, "b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1", [0xB1; 16], 8, 15);
redteam_tail_must_not_forward_case!(redteam_09_tail_len_63_not_forwarded, "b2b2b2b2b2b2b2b2b2b2b2b2b2b2b2b2", [0xB2; 16], 9, 63);
redteam_tail_must_not_forward_case!(redteam_10_tail_len_127_not_forwarded, "b3b3b3b3b3b3b3b3b3b3b3b3b3b3b3b3", [0xB3; 16], 10, 127);
redteam_tail_must_not_forward_case!(redteam_11_tail_len_255_not_forwarded, "b4b4b4b4b4b4b4b4b4b4b4b4b4b4b4b4", [0xB4; 16], 11, 255);
redteam_tail_must_not_forward_case!(redteam_12_tail_len_511_not_forwarded, "b5b5b5b5b5b5b5b5b5b5b5b5b5b5b5b5", [0xB5; 16], 12, 511);
redteam_tail_must_not_forward_case!(redteam_13_tail_len_1023_not_forwarded, "b6b6b6b6b6b6b6b6b6b6b6b6b6b6b6b6", [0xB6; 16], 13, 1023);
redteam_tail_must_not_forward_case!(redteam_14_tail_len_2047_not_forwarded, "b7b7b7b7b7b7b7b7b7b7b7b7b7b7b7b7", [0xB7; 16], 14, 2047);
redteam_tail_must_not_forward_case!(redteam_15_tail_len_4095_not_forwarded, "b8b8b8b8b8b8b8b8b8b8b8b8b8b8b8b8", [0xB8; 16], 15, 4095);
/// Red-team hypothesis (known to fail): across 20 sequential runs the
/// end-to-end timing spread of the masked path must stay under 1ms — an
/// intentionally unachievable indistinguishability envelope.
#[tokio::test]
#[ignore = "red-team expected-fail: impossible indistinguishability envelope"]
async fn redteam_16_timing_delta_between_paths_must_be_sub_1ms_under_concurrency() {
    let runs = 20usize;
    let mut durations = Vec::with_capacity(runs);
    for i in 0..runs {
        // Fresh listener/harness per run so each measurement is independent.
        let listener = TcpListener::bind("127.0.0.1:0").await.unwrap();
        let backend_addr = listener.local_addr().unwrap();
        let secret = [0xB9u8; 16];
        let harness = build_harness("b9b9b9b9b9b9b9b9b9b9b9b9b9b9b9b9", backend_addr.port());
        let hello = make_valid_tls_client_hello(&secret, 100 + i as u32, 600, 0x42);
        let accept_task = tokio::spawn(async move {
            let (_stream, _) = listener.accept().await.unwrap();
        });
        let (server_side, mut client_side) = duplex(65536);
        let handler = tokio::spawn(handle_client_stream(
            server_side,
            "198.51.100.252:56902".parse().unwrap(),
            harness.config,
            harness.stats,
            harness.upstream_manager,
            harness.replay_checker,
            harness.buffer_pool,
            harness.rng,
            None,
            harness.route_runtime,
            None,
            harness.ip_tracker,
            harness.beobachten,
            false,
        ));
        let started = Instant::now();
        client_side.write_all(&hello).await.unwrap();
        client_side.shutdown().await.unwrap();
        let _ = tokio::time::timeout(Duration::from_secs(3), handler)
            .await
            .unwrap()
            .unwrap();
        let _ = tokio::time::timeout(Duration::from_secs(3), accept_task)
            .await
            .unwrap()
            .unwrap();
        durations.push(started.elapsed());
    }
    let min = durations.iter().copied().min().unwrap();
    let max = durations.iter().copied().max().unwrap();
    assert!(max - min <= Duration::from_millis(1), "timing spread too wide for strict anti-probing envelope");
}
/// Sends an invalid TLS probe (header advertising `tls_len`, `body_sent`
/// actual body bytes) to a handler whose mask backend is unreachable
/// (port 1) and whose ServerHello delay is fixed at `delay_ms`, returning the
/// wall-clock milliseconds until the handler finishes.
async fn measure_invalid_probe_duration_ms(
    delay_ms: u64,
    tls_len: u16,
    body_sent: usize,
) -> u128 {
    let mut cfg = ProxyConfig::default();
    cfg.general.beobachten = false;
    cfg.censorship.mask = true;
    cfg.censorship.mask_unix_sock = None;
    cfg.censorship.mask_host = Some("127.0.0.1".to_string());
    // Port 1 is assumed closed, so the masking connect fails.
    cfg.censorship.mask_port = 1;
    cfg.timeouts.client_handshake = 1;
    // Pin both delay bounds so the configured delay is deterministic.
    cfg.censorship.server_hello_delay_min_ms = delay_ms;
    cfg.censorship.server_hello_delay_max_ms = delay_ms;
    let (server_side, mut client_side) = duplex(65536);
    let handler = tokio::spawn(handle_client_stream(
        server_side,
        "198.51.100.253:56903".parse().unwrap(),
        Arc::new(cfg),
        Arc::new(Stats::new()),
        Arc::new(UpstreamManager::new(
            vec![UpstreamConfig {
                upstream_type: UpstreamType::Direct {
                    interface: None,
                    bind_addresses: None,
                },
                weight: 1,
                enabled: true,
                scopes: String::new(),
                selected_scope: String::new(),
            }],
            1,
            1,
            1,
            1,
            false,
            Arc::new(Stats::new()),
        )),
        Arc::new(ReplayChecker::new(256, Duration::from_secs(60))),
        Arc::new(BufferPool::new()),
        Arc::new(SecureRandom::new()),
        None,
        Arc::new(RouteRuntimeController::new(RelayRouteMode::Direct)),
        None,
        Arc::new(UserIpTracker::new()),
        Arc::new(BeobachtenStore::new()),
        false,
    ));
    // Handshake header advertising `tls_len`, body filled with 0xD7.
    let mut probe = vec![0u8; 5 + body_sent];
    probe[0] = 0x16;
    probe[1] = 0x03;
    probe[2] = 0x01;
    probe[3..5].copy_from_slice(&tls_len.to_be_bytes());
    probe[5..].fill(0xD7);
    let started = Instant::now();
    client_side.write_all(&probe).await.unwrap();
    client_side.shutdown().await.unwrap();
    let _ = tokio::time::timeout(Duration::from_secs(4), handler)
        .await
        .unwrap()
        .unwrap();
    started.elapsed().as_millis()
}
/// Sends an invalid TLS probe (header advertising `tls_len`, `body_sent`
/// actual body bytes) and returns how many bytes the live masked backend
/// received — used to fingerprint the forwarded "shape" of rejected probes.
async fn capture_forwarded_probe_len(tls_len: u16, body_sent: usize) -> usize {
    let listener = TcpListener::bind("127.0.0.1:0").await.unwrap();
    let backend_addr = listener.local_addr().unwrap();
    let mut cfg = ProxyConfig::default();
    cfg.general.beobachten = false;
    cfg.censorship.mask = true;
    cfg.censorship.mask_unix_sock = None;
    cfg.censorship.mask_host = Some("127.0.0.1".to_string());
    cfg.censorship.mask_port = backend_addr.port();
    cfg.timeouts.client_handshake = 1;
    // Read everything the proxy forwards until EOF or the 2s cap.
    let accept_task = tokio::spawn(async move {
        let (mut stream, _) = listener.accept().await.unwrap();
        let mut got = Vec::new();
        let _ = tokio::time::timeout(Duration::from_secs(2), stream.read_to_end(&mut got)).await;
        got.len()
    });
    let (server_side, mut client_side) = duplex(65536);
    let handler = tokio::spawn(handle_client_stream(
        server_side,
        "198.51.100.254:56904".parse().unwrap(),
        Arc::new(cfg),
        Arc::new(Stats::new()),
        Arc::new(UpstreamManager::new(
            vec![UpstreamConfig {
                upstream_type: UpstreamType::Direct {
                    interface: None,
                    bind_addresses: None,
                },
                weight: 1,
                enabled: true,
                scopes: String::new(),
                selected_scope: String::new(),
            }],
            1,
            1,
            1,
            1,
            false,
            Arc::new(Stats::new()),
        )),
        Arc::new(ReplayChecker::new(256, Duration::from_secs(60))),
        Arc::new(BufferPool::new()),
        Arc::new(SecureRandom::new()),
        None,
        Arc::new(RouteRuntimeController::new(RelayRouteMode::Direct)),
        None,
        Arc::new(UserIpTracker::new()),
        Arc::new(BeobachtenStore::new()),
        false,
    ));
    // Handshake header advertising `tls_len`, body filled with 0xBC.
    let mut probe = vec![0u8; 5 + body_sent];
    probe[0] = 0x16;
    probe[1] = 0x03;
    probe[2] = 0x01;
    probe[3..5].copy_from_slice(&tls_len.to_be_bytes());
    probe[5..].fill(0xBC);
    client_side.write_all(&probe).await.unwrap();
    client_side.shutdown().await.unwrap();
    let _ = tokio::time::timeout(Duration::from_secs(4), handler)
        .await
        .unwrap()
        .unwrap();
    tokio::time::timeout(Duration::from_secs(4), accept_task)
        .await
        .unwrap()
        .unwrap()
}
// Generates an expected-fail test asserting the reject path completes within
// `$max_ms` even though a `$delay_ms` ServerHello delay is configured — an
// intentionally unachievable timing bound.
macro_rules! redteam_timing_envelope_case {
    ($name:ident, $delay_ms:expr, $tls_len:expr, $body_sent:expr, $max_ms:expr) => {
        #[tokio::test]
        #[ignore = "red-team expected-fail: unrealistically tight reject timing envelope"]
        async fn $name() {
            let elapsed_ms = measure_invalid_probe_duration_ms($delay_ms, $tls_len, $body_sent).await;
            assert!(
                elapsed_ms <= $max_ms,
                "timing envelope violated: elapsed={}ms, max={}ms",
                elapsed_ms,
                $max_ms
            );
        }
    };
}
// Generates an expected-fail test asserting the backend always receives
// exactly `$expected_len` bytes for any probe — a strict constant-shape
// fingerprint hypothesis that current forwarding does not satisfy.
macro_rules! redteam_constant_shape_case {
    ($name:ident, $tls_len:expr, $body_sent:expr, $expected_len:expr) => {
        #[tokio::test]
        #[ignore = "red-team expected-fail: strict constant-shape backend fingerprint hypothesis"]
        async fn $name() {
            let got = capture_forwarded_probe_len($tls_len, $body_sent).await;
            assert_eq!(
                got,
                $expected_len,
                "fingerprint shape mismatch: got={} expected={} (strict constant-shape model)",
                got,
                $expected_len
            );
        }
    };
}
// Invalid probes of many body sizes must all reject within 3ms despite a
// configured 700ms ServerHello delay (expected-fail timing envelope).
redteam_timing_envelope_case!(redteam_17_timing_env_very_tight_00, 700, 600, 0, 3);
redteam_timing_envelope_case!(redteam_18_timing_env_very_tight_01, 700, 600, 1, 3);
redteam_timing_envelope_case!(redteam_19_timing_env_very_tight_02, 700, 600, 7, 3);
redteam_timing_envelope_case!(redteam_20_timing_env_very_tight_03, 700, 600, 17, 3);
redteam_timing_envelope_case!(redteam_21_timing_env_very_tight_04, 700, 600, 31, 3);
redteam_timing_envelope_case!(redteam_22_timing_env_very_tight_05, 700, 600, 63, 3);
redteam_timing_envelope_case!(redteam_23_timing_env_very_tight_06, 700, 600, 127, 3);
redteam_timing_envelope_case!(redteam_24_timing_env_very_tight_07, 700, 600, 255, 3);
redteam_timing_envelope_case!(redteam_25_timing_env_very_tight_08, 700, 600, 511, 3);
redteam_timing_envelope_case!(redteam_26_timing_env_very_tight_09, 700, 600, 1023, 3);
redteam_timing_envelope_case!(redteam_27_timing_env_very_tight_10, 700, 600, 2047, 3);
redteam_timing_envelope_case!(redteam_28_timing_env_very_tight_11, 700, 600, 4095, 3);
// Strict constant-shape hypothesis: backend always sees exactly 517 bytes
// regardless of how much of the probe body actually arrived (expected-fail).
redteam_constant_shape_case!(redteam_29_constant_shape_00, 600, 0, 517);
redteam_constant_shape_case!(redteam_30_constant_shape_01, 600, 1, 517);
redteam_constant_shape_case!(redteam_31_constant_shape_02, 600, 7, 517);
redteam_constant_shape_case!(redteam_32_constant_shape_03, 600, 17, 517);
redteam_constant_shape_case!(redteam_33_constant_shape_04, 600, 31, 517);
redteam_constant_shape_case!(redteam_34_constant_shape_05, 600, 63, 517);
redteam_constant_shape_case!(redteam_35_constant_shape_06, 600, 127, 517);
redteam_constant_shape_case!(redteam_36_constant_shape_07, 600, 255, 517);
redteam_constant_shape_case!(redteam_37_constant_shape_08, 600, 511, 517);
redteam_constant_shape_case!(redteam_38_constant_shape_09, 600, 1023, 517);
redteam_constant_shape_case!(redteam_39_constant_shape_10, 600, 2047, 517);
redteam_constant_shape_case!(redteam_40_constant_shape_11, 600, 4095, 517);

// ===================================================================
// Next test file (extraction artifact "View File" / @@ -0,0 +1,122 @@
// replaced by this boundary comment).
// ===================================================================
use super::*;
use crate::config::{UpstreamConfig, UpstreamType};
use std::sync::Arc;
use tokio::io::{duplex, AsyncReadExt, AsyncWriteExt};
use tokio::net::TcpListener;
use tokio::time::Duration;
/// Builds a single-upstream `UpstreamManager` that dials targets directly
/// (no SOCKS/relay indirection).
fn new_upstream_manager(stats: Arc<Stats>) -> Arc<UpstreamManager> {
    Arc::new(UpstreamManager::new(
        vec![UpstreamConfig {
            upstream_type: UpstreamType::Direct {
                interface: None,
                bind_addresses: None,
            },
            weight: 1,
            enabled: true,
            scopes: String::new(),
            selected_scope: String::new(),
        }],
        // NOTE(review): the four `1` positional limits below are undocumented
        // here — confirm their meaning against `UpstreamManager::new`.
        1,
        1,
        1,
        1,
        false,
        stats,
    ))
}
/// Sends an invalid TLS probe (header advertising `tls_len`, `body_sent`
/// actual body bytes) and returns everything the masked backend received,
/// with shape hardening toggled and the padding bucket floor/cap configurable.
async fn run_probe_capture(
    body_sent: usize,
    tls_len: u16,
    enable_shape_hardening: bool,
    floor: usize,
    cap: usize,
) -> Vec<u8> {
    let listener = TcpListener::bind("127.0.0.1:0").await.unwrap();
    let backend_addr = listener.local_addr().unwrap();
    let mut cfg = ProxyConfig::default();
    cfg.general.beobachten = false;
    cfg.censorship.mask = true;
    cfg.censorship.mask_unix_sock = None;
    cfg.censorship.mask_host = Some("127.0.0.1".to_string());
    cfg.censorship.mask_port = backend_addr.port();
    // Shape hardening pads forwarded probes up to size buckets in [floor, cap].
    cfg.censorship.mask_shape_hardening = enable_shape_hardening;
    cfg.censorship.mask_shape_bucket_floor_bytes = floor;
    cfg.censorship.mask_shape_bucket_cap_bytes = cap;
    // Read everything the proxy forwards until EOF or the 2s cap.
    let accept_task = tokio::spawn(async move {
        let (mut stream, _) = listener.accept().await.unwrap();
        let mut got = Vec::new();
        let _ = tokio::time::timeout(Duration::from_secs(2), stream.read_to_end(&mut got)).await;
        got
    });
    let (server_side, mut client_side) = duplex(65536);
    let handler = tokio::spawn(handle_client_stream(
        server_side,
        "198.51.100.188:56888".parse().unwrap(),
        Arc::new(cfg),
        Arc::new(Stats::new()),
        new_upstream_manager(Arc::new(Stats::new())),
        Arc::new(ReplayChecker::new(128, Duration::from_secs(60))),
        Arc::new(BufferPool::new()),
        Arc::new(SecureRandom::new()),
        None,
        Arc::new(RouteRuntimeController::new(RelayRouteMode::Direct)),
        None,
        Arc::new(UserIpTracker::new()),
        Arc::new(BeobachtenStore::new()),
        false,
    ));
    // Handshake header advertising `tls_len`, body filled with 0x66.
    let mut probe = vec![0u8; 5 + body_sent];
    probe[0] = 0x16;
    probe[1] = 0x03;
    probe[2] = 0x01;
    probe[3..5].copy_from_slice(&tls_len.to_be_bytes());
    probe[5..].fill(0x66);
    client_side.write_all(&probe).await.unwrap();
    client_side.shutdown().await.unwrap();
    let result = tokio::time::timeout(Duration::from_secs(4), handler)
        .await
        .unwrap()
        .unwrap();
    assert!(result.is_ok());
    tokio::time::timeout(Duration::from_secs(4), accept_task)
        .await
        .unwrap()
        .unwrap()
}
/// With shape hardening off, the backend sees the probe exactly as sent:
/// 5-byte header + 17 body bytes = 22 bytes.
#[tokio::test]
async fn shape_hardening_disabled_keeps_original_probe_length() {
    let got = run_probe_capture(17, 600, false, 512, 4096).await;
    assert_eq!(got.len(), 22);
    // Header: handshake(0x16), TLS 3.1, advertised length 0x0258 = 600.
    assert_eq!(&got[..5], &[0x16, 0x03, 0x01, 0x02, 0x58]);
}
/// Shape hardening pads a short probe (22 bytes) up to the configured floor
/// bucket of 512 bytes while keeping the original header prefix intact.
#[tokio::test]
async fn shape_hardening_enabled_pads_small_probe_to_floor_bucket() {
    let got = run_probe_capture(17, 600, true, 512, 4096).await;
    assert_eq!(got.len(), 512);
    assert_eq!(&got[..5], &[0x16, 0x03, 0x01, 0x02, 0x58]);
}
/// A probe just over the floor bucket (5 + 511 = 516 bytes) is padded up to
/// the next bucket, 1024 bytes.
#[tokio::test]
async fn shape_hardening_enabled_pads_mid_probe_to_next_bucket() {
    let got = run_probe_capture(511, 600, true, 512, 4096).await;
    assert_eq!(got.len(), 1024);
    assert_eq!(&got[..5], &[0x16, 0x03, 0x01, 0x02, 0x58]);
}
/// Probes already above the 4096-byte cap are forwarded unpadded:
/// 5 + 5000 = 5005 bytes.
#[tokio::test]
async fn shape_hardening_respects_cap_and_avoids_padding_above_cap() {
    let got = run_probe_capture(5000, 7000, true, 512, 4096).await;
    assert_eq!(got.len(), 5005);
    // Advertised length 0x1b58 = 7000.
    assert_eq!(&got[..5], &[0x16, 0x03, 0x01, 0x1b, 0x58]);
}

// ===================================================================
// Next test file (extraction artifact "View File" / @@ -0,0 +1,254 @@
// replaced by this boundary comment).
// ===================================================================
use super::*;
use crate::config::{UpstreamConfig, UpstreamType};
use crate::crypto::sha256_hmac;
use crate::protocol::constants::{HANDSHAKE_LEN, TLS_RECORD_APPLICATION, TLS_VERSION};
use crate::protocol::tls;
use tokio::io::{duplex, AsyncReadExt, AsyncWriteExt};
use tokio::net::TcpListener;
use tokio::time::Duration;
/// Shared fixture for the stress tests: every dependency that
/// `handle_client_stream` takes, pre-built around a masked backend.
struct StressHarness {
    config: Arc<ProxyConfig>,
    stats: Arc<Stats>,
    upstream_manager: Arc<UpstreamManager>,
    replay_checker: Arc<ReplayChecker>,
    buffer_pool: Arc<BufferPool>,
    rng: Arc<SecureRandom>,
    route_runtime: Arc<RouteRuntimeController>,
    ip_tracker: Arc<UserIpTracker>,
    beobachten: Arc<BeobachtenStore>,
}
/// Build an `UpstreamManager` with a single always-enabled direct upstream
/// (no interface/bind overrides, weight 1, empty scopes).
/// The four trailing `1`s and the `false` flag are positional arguments whose
/// meaning is defined by `UpstreamManager::new` — confirm there before changing.
fn new_upstream_manager(stats: Arc<Stats>) -> Arc<UpstreamManager> {
    Arc::new(UpstreamManager::new(
        vec![UpstreamConfig {
            upstream_type: UpstreamType::Direct {
                interface: None,
                bind_addresses: None,
            },
            weight: 1,
            enabled: true,
            scopes: String::new(),
            selected_scope: String::new(),
        }],
        1,
        1,
        1,
        1,
        false,
        stats,
    ))
}
/// Construct the shared proxy state for one stress session: masking enabled
/// and pointed at `127.0.0.1:mask_port`, and a single user "user" whose
/// secret is `secret_hex`.
fn build_harness(mask_port: u16, secret_hex: &str) -> StressHarness {
    let mut cfg = ProxyConfig::default();
    cfg.general.beobachten = false;
    cfg.censorship.mask = true;
    cfg.censorship.mask_unix_sock = None;
    cfg.censorship.mask_host = Some("127.0.0.1".to_string());
    cfg.censorship.mask_port = mask_port;
    cfg.censorship.mask_proxy_protocol = 0;
    // Tests forge their own handshake timestamps, so skew validation is off.
    cfg.access.ignore_time_skew = true;
    cfg.access
        .users
        .insert("user".to_string(), secret_hex.to_string());
    let config = Arc::new(cfg);
    let stats = Arc::new(Stats::new());
    StressHarness {
        config,
        stats: stats.clone(),
        upstream_manager: new_upstream_manager(stats),
        replay_checker: Arc::new(ReplayChecker::new(1024, Duration::from_secs(60))),
        buffer_pool: Arc::new(BufferPool::new()),
        rng: Arc::new(SecureRandom::new()),
        route_runtime: Arc::new(RouteRuntimeController::new(RelayRouteMode::Direct)),
        ip_tracker: Arc::new(UserIpTracker::new()),
        beobachten: Arc::new(BeobachtenStore::new()),
    }
}
/// Forge a fake-TLS ClientHello of `5 + tls_len` bytes filled with `fill`,
/// carrying an HMAC digest valid for `secret` at `timestamp`.
///
/// The digest region is zeroed before the HMAC is computed, so the MAC covers
/// the zeroed layout; the timestamp is then XORed into the digest's last four
/// bytes little-endian before the digest is written back — presumably the
/// server-side verification mirrors this exact layout (confirm in tls module).
fn make_valid_tls_client_hello(secret: &[u8], timestamp: u32, tls_len: usize, fill: u8) -> Vec<u8> {
    assert!(tls_len <= u16::MAX as usize, "TLS length must fit into record header");
    let total_len = 5 + tls_len;
    let mut handshake = vec![fill; total_len];
    // TLS record header: handshake type 0x16, version 3.1, big-endian length.
    handshake[0] = 0x16;
    handshake[1] = 0x03;
    handshake[2] = 0x01;
    handshake[3..5].copy_from_slice(&(tls_len as u16).to_be_bytes());
    let session_id_len: usize = 32;
    handshake[tls::TLS_DIGEST_POS + tls::TLS_DIGEST_LEN] = session_id_len as u8;
    handshake[tls::TLS_DIGEST_POS..tls::TLS_DIGEST_POS + tls::TLS_DIGEST_LEN].fill(0);
    let computed = sha256_hmac(secret, &handshake);
    let mut digest = computed;
    let ts = timestamp.to_le_bytes();
    for i in 0..4 {
        digest[28 + i] ^= ts[i];
    }
    handshake[tls::TLS_DIGEST_POS..tls::TLS_DIGEST_POS + tls::TLS_DIGEST_LEN]
        .copy_from_slice(&digest);
    handshake
}
/// Wrap `payload` in a single TLS application-data record: 1-byte record type,
/// 2-byte protocol version, 2-byte big-endian length, then the payload.
///
/// # Panics
/// Panics if `payload` exceeds `u16::MAX` bytes; previously `len as u16`
/// silently truncated the header length, producing a corrupt record that
/// would make a test fail for the wrong reason.
fn wrap_tls_application_data(payload: &[u8]) -> Vec<u8> {
    assert!(
        payload.len() <= u16::MAX as usize,
        "TLS record payload must fit a 16-bit length field"
    );
    let mut record = Vec::with_capacity(5 + payload.len());
    record.push(TLS_RECORD_APPLICATION);
    record.extend_from_slice(&TLS_VERSION);
    record.extend_from_slice(&(payload.len() as u16).to_be_bytes());
    record.extend_from_slice(payload);
    record
}
/// Read and discard the body of a TLS record whose 5-byte `header` has
/// already been consumed; the body length is the big-endian u16 at bytes 3..5.
async fn read_tls_record_body<T>(stream: &mut T, header: [u8; 5])
where
    T: tokio::io::AsyncRead + Unpin,
{
    let body_len = u16::from_be_bytes([header[3], header[4]]) as usize;
    let mut discard = vec![0u8; body_len];
    stream.read_exact(&mut discard).await.unwrap();
}
/// Drive `sessions` concurrent fake-TLS clients. Each one completes a valid
/// ClientHello exchange, sends an invalid MTProto record, then a trailing
/// application-data record that must arrive verbatim at the mask backend
/// (the tail-fallback path under test).
///
/// `write_chunk` fragments the trailing record to exercise partial writes;
/// `ts_base`/`peer_port_base` keep timestamps, ports, and synthetic peer IPs
/// unique per stress case so parallel test runs cannot collide.
async fn run_parallel_tail_fallback_case(
    sessions: usize,
    payload_len: usize,
    write_chunk: usize,
    ts_base: u32,
    peer_port_base: u16,
) {
    let listener = TcpListener::bind("127.0.0.1:0").await.unwrap();
    let backend_addr = listener.local_addr().unwrap();
    // Precompute the full set of records the backend must observe, one per
    // session; payload byte and length are derived from the session index.
    let mut expected = std::collections::HashSet::new();
    for idx in 0..sessions {
        let payload = vec![((idx * 37) & 0xff) as u8; payload_len + idx % 3];
        expected.insert(wrap_tls_application_data(&payload));
    }
    let accept_task = tokio::spawn(async move {
        let mut remaining = expected;
        for _ in 0..sessions {
            let (mut stream, _) = listener.accept().await.unwrap();
            let mut header = [0u8; 5];
            stream.read_exact(&mut header).await.unwrap();
            assert_eq!(header[0], TLS_RECORD_APPLICATION);
            let len = u16::from_be_bytes([header[3], header[4]]) as usize;
            let mut record = vec![0u8; 5 + len];
            record[..5].copy_from_slice(&header);
            stream.read_exact(&mut record[5..]).await.unwrap();
            // Arrival order is nondeterministic; each record must match
            // exactly one still-unseen expected payload.
            assert!(remaining.remove(&record));
        }
        assert!(remaining.is_empty());
    });
    let mut tasks = Vec::with_capacity(sessions);
    for idx in 0..sessions {
        let harness = build_harness(backend_addr.port(), "e0e0e0e0e0e0e0e0e0e0e0e0e0e0e0e0");
        let hello = make_valid_tls_client_hello(
            &[0xE0; 16],
            ts_base + idx as u32,
            600,
            0x40 + (idx as u8),
        );
        let invalid_mtproto = wrap_tls_application_data(&vec![0u8; HANDSHAKE_LEN]);
        let payload = vec![((idx * 37) & 0xff) as u8; payload_len + idx % 3];
        let trailing = wrap_tls_application_data(&payload);
        // Keep source IPs unique across stress cases so global pre-auth probe state
        // cannot contaminate unrelated sessions and make this test nondeterministic.
        let peer_ip_third = 100 + ((ts_base as u8) / 10);
        let peer_ip_fourth = (idx as u8).saturating_add(1);
        let peer: SocketAddr = format!(
            "198.51.{}.{}:{}",
            peer_ip_third,
            peer_ip_fourth,
            peer_port_base + idx as u16
        )
        .parse()
        .unwrap();
        tasks.push(tokio::spawn(async move {
            let (server_side, mut client_side) = duplex(262144);
            let handler = tokio::spawn(handle_client_stream(
                server_side,
                peer,
                harness.config,
                harness.stats,
                harness.upstream_manager,
                harness.replay_checker,
                harness.buffer_pool,
                harness.rng,
                None,
                harness.route_runtime,
                None,
                harness.ip_tracker,
                harness.beobachten,
                false,
            ));
            client_side.write_all(&hello).await.unwrap();
            // Consume the server's TLS response record before probing further.
            let mut server_hello_head = [0u8; 5];
            client_side.read_exact(&mut server_hello_head).await.unwrap();
            assert_eq!(server_hello_head[0], 0x16);
            read_tls_record_body(&mut client_side, server_hello_head).await;
            client_side.write_all(&invalid_mtproto).await.unwrap();
            // Fragment the trailing record to exercise partial-read handling.
            for chunk in trailing.chunks(write_chunk.max(1)) {
                client_side.write_all(chunk).await.unwrap();
            }
            client_side.shutdown().await.unwrap();
            let _ = tokio::time::timeout(Duration::from_secs(4), handler)
                .await
                .unwrap()
                .unwrap();
        }));
    }
    for task in tasks {
        task.await.unwrap();
    }
    tokio::time::timeout(Duration::from_secs(8), accept_task)
        .await
        .unwrap()
        .unwrap();
}
/// Declare a tokio test named `$name` that runs one parallel tail-fallback
/// stress case with the given (sessions, payload_len, chunk, ts, port) tuple.
macro_rules! stress_case {
    ($name:ident, $sessions:expr, $payload_len:expr, $chunk:expr, $ts:expr, $port:expr) => {
        #[tokio::test]
        async fn $name() {
            run_parallel_tail_fallback_case($sessions, $payload_len, $chunk, $ts, $port).await;
        }
    };
}
// Stress matrix: (sessions, payload_len, write_chunk, ts_base, peer_port_base).
// Timestamp and port bases are spaced per case so concurrently running tests
// use disjoint timestamps and synthetic peer addresses.
stress_case!(stress_masking_parallel_s01, 4, 16, 1, 1000, 57000);
stress_case!(stress_masking_parallel_s02, 5, 24, 2, 1010, 57010);
stress_case!(stress_masking_parallel_s03, 6, 32, 3, 1020, 57020);
stress_case!(stress_masking_parallel_s04, 7, 40, 4, 1030, 57030);
stress_case!(stress_masking_parallel_s05, 8, 48, 5, 1040, 57040);
stress_case!(stress_masking_parallel_s06, 9, 56, 6, 1050, 57050);
stress_case!(stress_masking_parallel_s07, 10, 64, 7, 1060, 57060);
stress_case!(stress_masking_parallel_s08, 11, 72, 8, 1070, 57070);
stress_case!(stress_masking_parallel_s09, 12, 80, 9, 1080, 57080);
stress_case!(stress_masking_parallel_s10, 13, 88, 10, 1090, 57090);
stress_case!(stress_masking_parallel_s11, 6, 128, 11, 1100, 57100);
stress_case!(stress_masking_parallel_s12, 7, 160, 12, 1110, 57110);
stress_case!(stress_masking_parallel_s13, 8, 192, 13, 1120, 57120);
stress_case!(stress_masking_parallel_s14, 9, 224, 14, 1130, 57130);
stress_case!(stress_masking_parallel_s15, 10, 256, 15, 1140, 57140);
stress_case!(stress_masking_parallel_s16, 11, 288, 16, 1150, 57150);
stress_case!(stress_masking_parallel_s17, 12, 320, 17, 1160, 57160);
stress_case!(stress_masking_parallel_s18, 13, 352, 18, 1170, 57170);
stress_case!(stress_masking_parallel_s19, 14, 384, 19, 1180, 57180);
stress_case!(stress_masking_parallel_s20, 15, 416, 20, 1190, 57190);
stress_case!(stress_masking_parallel_s21, 16, 448, 21, 1200, 57200);
stress_case!(stress_masking_parallel_s22, 17, 480, 22, 1210, 57210);

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,367 @@
//! Differential timing-profile adversarial tests.
//! Compare malformed in-range TLS truncation probes with plain web baselines,
//! ensuring masking behavior stays in similar latency buckets.
use super::*;
use crate::config::{UpstreamConfig, UpstreamType};
use crate::protocol::constants::MIN_TLS_CLIENT_HELLO_SIZE;
use std::net::SocketAddr;
use std::time::{Duration, Instant};
use tokio::io::{duplex, AsyncReadExt, AsyncWriteExt};
use tokio::net::{TcpListener, TcpStream};
// Canonical response the fake mask backend serves to every probe.
const REPLY_404: &[u8] = b"HTTP/1.1 404 Not Found\r\nContent-Length: 0\r\n\r\n";
/// The two probe families whose latency profiles are compared.
#[derive(Clone, Copy, Debug)]
enum ProbeClass {
    // TLS record header advertising an in-range length but with a truncated body.
    MalformedTlsTruncation,
    // Plain HTTP GET representing ordinary web traffic to the front.
    PlainWebBaseline,
}
/// Build an `UpstreamManager` with one always-enabled direct upstream.
/// The positional `1`s and `false` follow `UpstreamManager::new`'s signature —
/// confirm there before changing.
fn make_test_upstream_manager(stats: Arc<Stats>) -> Arc<UpstreamManager> {
    Arc::new(UpstreamManager::new(
        vec![UpstreamConfig {
            upstream_type: UpstreamType::Direct {
                interface: None,
                bind_addresses: None,
            },
            weight: 1,
            enabled: true,
            scopes: String::new(),
            selected_scope: String::new(),
        }],
        1,
        1,
        1,
        1,
        false,
        stats,
    ))
}
fn malformed_tls_probe() -> Vec<u8> {
vec![
0x16,
0x03,
0x03,
((MIN_TLS_CLIENT_HELLO_SIZE >> 8) & 0xff) as u8,
(MIN_TLS_CLIENT_HELLO_SIZE & 0xff) as u8,
0x41,
]
}
/// Baseline probe: an ordinary HTTP/1.1 GET as sent by a plain web client.
fn plain_web_probe() -> Vec<u8> {
    const REQUEST: &[u8] = b"GET /timing-profile HTTP/1.1\r\nHost: front.example\r\n\r\n";
    REQUEST.to_vec()
}
/// Summarize latency samples (milliseconds) as `(mean, min, p95, max)`.
///
/// The p95 index is `floor(len * 0.95)`, clamped to the last element, over
/// the sorted samples.
///
/// # Panics
/// Panics with an explicit message on an empty slice (previously this failed
/// with an opaque index-out-of-bounds on `sorted[0]`).
fn summarize(samples_ms: &[u128]) -> (f64, u128, u128, u128) {
    assert!(!samples_ms.is_empty(), "summarize requires at least one sample");
    let mut sorted = samples_ms.to_vec();
    sorted.sort_unstable();
    let sum: u128 = sorted.iter().copied().sum();
    let mean = sum as f64 / sorted.len() as f64;
    let min = sorted[0];
    let p95_idx = ((sorted.len() as f64) * 0.95).floor() as usize;
    let p95 = sorted[p95_idx.min(sorted.len() - 1)];
    let max = sorted[sorted.len() - 1];
    (mean, min, p95, max)
}
/// Run one masked-probe round trip through `handle_client_stream` over an
/// in-memory duplex and return the elapsed milliseconds from the first probe
/// write until the mask backend's 404 reply is fully read back by the client.
async fn run_generic_once(class: ProbeClass) -> u128 {
    let listener = TcpListener::bind("127.0.0.1:0").await.unwrap();
    let backend_addr = listener.local_addr().unwrap();
    let backend_reply = REPLY_404.to_vec();
    // Fake mask backend: read the 5 probe bytes, answer with the 404 reply.
    let accept_task = tokio::spawn({
        let backend_reply = backend_reply.clone();
        async move {
            let (mut stream, _) = listener.accept().await.unwrap();
            let mut buf = [0u8; 5];
            stream.read_exact(&mut buf).await.unwrap();
            stream.write_all(&backend_reply).await.unwrap();
        }
    });
    let mut cfg = ProxyConfig::default();
    cfg.general.beobachten = false;
    cfg.timeouts.client_handshake = 1;
    cfg.censorship.mask = true;
    cfg.censorship.mask_unix_sock = None;
    cfg.censorship.mask_host = Some("127.0.0.1".to_string());
    cfg.censorship.mask_port = backend_addr.port();
    cfg.censorship.mask_proxy_protocol = 0;
    // Baseline class disables both proxy modes — presumably so the GET is
    // handled as plain web traffic; confirm mode semantics in ProxyConfig.
    if matches!(class, ProbeClass::PlainWebBaseline) {
        cfg.general.modes.classic = false;
        cfg.general.modes.secure = false;
    }
    let config = Arc::new(cfg);
    let stats = Arc::new(Stats::new());
    let upstream_manager = make_test_upstream_manager(stats.clone());
    let replay_checker = Arc::new(ReplayChecker::new(128, Duration::from_secs(60)));
    let buffer_pool = Arc::new(BufferPool::new());
    let rng = Arc::new(SecureRandom::new());
    let route_runtime = Arc::new(RouteRuntimeController::new(RelayRouteMode::Direct));
    let ip_tracker = Arc::new(UserIpTracker::new());
    let beobachten = Arc::new(BeobachtenStore::new());
    let (server_side, mut client_side) = duplex(4096);
    let peer: SocketAddr = "203.0.113.210:55110".parse().unwrap();
    let handler = tokio::spawn(handle_client_stream(
        server_side,
        peer,
        config,
        stats,
        upstream_manager,
        replay_checker,
        buffer_pool,
        rng,
        None,
        route_runtime,
        None,
        ip_tracker,
        beobachten,
        false,
    ));
    let probe = match class {
        ProbeClass::MalformedTlsTruncation => malformed_tls_probe(),
        ProbeClass::PlainWebBaseline => plain_web_probe(),
    };
    // Timing window: first probe byte written -> masked reply fully read.
    let started = Instant::now();
    client_side.write_all(&probe).await.unwrap();
    client_side.shutdown().await.unwrap();
    let mut observed = vec![0u8; REPLY_404.len()];
    tokio::time::timeout(Duration::from_secs(2), client_side.read_exact(&mut observed))
        .await
        .unwrap()
        .unwrap();
    assert_eq!(observed, REPLY_404);
    tokio::time::timeout(Duration::from_secs(2), accept_task)
        .await
        .unwrap()
        .unwrap();
    let _ = tokio::time::timeout(Duration::from_secs(2), handler)
        .await
        .unwrap()
        .unwrap();
    started.elapsed().as_millis()
}
/// Same measurement as `run_generic_once`, but through the full
/// `ClientHandler` over real loopback TCP sockets instead of an in-memory
/// duplex, so accept/socket overhead is included in the timing.
async fn run_client_handler_once(class: ProbeClass) -> u128 {
    let mask_listener = TcpListener::bind("127.0.0.1:0").await.unwrap();
    let backend_addr = mask_listener.local_addr().unwrap();
    let front_listener = TcpListener::bind("127.0.0.1:0").await.unwrap();
    let front_addr = front_listener.local_addr().unwrap();
    let backend_reply = REPLY_404.to_vec();
    // Fake mask backend: read the 5 probe bytes, answer with the 404 reply.
    let mask_accept_task = tokio::spawn({
        let backend_reply = backend_reply.clone();
        async move {
            let (mut stream, _) = mask_listener.accept().await.unwrap();
            let mut buf = [0u8; 5];
            stream.read_exact(&mut buf).await.unwrap();
            stream.write_all(&backend_reply).await.unwrap();
        }
    });
    let mut cfg = ProxyConfig::default();
    cfg.general.beobachten = false;
    cfg.timeouts.client_handshake = 1;
    cfg.censorship.mask = true;
    cfg.censorship.mask_unix_sock = None;
    cfg.censorship.mask_host = Some("127.0.0.1".to_string());
    cfg.censorship.mask_port = backend_addr.port();
    cfg.censorship.mask_proxy_protocol = 0;
    // Baseline class disables both proxy modes — presumably so the GET is
    // handled as plain web traffic; confirm mode semantics in ProxyConfig.
    if matches!(class, ProbeClass::PlainWebBaseline) {
        cfg.general.modes.classic = false;
        cfg.general.modes.secure = false;
    }
    let config = Arc::new(cfg);
    let stats = Arc::new(Stats::new());
    let upstream_manager = make_test_upstream_manager(stats.clone());
    let replay_checker = Arc::new(ReplayChecker::new(128, Duration::from_secs(60)));
    let buffer_pool = Arc::new(BufferPool::new());
    let rng = Arc::new(SecureRandom::new());
    let route_runtime = Arc::new(RouteRuntimeController::new(RelayRouteMode::Direct));
    let ip_tracker = Arc::new(UserIpTracker::new());
    let beobachten = Arc::new(BeobachtenStore::new());
    // Accept one front connection and run the full ClientHandler on it.
    let server_task = {
        let config = config.clone();
        let stats = stats.clone();
        let upstream_manager = upstream_manager.clone();
        let replay_checker = replay_checker.clone();
        let buffer_pool = buffer_pool.clone();
        let rng = rng.clone();
        let route_runtime = route_runtime.clone();
        let ip_tracker = ip_tracker.clone();
        let beobachten = beobachten.clone();
        tokio::spawn(async move {
            let (stream, peer) = front_listener.accept().await.unwrap();
            let real_peer_report = Arc::new(std::sync::Mutex::new(None));
            ClientHandler::new(
                stream,
                peer,
                config,
                stats,
                upstream_manager,
                replay_checker,
                buffer_pool,
                rng,
                None,
                route_runtime,
                None,
                ip_tracker,
                beobachten,
                false,
                real_peer_report,
            )
            .run()
            .await
        })
    };
    let probe = match class {
        ProbeClass::MalformedTlsTruncation => malformed_tls_probe(),
        ProbeClass::PlainWebBaseline => plain_web_probe(),
    };
    let mut client = TcpStream::connect(front_addr).await.unwrap();
    // Timing window: first probe byte written -> masked reply fully read.
    let started = Instant::now();
    client.write_all(&probe).await.unwrap();
    client.shutdown().await.unwrap();
    let mut observed = vec![0u8; REPLY_404.len()];
    tokio::time::timeout(Duration::from_secs(2), client.read_exact(&mut observed))
        .await
        .unwrap()
        .unwrap();
    assert_eq!(observed, REPLY_404);
    tokio::time::timeout(Duration::from_secs(2), mask_accept_task)
        .await
        .unwrap()
        .unwrap();
    let _ = tokio::time::timeout(Duration::from_secs(2), server_task)
        .await
        .unwrap()
        .unwrap();
    started.elapsed().as_millis()
}
// Compare latency distributions of malformed-TLS vs plain-web probes through
// `handle_client_stream`. Means must land within 1 bucket (20 ms) of each
// other and p95 within 2 buckets, otherwise the masking path is timing-
// distinguishable by an active prober.
#[tokio::test]
async fn differential_timing_generic_malformed_tls_vs_plain_web_mask_profile_similar() {
    const ITER: usize = 24;
    const BUCKET_MS: u128 = 20;
    let mut malformed = Vec::with_capacity(ITER);
    let mut plain = Vec::with_capacity(ITER);
    // Interleave the two classes so ambient load affects both equally.
    for _ in 0..ITER {
        malformed.push(run_generic_once(ProbeClass::MalformedTlsTruncation).await);
        plain.push(run_generic_once(ProbeClass::PlainWebBaseline).await);
    }
    let (m_mean, m_min, m_p95, m_max) = summarize(&malformed);
    let (p_mean, p_min, p_p95, p_max) = summarize(&plain);
    println!(
        "TIMING_DIFF generic class=malformed mean_ms={:.2} min_ms={} p95_ms={} max_ms={} bucket_mean={} bucket_p95={}",
        m_mean,
        m_min,
        m_p95,
        m_max,
        (m_mean as u128) / BUCKET_MS,
        m_p95 / BUCKET_MS
    );
    println!(
        "TIMING_DIFF generic class=plain_web mean_ms={:.2} min_ms={} p95_ms={} max_ms={} bucket_mean={} bucket_p95={}",
        p_mean,
        p_min,
        p_p95,
        p_max,
        (p_mean as u128) / BUCKET_MS,
        p_p95 / BUCKET_MS
    );
    let mean_bucket_delta = ((m_mean as i128) - (p_mean as i128)).abs() / (BUCKET_MS as i128);
    let p95_bucket_delta = ((m_p95 as i128) - (p_p95 as i128)).abs() / (BUCKET_MS as i128);
    assert!(
        mean_bucket_delta <= 1,
        "generic timing mean diverged: malformed_mean_ms={:.2}, plain_mean_ms={:.2}",
        m_mean,
        p_mean
    );
    assert!(
        p95_bucket_delta <= 2,
        "generic timing p95 diverged: malformed_p95_ms={}, plain_p95_ms={}",
        m_p95,
        p_p95
    );
}
// Same differential-timing comparison as the generic test, but through the
// full `ClientHandler` over real TCP; thresholds are identical (mean within
// 1 bucket of 20 ms, p95 within 2 buckets).
#[tokio::test]
async fn differential_timing_client_handler_malformed_tls_vs_plain_web_mask_profile_similar() {
    const ITER: usize = 16;
    const BUCKET_MS: u128 = 20;
    let mut malformed = Vec::with_capacity(ITER);
    let mut plain = Vec::with_capacity(ITER);
    // Interleave the two classes so ambient load affects both equally.
    for _ in 0..ITER {
        malformed.push(run_client_handler_once(ProbeClass::MalformedTlsTruncation).await);
        plain.push(run_client_handler_once(ProbeClass::PlainWebBaseline).await);
    }
    let (m_mean, m_min, m_p95, m_max) = summarize(&malformed);
    let (p_mean, p_min, p_p95, p_max) = summarize(&plain);
    println!(
        "TIMING_DIFF handler class=malformed mean_ms={:.2} min_ms={} p95_ms={} max_ms={} bucket_mean={} bucket_p95={}",
        m_mean,
        m_min,
        m_p95,
        m_max,
        (m_mean as u128) / BUCKET_MS,
        m_p95 / BUCKET_MS
    );
    println!(
        "TIMING_DIFF handler class=plain_web mean_ms={:.2} min_ms={} p95_ms={} max_ms={} bucket_mean={} bucket_p95={}",
        p_mean,
        p_min,
        p_p95,
        p_max,
        (p_mean as u128) / BUCKET_MS,
        p_p95 / BUCKET_MS
    );
    let mean_bucket_delta = ((m_mean as i128) - (p_mean as i128)).abs() / (BUCKET_MS as i128);
    let p95_bucket_delta = ((m_p95 as i128) - (p_p95 as i128)).abs() / (BUCKET_MS as i128);
    assert!(
        mean_bucket_delta <= 1,
        "handler timing mean diverged: malformed_mean_ms={:.2}, plain_mean_ms={:.2}",
        m_mean,
        p_mean
    );
    assert!(
        p95_bucket_delta <= 2,
        "handler timing p95 diverged: malformed_p95_ms={}, plain_p95_ms={}",
        m_p95,
        p_p95
    );
}

View File

@@ -0,0 +1,200 @@
//! TLS ClientHello size validation tests for proxy anti-censorship security
//! Covers positive, negative, edge, adversarial, and fuzz cases.
//! Ensures proxy does not reveal itself on probe failures.
use super::*;
use crate::config::{UpstreamConfig, UpstreamType};
use crate::protocol::constants::{MAX_TLS_PLAINTEXT_SIZE, MIN_TLS_CLIENT_HELLO_SIZE};
use std::net::SocketAddr;
use std::time::Duration;
use tokio::io::{duplex, AsyncReadExt, AsyncWriteExt};
use tokio::net::TcpListener;
/// Build a 5-byte TLS record header (handshake, version 3.3) claiming a body
/// of `len` bytes; only the low 16 bits of `len` fit the length field.
fn test_probe_for_len(len: usize) -> [u8; 5] {
    let len_bytes = (len as u16).to_be_bytes();
    [0x16, 0x03, 0x03, len_bytes[0], len_bytes[1]]
}
/// Build an `UpstreamManager` with one always-enabled direct upstream.
/// The positional `1`s and `false` follow `UpstreamManager::new`'s signature —
/// confirm there before changing.
fn make_test_upstream_manager(stats: Arc<Stats>) -> Arc<UpstreamManager> {
    Arc::new(UpstreamManager::new(
        vec![UpstreamConfig {
            upstream_type: UpstreamType::Direct {
                interface: None,
                bind_addresses: None,
            },
            weight: 1,
            enabled: true,
            scopes: String::new(),
            selected_scope: String::new(),
        }],
        1,
        1,
        1,
        1,
        false,
        stats,
    ))
}
/// Send a bare 5-byte TLS record header advertising `len`, then assert:
/// (1) the probe bytes are forwarded verbatim to the mask backend,
/// (2) the backend's 404 reply is relayed back to the client unchanged, and
/// (3) the `connects_bad` counter moves exactly as `expect_bad_increment` says.
async fn run_probe_and_assert_masking(len: usize, expect_bad_increment: bool) {
    let listener = TcpListener::bind("127.0.0.1:0").await.unwrap();
    let backend_addr = listener.local_addr().unwrap();
    let probe = test_probe_for_len(len);
    let backend_reply = b"HTTP/1.1 404 Not Found\r\nContent-Length: 0\r\n\r\n".to_vec();
    // Fake mask backend: verify it sees the original probe, then reply.
    let accept_task = tokio::spawn({
        let backend_reply = backend_reply.clone();
        async move {
            let (mut stream, _) = listener.accept().await.unwrap();
            let mut got = [0u8; 5];
            stream.read_exact(&mut got).await.unwrap();
            assert_eq!(got, probe, "mask backend must receive original probe bytes");
            stream.write_all(&backend_reply).await.unwrap();
        }
    });
    let mut cfg = ProxyConfig::default();
    cfg.general.beobachten = false;
    cfg.censorship.mask = true;
    cfg.censorship.mask_unix_sock = None;
    cfg.censorship.mask_host = Some("127.0.0.1".to_string());
    cfg.censorship.mask_port = backend_addr.port();
    cfg.censorship.mask_proxy_protocol = 0;
    let config = Arc::new(cfg);
    let stats = Arc::new(Stats::new());
    // Snapshot the counter so the assertion below is relative, not absolute.
    let bad_before = stats.get_connects_bad();
    let upstream_manager = make_test_upstream_manager(stats.clone());
    let replay_checker = Arc::new(ReplayChecker::new(128, Duration::from_secs(60)));
    let buffer_pool = Arc::new(BufferPool::new());
    let rng = Arc::new(SecureRandom::new());
    let route_runtime = Arc::new(RouteRuntimeController::new(RelayRouteMode::Direct));
    let ip_tracker = Arc::new(UserIpTracker::new());
    let beobachten = Arc::new(BeobachtenStore::new());
    let (server_side, mut client_side) = duplex(4096);
    let peer: SocketAddr = "203.0.113.123:55123".parse().unwrap();
    let handler = tokio::spawn(handle_client_stream(
        server_side,
        peer,
        config,
        stats.clone(),
        upstream_manager,
        replay_checker,
        buffer_pool,
        rng,
        None,
        route_runtime,
        None,
        ip_tracker,
        beobachten,
        false,
    ));
    client_side.write_all(&probe).await.unwrap();
    let mut observed = vec![0u8; backend_reply.len()];
    client_side.read_exact(&mut observed).await.unwrap();
    assert_eq!(observed, backend_reply, "invalid TLS path must be masked as a real site");
    drop(client_side);
    let _ = tokio::time::timeout(Duration::from_secs(3), handler)
        .await
        .unwrap()
        .unwrap();
    accept_task.await.unwrap();
    let expected_bad = if expect_bad_increment { bad_before + 1 } else { bad_before };
    assert_eq!(
        stats.get_connects_bad(),
        expected_bad,
        "unexpected connects_bad classification for tls_len={len}"
    );
}
// Boundary probes one byte outside the accepted ClientHello length range
// (and a zero-length header) must still be masked and counted as bad connects.
#[tokio::test]
async fn tls_client_hello_lower_bound_minus_one_is_masked_and_counted_bad() {
    run_probe_and_assert_masking(MIN_TLS_CLIENT_HELLO_SIZE - 1, true).await;
}
#[tokio::test]
async fn tls_client_hello_upper_bound_plus_one_is_masked_and_counted_bad() {
    run_probe_and_assert_masking(MAX_TLS_PLAINTEXT_SIZE + 1, true).await;
}
#[tokio::test]
async fn tls_client_hello_header_zero_len_is_masked_and_counted_bad() {
    run_probe_and_assert_masking(0, true).await;
}
// Table-driven sweep of the pure bounds check. The 99/100 transition implies
// MIN_TLS_CLIENT_HELLO_SIZE == 100 — TODO confirm against protocol constants
// if this table ever disagrees with the named bounds below.
#[test]
fn tls_client_hello_len_bounds_unit_adversarial_sweep() {
    let cases = [
        (0usize, false),
        (1usize, false),
        (99usize, false),
        (100usize, true),
        (101usize, true),
        (511usize, true),
        (512usize, true),
        (MAX_TLS_PLAINTEXT_SIZE - 1, true),
        (MAX_TLS_PLAINTEXT_SIZE, true),
        (MAX_TLS_PLAINTEXT_SIZE + 1, false),
        (u16::MAX as usize, false),
        (usize::MAX, false),
    ];
    for (len, expected) in cases {
        assert_eq!(
            tls_clienthello_len_in_bounds(len),
            expected,
            "unexpected bounds result for tls_len={len}"
        );
    }
}
// Deterministic fuzz via a fixed-seed LCG: lengths are biased toward the
// boundary values (low 3 bits of the state pick a case), and the expected
// result is recomputed independently from the inclusive MIN..=MAX range.
#[test]
fn tls_client_hello_len_bounds_light_fuzz_deterministic_lcg() {
    let mut x: u32 = 0xA5A5_5A5A;
    for _ in 0..2_048 {
        // Numerical Recipes LCG constants; deterministic across runs.
        x = x.wrapping_mul(1_664_525).wrapping_add(1_013_904_223);
        let base = (x as usize) & 0x3fff;
        let len = match x & 0x7 {
            0 => MIN_TLS_CLIENT_HELLO_SIZE - 1,
            1 => MIN_TLS_CLIENT_HELLO_SIZE,
            2 => MIN_TLS_CLIENT_HELLO_SIZE + 1,
            3 => MAX_TLS_PLAINTEXT_SIZE - 1,
            4 => MAX_TLS_PLAINTEXT_SIZE,
            5 => MAX_TLS_PLAINTEXT_SIZE + 1,
            _ => base,
        };
        let expect_bad = !(MIN_TLS_CLIENT_HELLO_SIZE..=MAX_TLS_PLAINTEXT_SIZE).contains(&len);
        assert_eq!(
            tls_clienthello_len_in_bounds(len),
            !expect_bad,
            "deterministic fuzz mismatch for tls_len={len}"
        );
    }
}
// Repeatedly evaluate the bounds check at its four boundary values; a smoke
// test that the pure function stays consistent across many invocations.
#[test]
fn tls_client_hello_len_bounds_stress_many_evaluations() {
    for _ in 0..100_000 {
        assert!(tls_clienthello_len_in_bounds(MIN_TLS_CLIENT_HELLO_SIZE));
        assert!(tls_clienthello_len_in_bounds(MAX_TLS_PLAINTEXT_SIZE));
        assert!(!tls_clienthello_len_in_bounds(MIN_TLS_CLIENT_HELLO_SIZE - 1));
        assert!(!tls_clienthello_len_in_bounds(MAX_TLS_PLAINTEXT_SIZE + 1));
    }
}
// Repeat the under-minimum probe end-to-end 25 times to catch state leaking
// between sequential masked sessions (each run asserts the full mask flow).
#[tokio::test]
async fn tls_client_hello_masking_integration_repeated_small_probes() {
    for _ in 0..25 {
        run_probe_and_assert_masking(MIN_TLS_CLIENT_HELLO_SIZE - 1, true).await;
    }
}

View File

@@ -0,0 +1,561 @@
//! Black-hat adversarial tests for truncated in-range TLS ClientHello probes.
//! These tests encode a strict anti-probing expectation: malformed TLS traffic
//! should still be masked as a legitimate website response.
use super::*;
use crate::config::{UpstreamConfig, UpstreamType};
use crate::protocol::constants::MIN_TLS_CLIENT_HELLO_SIZE;
use std::net::SocketAddr;
use std::time::Duration;
use tokio::io::{duplex, AsyncReadExt, AsyncWriteExt};
use tokio::net::{TcpListener, TcpStream};
use tokio::time::sleep;
fn in_range_probe_header() -> [u8; 5] {
[
0x16,
0x03,
0x03,
((MIN_TLS_CLIENT_HELLO_SIZE >> 8) & 0xff) as u8,
(MIN_TLS_CLIENT_HELLO_SIZE & 0xff) as u8,
]
}
/// Build an `UpstreamManager` with one always-enabled direct upstream.
/// The positional `1`s and `false` follow `UpstreamManager::new`'s signature —
/// confirm there before changing.
fn make_test_upstream_manager(stats: Arc<Stats>) -> Arc<UpstreamManager> {
    Arc::new(UpstreamManager::new(
        vec![UpstreamConfig {
            upstream_type: UpstreamType::Direct {
                interface: None,
                bind_addresses: None,
            },
            weight: 1,
            enabled: true,
            scopes: String::new(),
            selected_scope: String::new(),
        }],
        1,
        1,
        1,
        1,
        false,
        stats,
    ))
}
/// An in-range TLS header followed by only `actual_body_len` filler bytes
/// (0x41) — i.e. a record truncated short of its advertised length.
fn truncated_in_range_record(actual_body_len: usize) -> Vec<u8> {
    let mut record = Vec::with_capacity(5 + actual_body_len);
    record.extend_from_slice(&in_range_probe_header());
    record.resize(5 + actual_body_len, 0x41);
    record
}
/// Write `bytes` to `writer` in the fragment sizes given by `chunks`,
/// pausing `delay_ms` after each fragment (when non-zero); any bytes left
/// after the chunk list is exhausted are flushed in one final write.
async fn write_fragmented<W: AsyncWriteExt + Unpin>(writer: &mut W, bytes: &[u8], chunks: &[usize], delay_ms: u64) {
    let mut remaining = bytes;
    for &chunk_len in chunks {
        if remaining.is_empty() {
            break;
        }
        let take = chunk_len.min(remaining.len());
        let (head, tail) = remaining.split_at(take);
        writer.write_all(head).await.unwrap();
        remaining = tail;
        if delay_ms > 0 {
            sleep(Duration::from_millis(delay_ms)).await;
        }
    }
    if !remaining.is_empty() {
        writer.write_all(remaining).await.unwrap();
    }
}
/// Adversarial harness for `handle_client_stream`: write `payload` fragmented
/// per `chunks` (with `delay_ms` pauses), then assert the mask backend saw the
/// 5-byte in-range header and that `backend_reply` is relayed to the client —
/// i.e. the malformed probe is masked, not distinguishable.
async fn run_blackhat_generic_fragmented_probe_should_mask(
    payload: Vec<u8>,
    chunks: &[usize],
    delay_ms: u64,
    backend_reply: Vec<u8>,
) {
    let mask_listener = TcpListener::bind("127.0.0.1:0").await.unwrap();
    let mask_addr = mask_listener.local_addr().unwrap();
    let probe_header = in_range_probe_header();
    // Fake mask backend: verify it receives the probe header, then reply.
    let mask_accept_task = tokio::spawn({
        let backend_reply = backend_reply.clone();
        async move {
            let (mut stream, _) = mask_listener.accept().await.unwrap();
            let mut got = [0u8; 5];
            stream.read_exact(&mut got).await.unwrap();
            assert_eq!(got, probe_header);
            stream.write_all(&backend_reply).await.unwrap();
        }
    });
    let mut cfg = ProxyConfig::default();
    cfg.general.beobachten = false;
    cfg.timeouts.client_handshake = 1;
    cfg.censorship.mask = true;
    cfg.censorship.mask_unix_sock = None;
    cfg.censorship.mask_host = Some("127.0.0.1".to_string());
    cfg.censorship.mask_port = mask_addr.port();
    cfg.censorship.mask_proxy_protocol = 0;
    let config = Arc::new(cfg);
    let stats = Arc::new(Stats::new());
    let upstream_manager = make_test_upstream_manager(stats.clone());
    let replay_checker = Arc::new(ReplayChecker::new(128, Duration::from_secs(60)));
    let buffer_pool = Arc::new(BufferPool::new());
    let rng = Arc::new(SecureRandom::new());
    let route_runtime = Arc::new(RouteRuntimeController::new(RelayRouteMode::Direct));
    let ip_tracker = Arc::new(UserIpTracker::new());
    let beobachten = Arc::new(BeobachtenStore::new());
    let (server_side, mut client_side) = duplex(4096);
    let peer: SocketAddr = "203.0.113.202:55002".parse().unwrap();
    let handler = tokio::spawn(handle_client_stream(
        server_side,
        peer,
        config,
        stats,
        upstream_manager,
        replay_checker,
        buffer_pool,
        rng,
        None,
        route_runtime,
        None,
        ip_tracker,
        beobachten,
        false,
    ));
    write_fragmented(&mut client_side, &payload, chunks, delay_ms).await;
    client_side.shutdown().await.unwrap();
    let mut observed = vec![0u8; backend_reply.len()];
    tokio::time::timeout(Duration::from_secs(2), client_side.read_exact(&mut observed))
        .await
        .unwrap()
        .unwrap();
    assert_eq!(observed, backend_reply);
    tokio::time::timeout(Duration::from_secs(2), mask_accept_task)
        .await
        .unwrap()
        .unwrap();
    let _ = tokio::time::timeout(Duration::from_secs(2), handler)
        .await
        .unwrap()
        .unwrap();
}
/// Same adversarial fragmented-probe scenario as the generic harness, but
/// driven through the full `ClientHandler` over real loopback TCP sockets.
async fn run_blackhat_client_handler_fragmented_probe_should_mask(
    payload: Vec<u8>,
    chunks: &[usize],
    delay_ms: u64,
    backend_reply: Vec<u8>,
) {
    let mask_listener = TcpListener::bind("127.0.0.1:0").await.unwrap();
    let mask_addr = mask_listener.local_addr().unwrap();
    let front_listener = TcpListener::bind("127.0.0.1:0").await.unwrap();
    let front_addr = front_listener.local_addr().unwrap();
    let probe_header = in_range_probe_header();
    // Fake mask backend: verify it receives the probe header, then reply.
    let mask_accept_task = tokio::spawn({
        let backend_reply = backend_reply.clone();
        async move {
            let (mut stream, _) = mask_listener.accept().await.unwrap();
            let mut got = [0u8; 5];
            stream.read_exact(&mut got).await.unwrap();
            assert_eq!(got, probe_header);
            stream.write_all(&backend_reply).await.unwrap();
        }
    });
    let mut cfg = ProxyConfig::default();
    cfg.general.beobachten = false;
    cfg.timeouts.client_handshake = 1;
    cfg.censorship.mask = true;
    cfg.censorship.mask_unix_sock = None;
    cfg.censorship.mask_host = Some("127.0.0.1".to_string());
    cfg.censorship.mask_port = mask_addr.port();
    cfg.censorship.mask_proxy_protocol = 0;
    let config = Arc::new(cfg);
    let stats = Arc::new(Stats::new());
    let upstream_manager = make_test_upstream_manager(stats.clone());
    let replay_checker = Arc::new(ReplayChecker::new(128, Duration::from_secs(60)));
    let buffer_pool = Arc::new(BufferPool::new());
    let rng = Arc::new(SecureRandom::new());
    let route_runtime = Arc::new(RouteRuntimeController::new(RelayRouteMode::Direct));
    let ip_tracker = Arc::new(UserIpTracker::new());
    let beobachten = Arc::new(BeobachtenStore::new());
    // Accept one front connection and run the full ClientHandler on it.
    let server_task = {
        let config = config.clone();
        let stats = stats.clone();
        let upstream_manager = upstream_manager.clone();
        let replay_checker = replay_checker.clone();
        let buffer_pool = buffer_pool.clone();
        let rng = rng.clone();
        let route_runtime = route_runtime.clone();
        let ip_tracker = ip_tracker.clone();
        let beobachten = beobachten.clone();
        tokio::spawn(async move {
            let (stream, peer) = front_listener.accept().await.unwrap();
            let real_peer_report = Arc::new(std::sync::Mutex::new(None));
            ClientHandler::new(
                stream,
                peer,
                config,
                stats,
                upstream_manager,
                replay_checker,
                buffer_pool,
                rng,
                None,
                route_runtime,
                None,
                ip_tracker,
                beobachten,
                false,
                real_peer_report,
            )
            .run()
            .await
        })
    };
    let mut client = TcpStream::connect(front_addr).await.unwrap();
    write_fragmented(&mut client, &payload, chunks, delay_ms).await;
    client.shutdown().await.unwrap();
    let mut observed = vec![0u8; backend_reply.len()];
    tokio::time::timeout(Duration::from_secs(2), client.read_exact(&mut observed))
        .await
        .unwrap()
        .unwrap();
    assert_eq!(observed, backend_reply);
    tokio::time::timeout(Duration::from_secs(2), mask_accept_task)
        .await
        .unwrap()
        .unwrap();
    let _ = tokio::time::timeout(Duration::from_secs(2), server_task)
        .await
        .unwrap()
        .unwrap();
}
// Regression test: a bare in-range header with no body, then EOF. The strict
// anti-probing expectation is that even this truncated probe is forwarded to
// the mask backend and the backend's reply relayed back (the test name's
// "but_leaks" suffix records that this scenario once leaked — see the inline
// note before the read below).
#[tokio::test]
async fn blackhat_truncated_in_range_clienthello_generic_stream_should_mask_but_leaks() {
    let mask_listener = TcpListener::bind("127.0.0.1:0").await.unwrap();
    let mask_addr = mask_listener.local_addr().unwrap();
    let backend_reply = b"HTTP/1.1 404 Not Found\r\nContent-Length: 0\r\n\r\n".to_vec();
    let probe = in_range_probe_header();
    // Fake mask backend: verify it receives the probe header, then reply.
    let mask_accept_task = tokio::spawn({
        let backend_reply = backend_reply.clone();
        async move {
            let (mut stream, _) = mask_listener.accept().await.unwrap();
            let mut got = [0u8; 5];
            stream.read_exact(&mut got).await.unwrap();
            assert_eq!(got, probe);
            stream.write_all(&backend_reply).await.unwrap();
        }
    });
    let mut cfg = ProxyConfig::default();
    cfg.general.beobachten = false;
    cfg.timeouts.client_handshake = 1;
    cfg.censorship.mask = true;
    cfg.censorship.mask_unix_sock = None;
    cfg.censorship.mask_host = Some("127.0.0.1".to_string());
    cfg.censorship.mask_port = mask_addr.port();
    cfg.censorship.mask_proxy_protocol = 0;
    let config = Arc::new(cfg);
    let stats = Arc::new(Stats::new());
    let upstream_manager = make_test_upstream_manager(stats.clone());
    let replay_checker = Arc::new(ReplayChecker::new(128, Duration::from_secs(60)));
    let buffer_pool = Arc::new(BufferPool::new());
    let rng = Arc::new(SecureRandom::new());
    let route_runtime = Arc::new(RouteRuntimeController::new(RelayRouteMode::Direct));
    let ip_tracker = Arc::new(UserIpTracker::new());
    let beobachten = Arc::new(BeobachtenStore::new());
    let (server_side, mut client_side) = duplex(4096);
    let peer: SocketAddr = "203.0.113.201:55001".parse().unwrap();
    let handler = tokio::spawn(handle_client_stream(
        server_side,
        peer,
        config,
        stats,
        upstream_manager,
        replay_checker,
        buffer_pool,
        rng,
        None,
        route_runtime,
        None,
        ip_tracker,
        beobachten,
        false,
    ));
    client_side.write_all(&probe).await.unwrap();
    client_side.shutdown().await.unwrap();
    // Security expectation: even malformed in-range TLS should be masked.
    // Current code leaks by returning EOF/timeout instead of masking.
    let mut observed = vec![0u8; backend_reply.len()];
    tokio::time::timeout(Duration::from_secs(2), client_side.read_exact(&mut observed))
        .await
        .unwrap()
        .unwrap();
    assert_eq!(observed, backend_reply);
    tokio::time::timeout(Duration::from_secs(2), mask_accept_task)
        .await
        .unwrap()
        .unwrap();
    let _ = tokio::time::timeout(Duration::from_secs(2), handler)
        .await
        .unwrap()
        .unwrap();
}
#[tokio::test]
async fn blackhat_truncated_in_range_clienthello_client_handler_should_mask_but_leaks() {
    // Full-stack check over real TCP sockets: a probe that looks like the start
    // of an in-range TLS ClientHello but is truncated must still be forwarded
    // to the masking backend, and the backend's reply relayed verbatim to the
    // prober — so the proxy is indistinguishable from the fronted web server.
    let mask_listener = TcpListener::bind("127.0.0.1:0").await.unwrap();
    let mask_addr = mask_listener.local_addr().unwrap();
    let front_listener = TcpListener::bind("127.0.0.1:0").await.unwrap();
    let front_addr = front_listener.local_addr().unwrap();
    let backend_reply = b"HTTP/1.1 403 Forbidden\r\nContent-Length: 0\r\n\r\n".to_vec();
    let probe = in_range_probe_header();
    // Fake masking backend: expect exactly the 5 probe bytes, then answer 403.
    let mask_accept_task = tokio::spawn({
        let backend_reply = backend_reply.clone();
        async move {
            let (mut stream, _) = mask_listener.accept().await.unwrap();
            let mut got = [0u8; 5];
            stream.read_exact(&mut got).await.unwrap();
            assert_eq!(got, probe);
            stream.write_all(&backend_reply).await.unwrap();
        }
    });
    // Masking enabled and pointed at the fake backend over plain TCP
    // (no unix socket, no PROXY-protocol prefix).
    let mut cfg = ProxyConfig::default();
    cfg.general.beobachten = false;
    cfg.timeouts.client_handshake = 1;
    cfg.censorship.mask = true;
    cfg.censorship.mask_unix_sock = None;
    cfg.censorship.mask_host = Some("127.0.0.1".to_string());
    cfg.censorship.mask_port = mask_addr.port();
    cfg.censorship.mask_proxy_protocol = 0;
    let config = Arc::new(cfg);
    let stats = Arc::new(Stats::new());
    let upstream_manager = make_test_upstream_manager(stats.clone());
    let replay_checker = Arc::new(ReplayChecker::new(128, Duration::from_secs(60)));
    let buffer_pool = Arc::new(BufferPool::new());
    let rng = Arc::new(SecureRandom::new());
    let route_runtime = Arc::new(RouteRuntimeController::new(RelayRouteMode::Direct));
    let ip_tracker = Arc::new(UserIpTracker::new());
    let beobachten = Arc::new(BeobachtenStore::new());
    // Accept one front connection and drive it through the real ClientHandler.
    let server_task = {
        let config = config.clone();
        let stats = stats.clone();
        let upstream_manager = upstream_manager.clone();
        let replay_checker = replay_checker.clone();
        let buffer_pool = buffer_pool.clone();
        let rng = rng.clone();
        let route_runtime = route_runtime.clone();
        let ip_tracker = ip_tracker.clone();
        let beobachten = beobachten.clone();
        tokio::spawn(async move {
            let (stream, peer) = front_listener.accept().await.unwrap();
            let real_peer_report = Arc::new(std::sync::Mutex::new(None));
            ClientHandler::new(
                stream,
                peer,
                config,
                stats,
                upstream_manager,
                replay_checker,
                buffer_pool,
                rng,
                None,
                route_runtime,
                None,
                ip_tracker,
                beobachten,
                false,
                real_peer_report,
            )
            .run()
            .await
        })
    };
    // Prober sends the truncated in-range header and half-closes the stream.
    let mut client = TcpStream::connect(front_addr).await.unwrap();
    client.write_all(&probe).await.unwrap();
    client.shutdown().await.unwrap();
    // Security expectation: malformed in-range TLS should still be masked.
    let mut observed = vec![0u8; backend_reply.len()];
    tokio::time::timeout(Duration::from_secs(2), client.read_exact(&mut observed))
        .await
        .unwrap()
        .unwrap();
    assert_eq!(observed, backend_reply);
    // Both helper tasks must complete cleanly within the bounded window.
    tokio::time::timeout(Duration::from_secs(2), mask_accept_task)
        .await
        .unwrap()
        .unwrap();
    let _ = tokio::time::timeout(Duration::from_secs(2), server_task)
        .await
        .unwrap()
        .unwrap();
}
#[tokio::test]
async fn blackhat_generic_truncated_min_body_1_should_mask_but_leaks() {
    // In-range TLS record header whose body is truncated to a single byte,
    // delivered as one 6-byte write; masking must still answer with the
    // backend's 404. Third argument is the inter-fragment delay in ms
    // (0 = back-to-back) — presumably, per the shared helper.
    let probe = truncated_in_range_record(1);
    let reply = b"HTTP/1.1 404 Not Found\r\nContent-Length: 0\r\n\r\n".to_vec();
    run_blackhat_generic_fragmented_probe_should_mask(probe, &[6], 0, reply).await;
}
#[tokio::test]
async fn blackhat_generic_truncated_min_body_8_should_mask_but_leaks() {
    // Header plus 8 body bytes sent in one 13-byte fragment; the masking
    // backend's 404 must still reach the prober.
    let probe = truncated_in_range_record(8);
    let reply = b"HTTP/1.1 404 Not Found\r\nContent-Length: 0\r\n\r\n".to_vec();
    run_blackhat_generic_fragmented_probe_should_mask(probe, &[13], 0, reply).await;
}
#[tokio::test]
async fn blackhat_generic_truncated_min_body_99_should_mask_but_leaks() {
    // Body one byte short of a minimal ClientHello, split as header fragment
    // followed by the partial body; masking must still engage.
    let probe = truncated_in_range_record(MIN_TLS_CLIENT_HELLO_SIZE - 1);
    let fragments = &[5, MIN_TLS_CLIENT_HELLO_SIZE - 1];
    let reply = b"HTTP/1.1 404 Not Found\r\nContent-Length: 0\r\n\r\n".to_vec();
    run_blackhat_generic_fragmented_probe_should_mask(probe, fragments, 0, reply).await;
}
#[tokio::test]
async fn blackhat_generic_fragmented_header_then_close_should_mask_but_leaks() {
    // Bare record header (no body at all) dribbled in five 1-byte writes,
    // then the prober closes; the mask backend must still be consulted.
    let probe = truncated_in_range_record(0);
    let reply = b"HTTP/1.1 404 Not Found\r\nContent-Length: 0\r\n\r\n".to_vec();
    run_blackhat_generic_fragmented_probe_should_mask(probe, &[1; 5], 0, reply).await;
}
#[tokio::test]
async fn blackhat_generic_fragmented_header_plus_partial_body_should_mask_but_leaks() {
    // Header plus five body bytes, all delivered one byte at a time;
    // masking must not be defeated by extreme fragmentation.
    let probe = truncated_in_range_record(5);
    let reply = b"HTTP/1.1 404 Not Found\r\nContent-Length: 0\r\n\r\n".to_vec();
    run_blackhat_generic_fragmented_probe_should_mask(probe, &[1; 10], 0, reply).await;
}
#[tokio::test]
async fn blackhat_generic_slowloris_fragmented_min_probe_should_mask_but_times_out() {
    // Slowloris variant: six 1-byte fragments spaced 250ms apart; masking
    // must still complete despite the deliberately slow delivery.
    let probe = truncated_in_range_record(1);
    let reply = b"HTTP/1.1 404 Not Found\r\nContent-Length: 0\r\n\r\n".to_vec();
    run_blackhat_generic_fragmented_probe_should_mask(probe, &[1; 6], 250, reply).await;
}
#[tokio::test]
async fn blackhat_client_handler_truncated_min_body_1_should_mask_but_leaks() {
    // Same truncated 1-byte-body probe as the generic variant, but exercised
    // through the full ClientHandler path; backend answers 403 here.
    let probe = truncated_in_range_record(1);
    let reply = b"HTTP/1.1 403 Forbidden\r\nContent-Length: 0\r\n\r\n".to_vec();
    run_blackhat_client_handler_fragmented_probe_should_mask(probe, &[6], 0, reply).await;
}
#[tokio::test]
async fn blackhat_client_handler_truncated_min_body_8_should_mask_but_leaks() {
    // ClientHandler path: header plus 8 body bytes in a single 13-byte write.
    let probe = truncated_in_range_record(8);
    let reply = b"HTTP/1.1 403 Forbidden\r\nContent-Length: 0\r\n\r\n".to_vec();
    run_blackhat_client_handler_fragmented_probe_should_mask(probe, &[13], 0, reply).await;
}
#[tokio::test]
async fn blackhat_client_handler_truncated_min_body_99_should_mask_but_leaks() {
    // ClientHandler path: body one byte short of a minimal ClientHello,
    // split into header fragment then partial body.
    let probe = truncated_in_range_record(MIN_TLS_CLIENT_HELLO_SIZE - 1);
    let fragments = &[5, MIN_TLS_CLIENT_HELLO_SIZE - 1];
    let reply = b"HTTP/1.1 403 Forbidden\r\nContent-Length: 0\r\n\r\n".to_vec();
    run_blackhat_client_handler_fragmented_probe_should_mask(probe, fragments, 0, reply).await;
}
#[tokio::test]
async fn blackhat_client_handler_fragmented_header_then_close_should_mask_but_leaks() {
    // ClientHandler path: bare record header dribbled one byte at a time,
    // then the connection is closed.
    let probe = truncated_in_range_record(0);
    let reply = b"HTTP/1.1 403 Forbidden\r\nContent-Length: 0\r\n\r\n".to_vec();
    run_blackhat_client_handler_fragmented_probe_should_mask(probe, &[1; 5], 0, reply).await;
}
#[tokio::test]
async fn blackhat_client_handler_fragmented_header_plus_partial_body_should_mask_but_leaks() {
    // ClientHandler path: header plus five body bytes, one byte per write.
    let probe = truncated_in_range_record(5);
    let reply = b"HTTP/1.1 403 Forbidden\r\nContent-Length: 0\r\n\r\n".to_vec();
    run_blackhat_client_handler_fragmented_probe_should_mask(probe, &[1; 10], 0, reply).await;
}
#[tokio::test]
async fn blackhat_client_handler_slowloris_fragmented_min_probe_should_mask_but_times_out() {
    // ClientHandler path, slowloris variant: six 1-byte fragments with a
    // 250ms pause between them.
    let probe = truncated_in_range_record(1);
    let reply = b"HTTP/1.1 403 Forbidden\r\nContent-Length: 0\r\n\r\n".to_vec();
    run_blackhat_client_handler_fragmented_probe_should_mask(probe, &[1; 6], 250, reply).await;
}

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,467 @@
use super::*;
use std::sync::Arc;
use std::net::{IpAddr, Ipv4Addr};
use std::time::{Duration, Instant};
use crate::crypto::sha256;
// Forge a client handshake frame that the server will accept for `secret_hex`:
// fill the key-material region deterministically, derive the server-side
// decryption keystream exactly the way the proxy does, then XOR the desired
// plaintext tail (proto tag + DC index) so that server-side decryption
// reproduces it.
fn make_valid_mtproto_handshake(secret_hex: &str, proto_tag: ProtoTag, dc_idx: i16) -> [u8; HANDSHAKE_LEN] {
    let secret = hex::decode(secret_hex).expect("secret hex must decode");
    let mut handshake = [0x5Au8; HANDSHAKE_LEN];
    // Deterministic, non-zero prekey + IV bytes (1, 2, 3, ...).
    for (idx, b) in handshake[SKIP_LEN..SKIP_LEN + PREKEY_LEN + IV_LEN]
        .iter_mut()
        .enumerate()
    {
        *b = (idx as u8).wrapping_add(1);
    }
    let dec_prekey = &handshake[SKIP_LEN..SKIP_LEN + PREKEY_LEN];
    let dec_iv_bytes = &handshake[SKIP_LEN + PREKEY_LEN..SKIP_LEN + PREKEY_LEN + IV_LEN];
    // Key = SHA-256(prekey || secret), mirroring the server's key derivation.
    let mut dec_key_input = Vec::with_capacity(PREKEY_LEN + secret.len());
    dec_key_input.extend_from_slice(dec_prekey);
    dec_key_input.extend_from_slice(&secret);
    let dec_key = sha256(&dec_key_input);
    let mut dec_iv_arr = [0u8; IV_LEN];
    dec_iv_arr.copy_from_slice(dec_iv_bytes);
    let dec_iv = u128::from_be_bytes(dec_iv_arr);
    // AES-CTR over zeros yields the raw keystream.
    let mut stream = AesCtr::new(&dec_key, dec_iv);
    let keystream = stream.encrypt(&[0u8; HANDSHAKE_LEN]);
    // Desired plaintext tail: protocol tag and little-endian DC index.
    let mut target_plain = [0u8; HANDSHAKE_LEN];
    target_plain[PROTO_TAG_POS..PROTO_TAG_POS + 4].copy_from_slice(&proto_tag.to_bytes());
    target_plain[DC_IDX_POS..DC_IDX_POS + 2].copy_from_slice(&dc_idx.to_le_bytes());
    // Stream cipher: ciphertext byte = plaintext byte XOR keystream byte.
    for idx in PROTO_TAG_POS..HANDSHAKE_LEN {
        handshake[idx] = target_plain[idx] ^ keystream[idx];
    }
    handshake
}
// Serialize tests that mutate the global auth-probe map. A poisoned lock
// (a previous test panicked while holding it) is still safe to reuse here.
fn auth_probe_test_guard() -> std::sync::MutexGuard<'static, ()> {
    match auth_probe_test_lock().lock() {
        Ok(guard) => guard,
        Err(poisoned) => poisoned.into_inner(),
    }
}
// Minimal single-user config: one secret for "user", secure mode on, and
// time-skew validation disabled so test-forged handshakes never expire.
fn test_config_with_secret_hex(secret_hex: &str) -> ProxyConfig {
    let mut cfg = ProxyConfig::default();
    cfg.access.users.clear();
    cfg.access.users.insert(String::from("user"), String::from(secret_hex));
    cfg.access.ignore_time_skew = true;
    cfg.general.modes.secure = true;
    cfg
}
// ------------------------------------------------------------------
// Mutational Bit-Flipping Tests (OWASP ASVS 5.1.4)
// ------------------------------------------------------------------
#[tokio::test]
async fn mtproto_handshake_bit_flip_anywhere_rejected() {
    // Any single-bit corruption past the plaintext skip region must be
    // rejected; only the pristine handshake may authenticate.
    let _guard = auth_probe_test_guard();
    clear_auth_probe_state_for_testing();
    let secret_hex = "11223344556677889900aabbccddeeff";
    let base = make_valid_mtproto_handshake(secret_hex, ProtoTag::Secure, 2);
    let config = test_config_with_secret_hex(secret_hex);
    let replay_checker = ReplayChecker::new(128, Duration::from_secs(60));
    let peer: SocketAddr = "192.0.2.1:12345".parse().unwrap();
    // Sanity: the unmodified handshake authenticates.
    let baseline = handle_mtproto_handshake(&base, tokio::io::empty(), tokio::io::sink(), peer, &config, &replay_checker, false, None).await;
    assert!(matches!(baseline, HandshakeResult::Success(_)), "Baseline failed: expected Success");
    // Walk the encrypted region, flipping the low bit of each byte in turn.
    for byte_pos in SKIP_LEN..HANDSHAKE_LEN {
        let mut mutated = base;
        mutated[byte_pos] ^= 0x01;
        let outcome = handle_mtproto_handshake(&mutated, tokio::io::empty(), tokio::io::sink(), peer, &config, &replay_checker, false, None).await;
        assert!(matches!(outcome, HandshakeResult::BadClient { .. }), "Flip at byte {byte_pos} bit 0 must be rejected");
    }
}
// ------------------------------------------------------------------
// Adversarial Probing / Timing Neutrality (OWASP ASVS 5.1.7)
// ------------------------------------------------------------------
#[tokio::test]
async fn mtproto_handshake_timing_neutrality_mocked() {
let secret_hex = "00112233445566778899aabbccddeeff";
let base = make_valid_mtproto_handshake(secret_hex, ProtoTag::Secure, 1);
let config = test_config_with_secret_hex(secret_hex);
let replay_checker = ReplayChecker::new(128, Duration::from_secs(60));
let peer: SocketAddr = "192.0.2.2:54321".parse().unwrap();
const ITER: usize = 50;
let mut start = Instant::now();
for _ in 0..ITER {
let _ = handle_mtproto_handshake(&base, tokio::io::empty(), tokio::io::sink(), peer, &config, &replay_checker, false, None).await;
}
let duration_success = start.elapsed();
start = Instant::now();
for i in 0..ITER {
let mut h = base;
h[SKIP_LEN + (i % 48)] ^= 0xFF;
let _ = handle_mtproto_handshake(&h, tokio::io::empty(), tokio::io::sink(), peer, &config, &replay_checker, false, None).await;
}
let duration_fail = start.elapsed();
let avg_diff_ms = (duration_success.as_millis() as f64 - duration_fail.as_millis() as f64).abs() / ITER as f64;
// Threshold (loose for CI)
assert!(avg_diff_ms < 100.0, "Timing difference too large: {} ms/iter", avg_diff_ms);
}
// ------------------------------------------------------------------
// Stress Tests (OWASP ASVS 5.1.6)
// ------------------------------------------------------------------
#[tokio::test]
async fn auth_probe_throttle_saturation_stress() {
    // The throttle map must stay hard-bounded even when hammered with
    // failures from many distinct addresses, while a saturated peer stays
    // throttled.
    let _guard = auth_probe_test_guard();
    clear_auth_probe_state_for_testing();
    let now = Instant::now();
    // Drive one source address over the failure threshold.
    let target_ip = IpAddr::V4(Ipv4Addr::new(1, 1, 1, 1));
    for _ in 0..AUTH_PROBE_BACKOFF_START_FAILS {
        auth_probe_record_failure(target_ip, now);
    }
    assert!(auth_probe_is_throttled(target_ip, now));
    // Then record failures from a spread of other addresses.
    for i in 0..500u32 {
        let ip = IpAddr::V4(Ipv4Addr::new(203, 0, 113, (i % 256) as u8));
        auth_probe_record_failure(ip, now);
    }
    let tracked = match AUTH_PROBE_STATE.get() {
        Some(state) => state.len(),
        None => 0,
    };
    assert!(
        tracked <= AUTH_PROBE_TRACK_MAX_ENTRIES,
        "auth probe state grew past hard cap: {tracked} > {AUTH_PROBE_TRACK_MAX_ENTRIES}"
    );
}
#[tokio::test]
async fn mtproto_handshake_abridged_prefix_rejected() {
    // A frame starting with 0xef (abridged-transport marker) is refused
    // outright — mirroring upstream MTProxy behavior.
    let _guard = auth_probe_test_guard();
    clear_auth_probe_state_for_testing();
    let mut handshake = [0x5Au8; HANDSHAKE_LEN];
    handshake[0] = 0xef;
    let config = ProxyConfig::default();
    let replay_checker = ReplayChecker::new(128, Duration::from_secs(60));
    let peer: SocketAddr = "192.0.2.3:12345".parse().unwrap();
    let outcome = handle_mtproto_handshake(&handshake, tokio::io::empty(), tokio::io::sink(), peer, &config, &replay_checker, false, None).await;
    assert!(matches!(outcome, HandshakeResult::BadClient { .. }));
}
#[tokio::test]
async fn mtproto_handshake_preferred_user_mismatch_continues() {
    // Hinting the wrong user must not short-circuit authentication: the
    // handshake is still matched against every configured secret.
    let _guard = auth_probe_test_guard();
    clear_auth_probe_state_for_testing();
    let secret1_hex = "11111111111111111111111111111111";
    let secret2_hex = "22222222222222222222222222222222";
    // Handshake is forged for user2's secret, but we hint user1 below.
    let base = make_valid_mtproto_handshake(secret2_hex, ProtoTag::Secure, 1);
    let mut config = ProxyConfig::default();
    config.access.users.insert("user1".to_string(), secret1_hex.to_string());
    config.access.users.insert("user2".to_string(), secret2_hex.to_string());
    config.access.ignore_time_skew = true;
    config.general.modes.secure = true;
    let replay_checker = ReplayChecker::new(128, Duration::from_secs(60));
    let peer: SocketAddr = "192.0.2.4:12345".parse().unwrap();
    let res = handle_mtproto_handshake(&base, tokio::io::empty(), tokio::io::sink(), peer, &config, &replay_checker, false, Some("user1")).await;
    match res {
        HandshakeResult::Success((_, _, success)) => assert_eq!(success.user, "user2"),
        _ => panic!("Handshake failed even though user2 matched"),
    }
}
#[tokio::test]
async fn mtproto_handshake_concurrent_flood_stability() {
    // 50 concurrent handshakes with identical valid bytes from distinct
    // peers. Individual outcomes may vary (replay detection may reject
    // duplicates); the property under test is crash/hang freedom.
    let _guard = auth_probe_test_guard();
    clear_auth_probe_state_for_testing();
    let secret_hex = "00112233445566778899aabbccddeeff";
    let base = make_valid_mtproto_handshake(secret_hex, ProtoTag::Secure, 1);
    let mut config = test_config_with_secret_hex(secret_hex);
    config.access.ignore_time_skew = true;
    let replay_checker = Arc::new(ReplayChecker::new(1024, Duration::from_secs(60)));
    let config = Arc::new(config);
    let handles: Vec<_> = (0..50)
        .map(|i| {
            let base = base;
            let config = Arc::clone(&config);
            let replay_checker = Arc::clone(&replay_checker);
            let peer: SocketAddr = format!("192.0.2.{}:12345", (i % 254) + 1).parse().unwrap();
            tokio::spawn(async move {
                let res = handle_mtproto_handshake(&base, tokio::io::empty(), tokio::io::sink(), peer, &config, &replay_checker, false, None).await;
                matches!(res, HandshakeResult::Success(_))
            })
        })
        .collect();
    // Outcomes are intentionally ignored; a panic inside a task surfaces here.
    for handle in handles {
        let _ = handle.await.unwrap();
    }
}
#[tokio::test]
async fn mtproto_replay_is_rejected_across_distinct_peers() {
    // A handshake digest, once accepted, must be refused even when the
    // byte-identical copy arrives from a different source address.
    let _guard = auth_probe_test_guard();
    clear_auth_probe_state_for_testing();
    let secret_hex = "0123456789abcdeffedcba9876543210";
    let handshake = make_valid_mtproto_handshake(secret_hex, ProtoTag::Secure, 2);
    let config = test_config_with_secret_hex(secret_hex);
    let replay_checker = ReplayChecker::new(128, Duration::from_secs(60));
    let first_peer: SocketAddr = "198.51.100.10:41001".parse().unwrap();
    let second_peer: SocketAddr = "198.51.100.11:41002".parse().unwrap();
    // Same bytes, same checker — only the source address differs per call.
    let attempt = |peer: SocketAddr| {
        handle_mtproto_handshake(
            &handshake,
            tokio::io::empty(),
            tokio::io::sink(),
            peer,
            &config,
            &replay_checker,
            false,
            None,
        )
    };
    assert!(matches!(attempt(first_peer).await, HandshakeResult::Success(_)));
    assert!(matches!(attempt(second_peer).await, HandshakeResult::BadClient { .. }));
}
#[tokio::test]
async fn mtproto_blackhat_mutation_corpus_never_panics_and_stays_fail_closed() {
    // 512 deterministic single-byte mutations of a valid handshake: every one
    // must finish within 250ms and land on an explicit outcome — no panics,
    // no hangs, no third result.
    let _guard = auth_probe_test_guard();
    clear_auth_probe_state_for_testing();
    let secret_hex = "89abcdef012345670123456789abcdef";
    let base = make_valid_mtproto_handshake(secret_hex, ProtoTag::Secure, 2);
    let config = test_config_with_secret_hex(secret_hex);
    let replay_checker = ReplayChecker::new(8192, Duration::from_secs(60));
    for i in 0..512usize {
        let mut mutated = base;
        // Stride-31 walk over the encrypted region; `%` binds before `+`, so
        // the index stays in [SKIP_LEN, HANDSHAKE_LEN) (.min is a final cap).
        let pos = (SKIP_LEN + (i * 31) % (HANDSHAKE_LEN - SKIP_LEN)).min(HANDSHAKE_LEN - 1);
        // `(i as u8) | 1` is odd, and rotating keeps it non-zero, so every
        // iteration genuinely changes a byte of `base`.
        mutated[pos] ^= ((i as u8) | 1).rotate_left((i % 8) as u32);
        // Fresh peer per iteration so per-IP backoff never throttles the run.
        let peer: SocketAddr = SocketAddr::new(
            IpAddr::V4(Ipv4Addr::new(198, 18, (i / 254) as u8, (i % 254 + 1) as u8)),
            42000 + (i % 1000) as u16,
        );
        let res = tokio::time::timeout(
            Duration::from_millis(250),
            handle_mtproto_handshake(
                &mutated,
                tokio::io::empty(),
                tokio::io::sink(),
                peer,
                &config,
                &replay_checker,
                false,
                None,
            ),
        )
        .await
        .expect("fuzzed mutation must complete in bounded time");
        assert!(
            matches!(res, HandshakeResult::BadClient { .. } | HandshakeResult::Success(_)),
            "mutation corpus must stay within explicit handshake outcomes"
        );
    }
}
#[tokio::test]
async fn auth_probe_success_clears_throttled_peer_state() {
    // One successful authentication must wipe a peer's accumulated backoff.
    let _guard = auth_probe_test_guard();
    clear_auth_probe_state_for_testing();
    let target_ip = IpAddr::V4(Ipv4Addr::new(203, 0, 113, 90));
    let now = Instant::now();
    // Push the peer into the throttled state.
    let mut fails = 0;
    while fails < AUTH_PROBE_BACKOFF_START_FAILS {
        auth_probe_record_failure(target_ip, now);
        fails += 1;
    }
    assert!(auth_probe_is_throttled(target_ip, now));
    auth_probe_record_success(target_ip);
    let later = now + Duration::from_millis(1);
    assert!(
        !auth_probe_is_throttled(target_ip, later),
        "successful auth must clear per-peer throttle state"
    );
}
#[tokio::test]
async fn mtproto_invalid_storm_over_cap_keeps_probe_map_hard_bounded() {
    // Flood with one corrupted handshake from more source IPs than the probe
    // tracker may remember; the map must stay at or below its hard cap
    // (no unbounded memory growth under attack).
    let _guard = auth_probe_test_guard();
    clear_auth_probe_state_for_testing();
    let secret_hex = "00112233445566778899aabbccddeeff";
    let mut invalid = make_valid_mtproto_handshake(secret_hex, ProtoTag::Secure, 2);
    // A single flipped ciphertext byte makes the handshake fail auth.
    invalid[SKIP_LEN + 3] ^= 0xff;
    let config = test_config_with_secret_hex(secret_hex);
    let replay_checker = ReplayChecker::new(64, Duration::from_secs(60));
    for i in 0..(AUTH_PROBE_TRACK_MAX_ENTRIES + 512) {
        // Mostly-unique 10.x.y.z source per iteration; the octet arithmetic
        // can repeat occasionally, which only makes the bound easier to hold.
        let peer: SocketAddr = SocketAddr::new(
            IpAddr::V4(Ipv4Addr::new(10, (i / 65535) as u8, ((i / 255) % 255) as u8, (i % 255 + 1) as u8)),
            43000 + (i % 20000) as u16,
        );
        let res = handle_mtproto_handshake(
            &invalid,
            tokio::io::empty(),
            tokio::io::sink(),
            peer,
            &config,
            &replay_checker,
            false,
            None,
        )
        .await;
        assert!(matches!(res, HandshakeResult::BadClient { .. }));
    }
    let tracked = AUTH_PROBE_STATE
        .get()
        .map(|state| state.len())
        .unwrap_or(0);
    assert!(
        tracked <= AUTH_PROBE_TRACK_MAX_ENTRIES,
        "probe map must remain bounded under invalid storm: {tracked}"
    );
}
#[tokio::test]
async fn mtproto_property_style_multi_bit_mutations_fail_closed_or_auth_only() {
    // Property-style fuzz: apply four deterministic xorshift-driven byte
    // mutations to a valid handshake, 2048 times, and require every outcome
    // to be one of the two explicit handshake results within 250ms.
    let _guard = auth_probe_test_guard();
    clear_auth_probe_state_for_testing();
    let secret_hex = "f0e1d2c3b4a5968778695a4b3c2d1e0f";
    let base = make_valid_mtproto_handshake(secret_hex, ProtoTag::Secure, 2);
    let config = test_config_with_secret_hex(secret_hex);
    let replay_checker = ReplayChecker::new(10_000, Duration::from_secs(60));
    let mut seed: u64 = 0xC0FF_EE12_3456_789A;
    for i in 0..2_048usize {
        let mut mutated = base;
        for _ in 0..4 {
            // Xorshift-style PRNG step; fixed seed keeps the corpus
            // reproducible across runs.
            seed ^= seed << 7;
            seed ^= seed >> 9;
            seed ^= seed << 8;
            // Only bytes past the plaintext skip region are mutated.
            let idx = SKIP_LEN + (seed as usize % (HANDSHAKE_LEN - SKIP_LEN));
            mutated[idx] ^= ((seed >> 11) as u8).wrapping_add(1);
        }
        // Unique-ish peer per iteration so per-IP throttling never dominates.
        let peer: SocketAddr = SocketAddr::new(
            IpAddr::V4(Ipv4Addr::new(10, 123, (i / 254) as u8, (i % 254 + 1) as u8)),
            45000 + (i % 2000) as u16,
        );
        let outcome = tokio::time::timeout(
            Duration::from_millis(250),
            handle_mtproto_handshake(
                &mutated,
                tokio::io::empty(),
                tokio::io::sink(),
                peer,
                &config,
                &replay_checker,
                false,
                None,
            ),
        )
        .await
        .expect("mutation iteration must complete in bounded time");
        // Success stays allowed: two XORs on the same index can cancel out
        // and reproduce `base`.
        assert!(
            matches!(outcome, HandshakeResult::BadClient { .. } | HandshakeResult::Success(_)),
            "mutations must remain fail-closed/auth-only"
        );
    }
}
#[tokio::test]
#[ignore = "heavy soak; run manually"]
async fn mtproto_blackhat_20k_mutation_soak_never_panics() {
    // Long-running soak: 20k deterministic multi-byte mutations. Each attempt
    // only has to finish within 250ms; outcomes themselves are ignored.
    let _guard = auth_probe_test_guard();
    clear_auth_probe_state_for_testing();
    let secret_hex = "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa";
    let base = make_valid_mtproto_handshake(secret_hex, ProtoTag::Secure, 2);
    let config = test_config_with_secret_hex(secret_hex);
    let replay_checker = ReplayChecker::new(50_000, Duration::from_secs(120));
    let mut seed: u64 = 0xA5A5_5A5A_DEAD_BEEF;
    for i in 0..20_000usize {
        let mut mutated = base;
        // Three xorshift-driven byte flips inside the encrypted region.
        for _ in 0..3 {
            seed ^= seed << 7;
            seed ^= seed >> 9;
            seed ^= seed << 8;
            let idx = SKIP_LEN + (seed as usize % (HANDSHAKE_LEN - SKIP_LEN));
            mutated[idx] ^= ((seed >> 19) as u8).wrapping_add(1);
        }
        // Spread peers over 172.31.x.y so per-IP throttling never dominates.
        let peer: SocketAddr = SocketAddr::new(
            IpAddr::V4(Ipv4Addr::new(172, 31, (i / 254) as u8, (i % 254 + 1) as u8)),
            47000 + (i % 15000) as u16,
        );
        let _ = tokio::time::timeout(
            Duration::from_millis(250),
            handle_mtproto_handshake(
                &mutated,
                tokio::io::empty(),
                tokio::io::sink(),
                peer,
                &config,
                &replay_checker,
                false,
                None,
            ),
        )
        .await
        .expect("soak mutation must complete in bounded time");
    }
}

View File

@@ -0,0 +1,270 @@
use super::*;
use crate::config::ProxyConfig;
use crate::crypto::AesCtr;
use crate::crypto::sha256;
use crate::protocol::constants::ProtoTag;
use crate::stats::ReplayChecker;
use std::net::SocketAddr;
use std::sync::MutexGuard;
use tokio::time::{timeout, Duration as TokioDuration};
// Forge a handshake frame whose server-side decryption yields the given raw
// `proto_bytes` and DC index for `secret_hex`. Letting the caller pick
// arbitrary proto bytes (rather than a `ProtoTag`) allows tests to construct
// frames with unknown/invalid protocol tags.
fn make_mtproto_handshake_with_proto_bytes(
    secret_hex: &str,
    proto_bytes: [u8; 4],
    dc_idx: i16,
) -> [u8; HANDSHAKE_LEN] {
    let secret = hex::decode(secret_hex).expect("secret hex must decode");
    let mut handshake = [0x5Au8; HANDSHAKE_LEN];
    // Deterministic, non-zero prekey + IV bytes (1, 2, 3, ...).
    for (idx, b) in handshake[SKIP_LEN..SKIP_LEN + PREKEY_LEN + IV_LEN]
        .iter_mut()
        .enumerate()
    {
        *b = (idx as u8).wrapping_add(1);
    }
    let dec_prekey = &handshake[SKIP_LEN..SKIP_LEN + PREKEY_LEN];
    let dec_iv_bytes = &handshake[SKIP_LEN + PREKEY_LEN..SKIP_LEN + PREKEY_LEN + IV_LEN];
    // Key = SHA-256(prekey || secret), mirroring the server's derivation.
    let mut dec_key_input = Vec::with_capacity(PREKEY_LEN + secret.len());
    dec_key_input.extend_from_slice(dec_prekey);
    dec_key_input.extend_from_slice(&secret);
    let dec_key = sha256(&dec_key_input);
    let mut dec_iv_arr = [0u8; IV_LEN];
    dec_iv_arr.copy_from_slice(dec_iv_bytes);
    let dec_iv = u128::from_be_bytes(dec_iv_arr);
    // AES-CTR over zeros yields the raw keystream.
    let mut stream = AesCtr::new(&dec_key, dec_iv);
    let keystream = stream.encrypt(&[0u8; HANDSHAKE_LEN]);
    // Desired plaintext tail: raw proto bytes and little-endian DC index.
    let mut target_plain = [0u8; HANDSHAKE_LEN];
    target_plain[PROTO_TAG_POS..PROTO_TAG_POS + 4].copy_from_slice(&proto_bytes);
    target_plain[DC_IDX_POS..DC_IDX_POS + 2].copy_from_slice(&dc_idx.to_le_bytes());
    // Stream cipher: ciphertext byte = plaintext byte XOR keystream byte.
    for idx in PROTO_TAG_POS..HANDSHAKE_LEN {
        handshake[idx] = target_plain[idx] ^ keystream[idx];
    }
    handshake
}
// Convenience wrapper: a "valid" handshake is one whose proto bytes come
// from a recognized `ProtoTag`.
fn make_valid_mtproto_handshake(secret_hex: &str, proto_tag: ProtoTag, dc_idx: i16) -> [u8; HANDSHAKE_LEN] {
    let proto_bytes = proto_tag.to_bytes();
    make_mtproto_handshake_with_proto_bytes(secret_hex, proto_bytes, dc_idx)
}
// Minimal single-user config: one secret for "user", secure mode enabled,
// and time-skew validation disabled so forged handshakes never expire.
fn test_config_with_secret_hex(secret_hex: &str) -> ProxyConfig {
    let mut cfg = ProxyConfig::default();
    cfg.access.users.clear();
    cfg.access.users.insert(String::from("user"), String::from(secret_hex));
    cfg.access.ignore_time_skew = true;
    cfg.general.modes.secure = true;
    cfg
}
// Take the shared auth-probe test lock, recovering from poisoning so that
// one panicking test cannot cascade failures into the rest of the module.
fn auth_probe_test_guard() -> MutexGuard<'static, ()> {
    match auth_probe_test_lock().lock() {
        Ok(guard) => guard,
        Err(poisoned) => poisoned.into_inner(),
    }
}
#[tokio::test]
async fn mtproto_handshake_duplicate_digest_is_replayed_on_second_attempt() {
    // Same peer, byte-identical handshake: the first attempt authenticates,
    // the second must be rejected as a replay.
    let _guard = auth_probe_test_guard();
    clear_auth_probe_state_for_testing();
    let secret_hex = "11223344556677889900aabbccddeeff";
    let base = make_valid_mtproto_handshake(secret_hex, ProtoTag::Secure, 2);
    let config = test_config_with_secret_hex(secret_hex);
    let replay_checker = ReplayChecker::new(128, TokioDuration::from_secs(60));
    let peer: SocketAddr = "192.0.2.1:12345".parse().unwrap();
    let attempt = || {
        handle_mtproto_handshake(
            &base,
            tokio::io::empty(),
            tokio::io::sink(),
            peer,
            &config,
            &replay_checker,
            false,
            None,
        )
    };
    assert!(matches!(attempt().await, HandshakeResult::Success(_)));
    assert!(matches!(attempt().await, HandshakeResult::BadClient { .. }));
    clear_auth_probe_state_for_testing();
}
#[tokio::test]
async fn mtproto_handshake_fuzz_corpus_never_panics_and_stays_fail_closed() {
    // Hand-picked plus pseudo-random corpus of invalid handshakes; every
    // item must be rejected within one second — fail closed, never panic.
    let _guard = auth_probe_test_guard();
    clear_auth_probe_state_for_testing();
    let secret_hex = "00112233445566778899aabbccddeeff";
    let base = make_valid_mtproto_handshake(secret_hex, ProtoTag::Secure, 1);
    let config = test_config_with_secret_hex(secret_hex);
    let replay_checker = ReplayChecker::new(128, TokioDuration::from_secs(60));
    let peer: SocketAddr = "192.0.2.2:54321".parse().unwrap();
    let mut corpus = Vec::<[u8; HANDSHAKE_LEN]>::new();
    // Unrecognized proto tags: all-zero and all-ones.
    corpus.push(make_mtproto_handshake_with_proto_bytes(
        secret_hex,
        [0x00, 0x00, 0x00, 0x00],
        1,
    ));
    corpus.push(make_mtproto_handshake_with_proto_bytes(
        secret_hex,
        [0xff, 0xff, 0xff, 0xff],
        1,
    ));
    // Structurally valid handshake built for a secret the server doesn't know.
    corpus.push(make_valid_mtproto_handshake(
        "ffeeddccbbaa99887766554433221100",
        ProtoTag::Secure,
        1,
    ));
    // 32 deterministic LCG-driven mutations restricted to the key-material
    // region (prekey + IV), which scrambles the derived key.
    let mut seed = 0xF0F0_F00D_BAAD_CAFEu64;
    for _ in 0..32 {
        let mut mutated = base;
        for _ in 0..4 {
            seed = seed.wrapping_mul(2862933555777941757).wrapping_add(3037000493);
            let idx = SKIP_LEN + (seed as usize % (PREKEY_LEN + IV_LEN));
            mutated[idx] ^= ((seed >> 19) as u8).wrapping_add(1);
        }
        corpus.push(mutated);
    }
    for (idx, input) in corpus.into_iter().enumerate() {
        let result = timeout(
            TokioDuration::from_secs(1),
            handle_mtproto_handshake(
                &input,
                tokio::io::empty(),
                tokio::io::sink(),
                peer,
                &config,
                &replay_checker,
                false,
                None,
            ),
        )
        .await
        .expect("fuzzed handshake must complete in time");
        assert!(
            matches!(result, HandshakeResult::BadClient { .. }),
            "corpus item {idx} must fail closed"
        );
    }
    clear_auth_probe_state_for_testing();
}
#[tokio::test]
async fn mtproto_handshake_mixed_corpus_never_panics_and_exact_duplicates_are_rejected() {
    // Combined scenario: a valid handshake is accepted once, its exact
    // duplicate is refused as a replay, and a corpus of targeted plus
    // pseudo-random key-material corruptions all fail closed in bounded time.
    let _guard = auth_probe_test_guard();
    clear_auth_probe_state_for_testing();
    let secret_hex = "99887766554433221100ffeeddccbbaa";
    let base = make_valid_mtproto_handshake(secret_hex, ProtoTag::Secure, 4);
    let config = test_config_with_secret_hex(secret_hex);
    let replay_checker = ReplayChecker::new(256, TokioDuration::from_secs(60));
    let peer: SocketAddr = "192.0.2.44:45444".parse().unwrap();
    // First presentation of the valid frame must authenticate.
    let first = timeout(
        TokioDuration::from_secs(1),
        handle_mtproto_handshake(
            &base,
            tokio::io::empty(),
            tokio::io::sink(),
            peer,
            &config,
            &replay_checker,
            false,
            None,
        ),
    )
    .await
    .expect("base handshake must not hang");
    assert!(matches!(first, HandshakeResult::Success(_)));
    // Byte-identical duplicate must be rejected by the replay checker.
    let replay = timeout(
        TokioDuration::from_secs(1),
        handle_mtproto_handshake(
            &base,
            tokio::io::empty(),
            tokio::io::sink(),
            peer,
            &config,
            &replay_checker,
            false,
            None,
        ),
    )
    .await
    .expect("duplicate handshake must not hang");
    assert!(matches!(replay, HandshakeResult::BadClient { .. }));
    // Targeted corruptions: first prekey byte, first IV byte, last IV byte.
    let mut corpus = Vec::<[u8; HANDSHAKE_LEN]>::new();
    let mut prekey_flip = base;
    prekey_flip[SKIP_LEN] ^= 0x80;
    corpus.push(prekey_flip);
    let mut iv_flip = base;
    iv_flip[SKIP_LEN + PREKEY_LEN] ^= 0x01;
    corpus.push(iv_flip);
    let mut tail_flip = base;
    tail_flip[SKIP_LEN + PREKEY_LEN + IV_LEN - 1] ^= 0x40;
    corpus.push(tail_flip);
    // 24 deterministic LCG-driven mutations of the key-material region.
    let mut seed = 0xBADC_0FFE_EE11_4242u64;
    for _ in 0..24 {
        let mut mutated = base;
        for _ in 0..3 {
            seed = seed.wrapping_mul(6364136223846793005).wrapping_add(1);
            let idx = SKIP_LEN + (seed as usize % (PREKEY_LEN + IV_LEN));
            mutated[idx] ^= ((seed >> 16) as u8).wrapping_add(1);
        }
        corpus.push(mutated);
    }
    for (idx, input) in corpus.iter().enumerate() {
        let result = timeout(
            TokioDuration::from_secs(1),
            handle_mtproto_handshake(
                input,
                tokio::io::empty(),
                tokio::io::sink(),
                peer,
                &config,
                &replay_checker,
                false,
                None,
            ),
        )
        .await
        .expect("fuzzed handshake must complete in time");
        assert!(
            matches!(result, HandshakeResult::BadClient { .. }),
            "mixed corpus item {idx} must fail closed"
        );
    }
    clear_auth_probe_state_for_testing();
}

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,762 @@
use super::*;
use std::sync::Arc;
use tokio::io::duplex;
use tokio::net::TcpListener;
use tokio::time::{Instant, Duration};
use crate::config::ProxyConfig;
use crate::proxy::relay::relay_bidirectional;
use crate::stats::Stats;
use crate::stats::beobachten::BeobachtenStore;
use crate::stream::BufferPool;
// ------------------------------------------------------------------
// Probing Indistinguishability (OWASP ASVS 5.1.7)
// ------------------------------------------------------------------
#[tokio::test]
async fn masking_probes_indistinguishable_timing() {
    // Every category of non-protocol probe must burn roughly the same wall
    // time, so an active prober cannot classify the endpoint by latency.
    let mut config = ProxyConfig::default();
    config.censorship.mask = true;
    config.censorship.mask_host = Some("127.0.0.1".to_string());
    config.censorship.mask_port = 80; // Should timeout/refuse
    let peer: SocketAddr = "192.0.2.10:443".parse().unwrap();
    let local_addr: SocketAddr = "127.0.0.1:443".parse().unwrap();
    let beobachten = BeobachtenStore::new();
    let probes = [
        (b"GET / HTTP/1.1\r\nHost: x\r\n\r\n".to_vec(), "HTTP"),
        (b"SSH-2.0-probe".to_vec(), "SSH"),
        (vec![0x16, 0x03, 0x03, 0x00, 0x05, 0x01, 0x00, 0x00, 0x01, 0x00], "TLS-scanner"),
        (vec![0x42; 5], "port-scanner"),
    ];
    for (probe, type_name) in probes {
        let (probe_rx, _probe_tx) = duplex(256);
        let (_visible_rx, visible_tx) = duplex(256);
        let began = Instant::now();
        handle_bad_client(
            probe_rx,
            visible_tx,
            &probe,
            peer,
            local_addr,
            &config,
            &beobachten,
        ).await;
        let elapsed = began.elapsed();
        // We expect any outcome to take roughly MASK_TIMEOUT (50ms in tests)
        // to mask whether the backend was reachable or refused.
        assert!(elapsed >= Duration::from_millis(30), "Probe {type_name} finished too fast: {elapsed:?}");
    }
}
// ------------------------------------------------------------------
// Masking Budget Stress Tests (OWASP ASVS 5.1.6)
// ------------------------------------------------------------------
#[tokio::test]
async fn masking_budget_stress_under_load() {
    // 50 concurrent bad-client probes: even under load each one must still
    // be held for the masking time budget before the connection resolves.
    let mut config = ProxyConfig::default();
    config.censorship.mask = true;
    config.censorship.mask_host = Some("127.0.0.1".to_string());
    config.censorship.mask_port = 1; // Unlikely port
    let peer: SocketAddr = "192.0.2.20:443".parse().unwrap();
    let local_addr: SocketAddr = "127.0.0.1:443".parse().unwrap();
    let beobachten = Arc::new(BeobachtenStore::new());
    let workers: Vec<_> = (0..50)
        .map(|_| {
            let (probe_rx, _probe_tx) = duplex(256);
            let (_visible_rx, visible_tx) = duplex(256);
            let config = config.clone();
            let beobachten = Arc::clone(&beobachten);
            tokio::spawn(async move {
                let began = Instant::now();
                handle_bad_client(
                    probe_rx,
                    visible_tx,
                    b"probe",
                    peer,
                    local_addr,
                    &config,
                    &beobachten,
                ).await;
                began.elapsed()
            })
        })
        .collect();
    for worker in workers {
        let elapsed = worker.await.unwrap();
        assert!(elapsed >= Duration::from_millis(30), "Stress probe finished too fast: {elapsed:?}");
    }
}
// ------------------------------------------------------------------
// detect_client_type Fingerprint Check
// ------------------------------------------------------------------
#[test]
fn test_detect_client_type_boundary_cases() {
    // Payloads shorter than 10 bytes fall back to the port-scanner label...
    let nine = [0x42u8; 9];
    assert_eq!(detect_client_type(&nine), "port-scanner");
    // ...while 10+ unclassifiable bytes are merely "unknown".
    let ten = [0x42u8; 10];
    assert_eq!(detect_client_type(&ten), "unknown");
    // An HTTP verb needs its trailing space to be recognized; "GET/" falls
    // through to the short-payload heuristic, "GET /path" matches even at
    // 9 bytes.
    assert_eq!(detect_client_type(b"GET/"), "port-scanner"); // because len < 10
    assert_eq!(detect_client_type(b"GET /path"), "HTTP");
}
// ------------------------------------------------------------------
// Priority 2: Slowloris and Slow Read Attacks (OWASP ASVS 5.1.5)
// ------------------------------------------------------------------
#[tokio::test]
async fn masking_slowloris_client_idle_timeout_rejected() {
    // Slowloris-style prober: sends a full request, then drips one byte
    // 160ms later. The masking relay must have cut the client off by then,
    // so the backend never sees the late byte (backend waits 220ms for it).
    let listener = TcpListener::bind("127.0.0.1:0").await.unwrap();
    let backend_addr = listener.local_addr().unwrap();
    let initial = b"GET / HTTP/1.1\r\nHost: front.example\r\n\r\n".to_vec();
    // Backend checks both halves: full initial request arrives, drip does not.
    let accept_task = tokio::spawn({
        let initial = initial.clone();
        async move {
            let (mut stream, _) = listener.accept().await.unwrap();
            let mut observed = vec![0u8; initial.len()];
            stream.read_exact(&mut observed).await.unwrap();
            assert_eq!(observed, initial);
            let mut drip = [0u8; 1];
            let drip_read = tokio::time::timeout(Duration::from_millis(220), stream.read_exact(&mut drip)).await;
            assert!(
                drip_read.is_err() || drip_read.unwrap().is_err(),
                "backend must not receive post-timeout slowloris drip bytes"
            );
        }
    });
    // Masking pointed at the checking backend above.
    let mut config = ProxyConfig::default();
    config.censorship.mask = true;
    config.censorship.mask_host = Some("127.0.0.1".to_string());
    config.censorship.mask_port = backend_addr.port();
    let beobachten = BeobachtenStore::new();
    let peer: SocketAddr = "192.0.2.10:12345".parse().unwrap();
    let local: SocketAddr = "192.0.2.1:443".parse().unwrap();
    let (mut client_writer, client_reader) = duplex(1024);
    let (_client_visible_reader, client_visible_writer) = duplex(1024);
    let handle = tokio::spawn(async move {
        handle_bad_client(
            client_reader,
            client_visible_writer,
            &initial,
            peer,
            local,
            &config,
            &beobachten,
        )
        .await;
    });
    // Wait past the relay's idle window (presumably <160ms under test
    // config — TODO confirm), then drip one byte.
    tokio::time::sleep(Duration::from_millis(160)).await;
    let _ = client_writer.write_all(b"X").await;
    handle.await.unwrap();
    accept_task.await.unwrap();
}
// ------------------------------------------------------------------
// Priority 2: Fallback Server Down / Fingerprinting (OWASP ASVS 5.1.7)
// ------------------------------------------------------------------
#[tokio::test]
async fn masking_fallback_down_mimics_timeout() {
    // Point the mask backend at a port that refuses connections immediately,
    // then verify the handler still burns its full connect budget so that a
    // down fallback is indistinguishable from a slow one.
    let mut config = ProxyConfig::default();
    config.censorship.mask = true;
    config.censorship.mask_host = Some("127.0.0.1".to_string());
    config.censorship.mask_port = 1; // Unlikely port
    let beobachten = BeobachtenStore::new();
    let peer: SocketAddr = "192.0.2.12:12345".parse().unwrap();
    let local: SocketAddr = "192.0.2.1:443".parse().unwrap();
    let (server_reader, server_writer) = duplex(1024);
    let start = Instant::now();
    handle_bad_client(server_reader, server_writer, b"GET / HTTP/1.1\r\n", peer, local, &config, &beobachten).await;
    let spent = start.elapsed();
    // It should wait for MASK_TIMEOUT (50ms in tests) even if connection was refused immediately
    assert!(spent >= Duration::from_millis(40), "Must respect connect budget even on failure: {:?}", spent);
}
// ------------------------------------------------------------------
// Priority 2: SSRF Prevention (OWASP ASVS 5.1.2)
// ------------------------------------------------------------------
#[tokio::test]
async fn masking_ssrf_resolve_internal_ranges_blocked() {
    use crate::network::dns_overrides::resolve_socket_addr;
    // None of these loopback/link-local/private/unspecified literals are
    // configured as DNS overrides, so every lookup must come back empty.
    for ip in ["127.0.0.1", "169.254.169.254", "10.0.0.1", "192.168.1.1", "0.0.0.0"] {
        assert!(
            resolve_socket_addr(ip, 80).is_none(),
            "runtime DNS overrides must not resolve unconfigured literal host targets"
        );
    }
}
#[tokio::test]
async fn masking_unknown_proxy_protocol_version_falls_back_to_v1_unknown_header() {
    // An unsupported mask_proxy_protocol value (255 here) must degrade to the
    // PROXY protocol v1 "UNKNOWN" header instead of corrupting the forward.
    let listener = TcpListener::bind("127.0.0.1:0").await.unwrap();
    let backend_addr = listener.local_addr().unwrap();
    let accept_task = tokio::spawn(async move {
        let (mut stream, _) = listener.accept().await.unwrap();
        // 15 bytes is exactly the length of "PROXY UNKNOWN\r\n".
        let mut header = [0u8; 15];
        stream.read_exact(&mut header).await.unwrap();
        assert_eq!(&header, b"PROXY UNKNOWN\r\n");
        // The client's initial bytes must follow the fallback header intact.
        let mut payload = [0u8; 5];
        stream.read_exact(&mut payload).await.unwrap();
        assert_eq!(&payload, b"probe");
    });
    let mut config = ProxyConfig::default();
    config.censorship.mask = true;
    config.censorship.mask_host = Some("127.0.0.1".to_string());
    config.censorship.mask_port = backend_addr.port();
    config.censorship.mask_proxy_protocol = 255;
    // Mixed-family addresses (IPv4 peer, IPv6 local) exercise the UNKNOWN path.
    let peer: SocketAddr = "198.51.100.77:50001".parse().unwrap();
    let local_addr: SocketAddr = "[2001:db8::10]:443".parse().unwrap();
    let beobachten = BeobachtenStore::new();
    let (client_reader, _client_writer) = duplex(128);
    let (_client_visible_reader, client_visible_writer) = duplex(128);
    handle_bad_client(
        client_reader,
        client_visible_writer,
        b"probe",
        peer,
        local_addr,
        &config,
        &beobachten,
    )
    .await;
    accept_task.await.unwrap();
}
#[tokio::test]
async fn masking_zero_length_initial_data_does_not_hang_or_panic() {
    // Degenerate input: the client hands over zero initial bytes and closes.
    // The handler must neither hang nor panic, and the backend must see EOF.
    let listener = TcpListener::bind("127.0.0.1:0").await.unwrap();
    let backend_addr = listener.local_addr().unwrap();
    let accept_task = tokio::spawn(async move {
        let (mut stream, _) = listener.accept().await.unwrap();
        let mut one = [0u8; 1];
        // read() returning Ok(0) within the timeout means a clean EOF.
        let n = tokio::time::timeout(Duration::from_millis(150), stream.read(&mut one))
            .await
            .unwrap()
            .unwrap();
        assert_eq!(n, 0, "backend must observe clean EOF for empty initial payload");
    });
    let mut config = ProxyConfig::default();
    config.censorship.mask = true;
    config.censorship.mask_host = Some("127.0.0.1".to_string());
    config.censorship.mask_port = backend_addr.port();
    let peer: SocketAddr = "203.0.113.70:50002".parse().unwrap();
    let local: SocketAddr = "127.0.0.1:443".parse().unwrap();
    let beobachten = BeobachtenStore::new();
    let (client_reader, client_writer) = duplex(64);
    // Dropping the writer immediately makes the client side an instant EOF.
    drop(client_writer);
    let (_client_visible_reader, client_visible_writer) = duplex(64);
    handle_bad_client(
        client_reader,
        client_visible_writer,
        b"",
        peer,
        local,
        &config,
        &beobachten,
    )
    .await;
    accept_task.await.unwrap();
}
#[tokio::test]
async fn masking_oversized_initial_payload_is_forwarded_verbatim() {
    // A 32 KiB initial payload (far larger than the 64-byte duplex buffers
    // used for the client side) must still reach the backend byte-for-byte.
    let listener = TcpListener::bind("127.0.0.1:0").await.unwrap();
    let backend_addr = listener.local_addr().unwrap();
    let payload = vec![0xA5u8; 32 * 1024];
    let accept_task = tokio::spawn({
        let payload = payload.clone();
        async move {
            let (mut stream, _) = listener.accept().await.unwrap();
            let mut observed = vec![0u8; payload.len()];
            stream.read_exact(&mut observed).await.unwrap();
            assert_eq!(observed, payload, "large initial payload must stay byte-for-byte");
        }
    });
    let mut config = ProxyConfig::default();
    config.censorship.mask = true;
    config.censorship.mask_host = Some("127.0.0.1".to_string());
    config.censorship.mask_port = backend_addr.port();
    let peer: SocketAddr = "203.0.113.71:50003".parse().unwrap();
    let local: SocketAddr = "127.0.0.1:443".parse().unwrap();
    let beobachten = BeobachtenStore::new();
    let (client_reader, _client_writer) = duplex(64);
    let (_client_visible_reader, client_visible_writer) = duplex(64);
    handle_bad_client(
        client_reader,
        client_visible_writer,
        &payload,
        peer,
        local,
        &config,
        &beobachten,
    )
    .await;
    accept_task.await.unwrap();
}
#[tokio::test]
async fn masking_refused_backend_keeps_constantish_timing_floor_under_burst() {
    // Port 1 refuses connections immediately; across a burst of sessions the
    // handler must still hold a minimum latency floor on every attempt so the
    // refused-backend path cannot be fingerprinted by timing.
    let mut config = ProxyConfig::default();
    config.censorship.mask = true;
    config.censorship.mask_host = Some("127.0.0.1".to_string());
    config.censorship.mask_port = 1;
    let peer: SocketAddr = "203.0.113.72:50004".parse().unwrap();
    let local: SocketAddr = "127.0.0.1:443".parse().unwrap();
    let beobachten = BeobachtenStore::new();
    let mut attempts_left = 16;
    while attempts_left > 0 {
        attempts_left -= 1;
        let (client_reader, _client_writer) = duplex(128);
        let (_client_visible_reader, client_visible_writer) = duplex(128);
        let started = Instant::now();
        handle_bad_client(
            client_reader,
            client_visible_writer,
            b"GET / HTTP/1.1\r\n",
            peer,
            local,
            &config,
            &beobachten,
        )
        .await;
        assert!(
            started.elapsed() >= Duration::from_millis(30),
            "refused-backend path must keep timing floor to reduce fingerprinting"
        );
    }
}
#[tokio::test]
async fn masking_backend_half_close_then_client_half_close_completes_without_hang() {
    // Bilateral half-close choreography: the backend replies and shuts down its
    // write side, the client shuts down its write side, and the masking task
    // must still terminate (bounded by a 2 s timeout) instead of hanging.
    let listener = TcpListener::bind("127.0.0.1:0").await.unwrap();
    let backend_addr = listener.local_addr().unwrap();
    let accept_task = tokio::spawn(async move {
        let (mut stream, _) = listener.accept().await.unwrap();
        let mut pre = [0u8; 4];
        stream.read_exact(&mut pre).await.unwrap();
        assert_eq!(&pre, b"PING");
        stream.write_all(b"PONG").await.unwrap();
        // Backend-side half-close: write direction down, read still open.
        stream.shutdown().await.unwrap();
    });
    let mut config = ProxyConfig::default();
    config.censorship.mask = true;
    config.censorship.mask_host = Some("127.0.0.1".to_string());
    config.censorship.mask_port = backend_addr.port();
    let peer: SocketAddr = "203.0.113.73:50005".parse().unwrap();
    let local: SocketAddr = "127.0.0.1:443".parse().unwrap();
    let beobachten = BeobachtenStore::new();
    let (mut client_writer, client_reader) = duplex(256);
    let (mut client_visible_reader, client_visible_writer) = duplex(256);
    let handle = tokio::spawn(async move {
        handle_bad_client(
            client_reader,
            client_visible_writer,
            b"PING",
            peer,
            local,
            &config,
            &beobachten,
        )
        .await;
    });
    // Client-side half-close after the initial payload was already handed over.
    client_writer.shutdown().await.unwrap();
    // The backend's reply must still flow back to the client after its shutdown.
    let mut got = [0u8; 4];
    client_visible_reader.read_exact(&mut got).await.unwrap();
    assert_eq!(&got, b"PONG");
    timeout(Duration::from_secs(2), handle)
        .await
        .expect("masking task must terminate after bilateral half-close")
        .unwrap();
    accept_task.await.unwrap();
}
#[tokio::test]
async fn chaos_burst_reconnect_storm_for_masking_and_relay_concurrently() {
    // Chaos test: 48 masking sessions and 48 relay sessions run concurrently.
    // Every masking probe must reach the backend and get its reply; every relay
    // session must move bytes both ways untouched; everything joins in time.
    const MASKING_SESSIONS: usize = 48;
    const RELAY_SESSIONS: usize = 48;
    let listener = TcpListener::bind("127.0.0.1:0").await.unwrap();
    let backend_addr = listener.local_addr().unwrap();
    let backend_reply = b"HTTP/1.1 200 OK\r\nContent-Length: 2\r\n\r\nOK".to_vec();
    // Backend accept loop: serves exactly MASKING_SESSIONS storm probes.
    let backend_task = tokio::spawn({
        let backend_reply = backend_reply.clone();
        async move {
            for _ in 0..MASKING_SESSIONS {
                let (mut stream, _) = listener.accept().await.unwrap();
                let mut req = [0u8; 32];
                stream.read_exact(&mut req).await.unwrap();
                assert!(
                    req.starts_with(b"GET /storm/"),
                    "masking backend must receive storm reconnect probes"
                );
                stream.write_all(&backend_reply).await.unwrap();
                stream.shutdown().await.unwrap();
            }
        }
    });
    let mut config = ProxyConfig::default();
    config.censorship.mask = true;
    config.censorship.mask_host = Some("127.0.0.1".to_string());
    config.censorship.mask_port = backend_addr.port();
    config.censorship.mask_proxy_protocol = 0;
    // Arc-wrapped so every spawned session can share config/state cheaply.
    let config = Arc::new(config);
    let beobachten = Arc::new(BeobachtenStore::new());
    let peer: SocketAddr = "198.51.100.200:55555".parse().unwrap();
    let local: SocketAddr = "127.0.0.1:443".parse().unwrap();
    let mut masking_tasks = Vec::with_capacity(MASKING_SESSIONS);
    for i in 0..MASKING_SESSIONS {
        let config = Arc::clone(&config);
        let beobachten = Arc::clone(&beobachten);
        let expected_reply = backend_reply.clone();
        masking_tasks.push(tokio::spawn(async move {
            // Fixed 32-byte probe, zero-padded, carrying a per-session path.
            let mut probe = [0u8; 32];
            let template = format!("GET /storm/{i:04} HTTP/1.1\r\n\r\n");
            let bytes = template.as_bytes();
            probe[..bytes.len()].copy_from_slice(bytes);
            let (client_reader, client_writer) = duplex(256);
            // Drop the writer so the client side presents EOF after the probe.
            drop(client_writer);
            let (mut client_visible_reader, client_visible_writer) = duplex(1024);
            let handle = tokio::spawn(async move {
                handle_bad_client(
                    client_reader,
                    client_visible_writer,
                    &probe,
                    peer,
                    local,
                    &config,
                    &beobachten,
                )
                .await;
            });
            let mut observed = vec![0u8; expected_reply.len()];
            client_visible_reader.read_exact(&mut observed).await.unwrap();
            assert_eq!(observed, expected_reply);
            timeout(Duration::from_secs(2), handle)
                .await
                .expect("masking reconnect task must complete")
                .unwrap();
        }));
    }
    let mut relay_tasks = Vec::with_capacity(RELAY_SESSIONS);
    for i in 0..RELAY_SESSIONS {
        relay_tasks.push(tokio::spawn(async move {
            let stats = Arc::new(Stats::new());
            let (mut client_peer, relay_client) = duplex(4096);
            let (relay_server, mut server_peer) = duplex(4096);
            let (client_reader, client_writer) = tokio::io::split(relay_client);
            let (server_reader, server_writer) = tokio::io::split(relay_server);
            let relay_task = tokio::spawn(relay_bidirectional(
                client_reader,
                client_writer,
                server_reader,
                server_writer,
                1024,
                1024,
                "chaos-storm-relay",
                stats,
                None,
                Arc::new(BufferPool::new()),
            ));
            // Client -> server direction: per-session fill byte, 64 bytes.
            let c2s = vec![(i as u8).wrapping_add(1); 64];
            client_peer.write_all(&c2s).await.unwrap();
            let mut c2s_seen = vec![0u8; c2s.len()];
            server_peer.read_exact(&mut c2s_seen).await.unwrap();
            assert_eq!(c2s_seen, c2s);
            // Server -> client direction: distinct fill byte, 96 bytes.
            let s2c = vec![(i as u8).wrapping_add(17); 96];
            server_peer.write_all(&s2c).await.unwrap();
            let mut s2c_seen = vec![0u8; s2c.len()];
            client_peer.read_exact(&mut s2c_seen).await.unwrap();
            assert_eq!(s2c_seen, s2c);
            // Closing both peers must let the relay future finish cleanly.
            drop(client_peer);
            drop(server_peer);
            timeout(Duration::from_secs(2), relay_task)
                .await
                .expect("relay reconnect task must complete")
                .unwrap()
                .unwrap();
        }));
    }
    for task in masking_tasks {
        timeout(Duration::from_secs(3), task)
            .await
            .expect("masking storm join must complete")
            .unwrap();
    }
    for task in relay_tasks {
        timeout(Duration::from_secs(3), task)
            .await
            .expect("relay storm join must complete")
            .unwrap();
    }
    timeout(Duration::from_secs(3), backend_task)
        .await
        .expect("masking backend accept loop must complete")
        .unwrap();
}
/// Reads `name` from the environment as a positive `usize`.
/// Falls back to `default` when the variable is unset, unparsable, or zero.
fn read_env_usize_or_default(name: &str, default: usize) -> usize {
    std::env::var(name)
        .ok()
        .and_then(|raw| raw.parse::<usize>().ok())
        .filter(|&parsed| parsed > 0)
        .unwrap_or(default)
}
#[tokio::test]
#[ignore = "heavy soak; run manually"]
async fn chaos_burst_reconnect_storm_for_masking_and_relay_multiwave_soak() {
    // Multi-wave soak variant of the reconnect storm: wave counts are tunable
    // via CHAOS_* environment variables; each wave spawns masking and relay
    // sessions and joins them before the next wave starts.
    let waves = read_env_usize_or_default("CHAOS_WAVES", 4);
    let masking_per_wave = read_env_usize_or_default("CHAOS_MASKING_PER_WAVE", 160);
    let relay_per_wave = read_env_usize_or_default("CHAOS_RELAY_PER_WAVE", 160);
    let total_masking = waves * masking_per_wave;
    let listener = TcpListener::bind("127.0.0.1:0").await.unwrap();
    let backend_addr = listener.local_addr().unwrap();
    let backend_reply = b"HTTP/1.1 204 No Content\r\nContent-Length: 0\r\n\r\n".to_vec();
    // Backend accept loop spans all waves: total_masking sessions overall.
    let backend_task = tokio::spawn({
        let backend_reply = backend_reply.clone();
        async move {
            for _ in 0..total_masking {
                let (mut stream, _) = listener.accept().await.unwrap();
                let mut req = [0u8; 32];
                stream.read_exact(&mut req).await.unwrap();
                assert!(
                    req.starts_with(b"GET /storm/"),
                    "mask backend must only receive storm probes"
                );
                stream.write_all(&backend_reply).await.unwrap();
                stream.shutdown().await.unwrap();
            }
        }
    });
    let mut config = ProxyConfig::default();
    config.censorship.mask = true;
    config.censorship.mask_host = Some("127.0.0.1".to_string());
    config.censorship.mask_port = backend_addr.port();
    config.censorship.mask_proxy_protocol = 0;
    let config = Arc::new(config);
    let beobachten = Arc::new(BeobachtenStore::new());
    let peer: SocketAddr = "198.51.100.201:56565".parse().unwrap();
    let local: SocketAddr = "127.0.0.1:443".parse().unwrap();
    for wave in 0..waves {
        let mut masking_tasks = Vec::with_capacity(masking_per_wave);
        for i in 0..masking_per_wave {
            let config = Arc::clone(&config);
            let beobachten = Arc::clone(&beobachten);
            let expected_reply = backend_reply.clone();
            masking_tasks.push(tokio::spawn(async move {
                // Fixed 32-byte zero-padded probe tagged with wave and index.
                let mut probe = [0u8; 32];
                let template = format!("GET /storm/{wave:02}-{i:03}\r\n\r\n");
                let bytes = template.as_bytes();
                probe[..bytes.len()].copy_from_slice(bytes);
                let (client_reader, client_writer) = duplex(256);
                // Dropping the writer yields client-side EOF after the probe.
                drop(client_writer);
                let (mut client_visible_reader, client_visible_writer) = duplex(1024);
                let handle = tokio::spawn(async move {
                    handle_bad_client(
                        client_reader,
                        client_visible_writer,
                        &probe,
                        peer,
                        local,
                        &config,
                        &beobachten,
                    )
                    .await;
                });
                let mut observed = vec![0u8; expected_reply.len()];
                client_visible_reader.read_exact(&mut observed).await.unwrap();
                assert_eq!(observed, expected_reply);
                timeout(Duration::from_secs(3), handle)
                    .await
                    .expect("masking storm task must complete")
                    .unwrap();
            }));
        }
        let mut relay_tasks = Vec::with_capacity(relay_per_wave);
        for i in 0..relay_per_wave {
            relay_tasks.push(tokio::spawn(async move {
                let stats = Arc::new(Stats::new());
                let (mut client_peer, relay_client) = duplex(4096);
                let (relay_server, mut server_peer) = duplex(4096);
                let (client_reader, client_writer) = tokio::io::split(relay_client);
                let (server_reader, server_writer) = tokio::io::split(relay_server);
                let relay_task = tokio::spawn(relay_bidirectional(
                    client_reader,
                    client_writer,
                    server_reader,
                    server_writer,
                    1024,
                    1024,
                    "chaos-multiwave-relay",
                    stats,
                    None,
                    Arc::new(BufferPool::new()),
                ));
                // Fill bytes are derived from (wave, i) so sessions differ.
                let c2s = vec![(wave as u8).wrapping_add(i as u8).wrapping_add(1); 32];
                client_peer.write_all(&c2s).await.unwrap();
                let mut c2s_seen = vec![0u8; c2s.len()];
                server_peer.read_exact(&mut c2s_seen).await.unwrap();
                assert_eq!(c2s_seen, c2s);
                let s2c = vec![(wave as u8).wrapping_add(i as u8).wrapping_add(17); 48];
                server_peer.write_all(&s2c).await.unwrap();
                let mut s2c_seen = vec![0u8; s2c.len()];
                client_peer.read_exact(&mut s2c_seen).await.unwrap();
                assert_eq!(s2c_seen, s2c);
                drop(client_peer);
                drop(server_peer);
                timeout(Duration::from_secs(3), relay_task)
                    .await
                    .expect("relay storm task must complete")
                    .unwrap()
                    .unwrap();
            }));
        }
        // Join the full wave before starting the next one.
        for task in masking_tasks {
            timeout(Duration::from_secs(6), task)
                .await
                .expect("masking wave task join must complete")
                .unwrap();
        }
        for task in relay_tasks {
            timeout(Duration::from_secs(6), task)
                .await
                .expect("relay wave task join must complete")
                .unwrap();
        }
    }
    timeout(Duration::from_secs(8), backend_task)
        .await
        .expect("mask backend must complete all accepted storm sessions")
        .unwrap();
}
#[tokio::test]
#[ignore = "heavy soak; run manually"]
async fn masking_timing_bucket_soak_refused_backend_stays_within_narrow_band() {
    // Timing-oracle soak: 128 runs against an instantly-refusing backend must
    // keep the p10..p90 latency spread within 40 ms to limit fingerprinting.
    let mut config = ProxyConfig::default();
    config.censorship.mask = true;
    config.censorship.mask_host = Some("127.0.0.1".to_string());
    config.censorship.mask_port = 1;
    let peer: SocketAddr = "203.0.113.74:50006".parse().unwrap();
    let local: SocketAddr = "127.0.0.1:443".parse().unwrap();
    let beobachten = BeobachtenStore::new();
    let mut samples = Vec::with_capacity(128);
    for _ in 0..128 {
        let (client_reader, _client_writer) = duplex(128);
        let (_client_visible_reader, client_visible_writer) = duplex(128);
        let started = Instant::now();
        handle_bad_client(
            client_reader,
            client_visible_writer,
            b"GET / HTTP/1.1\r\n",
            peer,
            local,
            &config,
            &beobachten,
        )
        .await;
        samples.push(started.elapsed().as_millis());
    }
    // Sort once, then read the 10th and 90th percentile buckets directly.
    samples.sort_unstable();
    let p10 = samples[samples.len() / 10];
    let p90 = samples[(samples.len() * 9) / 10];
    assert!(
        p90.saturating_sub(p10) <= 40,
        "timing spread too wide for refused-backend masking path: p10={p10}ms p90={p90}ms"
    );
}

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,799 @@
use super::*;
use crate::crypto::AesCtr;
use crate::stats::Stats;
use crate::stream::{BufferPool, CryptoReader};
use std::sync::{Arc, Mutex, OnceLock};
use std::sync::atomic::AtomicU64;
use tokio::io::AsyncWriteExt;
use tokio::io::duplex;
use tokio::time::{Duration as TokioDuration, Instant as TokioInstant, timeout};
/// Wraps `reader` in a `CryptoReader` keyed with an all-zero key and IV so
/// test ciphertext is fully deterministic.
fn make_crypto_reader<T>(reader: T) -> CryptoReader<T>
where
    T: AsyncRead + Unpin + Send + 'static,
{
    let cipher = AesCtr::new(&[0u8; 32], 0u128);
    CryptoReader::new(reader, cipher)
}
/// Encrypts `plaintext` with the same all-zero key/IV used by
/// `make_crypto_reader`, so the produced bytes decrypt back to the input.
fn encrypt_for_reader(plaintext: &[u8]) -> Vec<u8> {
    let mut cipher = AesCtr::new(&[0u8; 32], 0u128);
    cipher.encrypt(plaintext)
}
/// Builds a minimal `RelayForensicsState` for the idle-policy tests.
/// Identifiers are derived from `conn_id` so concurrent test sessions stay
/// distinguishable; byte counters start at zero.
fn make_forensics(conn_id: u64, started_at: Instant) -> RelayForensicsState {
    RelayForensicsState {
        // Fixed offset keeps trace ids visually distinct from raw conn ids.
        trace_id: 0xA000_0000 + conn_id,
        conn_id,
        user: format!("idle-test-user-{conn_id}"),
        peer: "127.0.0.1:50000".parse().expect("peer parse must succeed"),
        peer_hash: hash_ip("127.0.0.1".parse().expect("ip parse must succeed")),
        started_at,
        bytes_c2me: 0,
        bytes_me2c: Arc::new(AtomicU64::new(0)),
        desync_all_full: false,
    }
}
/// Builds an enabled `RelayClientIdlePolicy` from millisecond knobs.
/// The legacy frame-read timeout mirrors the hard idle deadline.
fn make_idle_policy(soft_ms: u64, hard_ms: u64, grace_ms: u64) -> RelayClientIdlePolicy {
    let ms = Duration::from_millis;
    RelayClientIdlePolicy {
        enabled: true,
        soft_idle: ms(soft_ms),
        hard_idle: ms(hard_ms),
        grace_after_downstream_activity: ms(grace_ms),
        legacy_frame_read_timeout: ms(hard_ms),
    }
}
/// Returns the process-wide mutex that serializes idle-pressure tests, which
/// share global eviction state; initialized lazily on first use.
fn idle_pressure_test_lock() -> &'static Mutex<()> {
    static TEST_LOCK: OnceLock<Mutex<()>> = OnceLock::new();
    TEST_LOCK.get_or_init(Mutex::default)
}
/// Acquires the idle-pressure test lock, recovering the guard even if a
/// previous test panicked while holding it (lock poisoning is harmless here).
fn acquire_idle_pressure_test_lock() -> std::sync::MutexGuard<'static, ()> {
    idle_pressure_test_lock()
        .lock()
        .unwrap_or_else(std::sync::PoisonError::into_inner)
}
#[tokio::test]
async fn idle_policy_soft_mark_then_hard_close_increments_reason_counters() {
    // A client that never sends a frame must first be soft-marked (40 ms),
    // then hard-closed (120 ms) with a TimedOut I/O error carrying a clear
    // reason string, and each reason counter must tick exactly once.
    let (reader, _writer) = duplex(1024);
    let mut crypto_reader = make_crypto_reader(reader);
    let buffer_pool = Arc::new(BufferPool::new());
    let stats = Stats::new();
    let session_started_at = Instant::now();
    let forensics = make_forensics(1, session_started_at);
    let mut frame_counter = 0u64;
    let mut idle_state = RelayClientIdleState::new(session_started_at);
    let idle_policy = make_idle_policy(40, 120, 0);
    let last_downstream_activity_ms = AtomicU64::new(0);
    let start = TokioInstant::now();
    // Outer 2 s timeout only guards the test against hangs; the 120 ms hard
    // idle deadline inside the policy is what should fire.
    let result = timeout(
        TokioDuration::from_secs(2),
        read_client_payload_with_idle_policy(
            &mut crypto_reader,
            ProtoTag::Intermediate,
            1024,
            &buffer_pool,
            &forensics,
            &mut frame_counter,
            &stats,
            &idle_policy,
            &mut idle_state,
            &last_downstream_activity_ms,
            session_started_at,
        ),
    )
    .await
    .expect("idle test must complete");
    assert!(matches!(result, Err(ProxyError::Io(ref e)) if e.kind() == std::io::ErrorKind::TimedOut));
    let err_text = match result {
        Err(ProxyError::Io(ref e)) => e.to_string(),
        _ => String::new(),
    };
    assert!(
        err_text.contains("middle-relay hard idle timeout"),
        "hard close must expose a clear timeout reason"
    );
    // 80 ms floor: the hard close must not fire well before its deadline.
    assert!(
        start.elapsed() >= TokioDuration::from_millis(80),
        "hard timeout must not trigger before idle deadline window"
    );
    assert_eq!(stats.get_relay_idle_soft_mark_total(), 1);
    assert_eq!(stats.get_relay_idle_hard_close_total(), 1);
}
#[tokio::test]
async fn idle_policy_downstream_activity_grace_extends_hard_deadline() {
    // Recent downstream activity (recorded at 20 ms) plus a 100 ms grace must
    // push the effective hard-close moment past the bare 60 ms hard deadline.
    let (reader, _writer) = duplex(1024);
    let mut crypto_reader = make_crypto_reader(reader);
    let buffer_pool = Arc::new(BufferPool::new());
    let stats = Stats::new();
    let session_started_at = Instant::now();
    let forensics = make_forensics(2, session_started_at);
    let mut frame_counter = 0u64;
    let mut idle_state = RelayClientIdleState::new(session_started_at);
    let idle_policy = make_idle_policy(30, 60, 100);
    // Simulates downstream traffic observed 20 ms into the session.
    let last_downstream_activity_ms = AtomicU64::new(20);
    let start = TokioInstant::now();
    let result = timeout(
        TokioDuration::from_secs(2),
        read_client_payload_with_idle_policy(
            &mut crypto_reader,
            ProtoTag::Intermediate,
            1024,
            &buffer_pool,
            &forensics,
            &mut frame_counter,
            &stats,
            &idle_policy,
            &mut idle_state,
            &last_downstream_activity_ms,
            session_started_at,
        ),
    )
    .await
    .expect("grace test must complete");
    assert!(matches!(result, Err(ProxyError::Io(ref e)) if e.kind() == std::io::ErrorKind::TimedOut));
    assert!(
        start.elapsed() >= TokioDuration::from_millis(100),
        "recent downstream activity must extend hard idle deadline"
    );
}
#[tokio::test]
async fn relay_idle_policy_disabled_keeps_legacy_timeout_behavior() {
    // The legacy (non-idle-policy) read path must keep its original timeout
    // reason string and must not touch the idle-policy counters at all.
    let (reader, _writer) = duplex(1024);
    let mut crypto_reader = make_crypto_reader(reader);
    let buffer_pool = Arc::new(BufferPool::new());
    let stats = Stats::new();
    let forensics = make_forensics(3, Instant::now());
    let mut frame_counter = 0u64;
    let result = read_client_payload(
        &mut crypto_reader,
        ProtoTag::Intermediate,
        1024,
        Duration::from_millis(60),
        &buffer_pool,
        &forensics,
        &mut frame_counter,
        &stats,
    )
    .await;
    assert!(matches!(result, Err(ProxyError::Io(ref e)) if e.kind() == std::io::ErrorKind::TimedOut));
    let err_text = match result {
        Err(ProxyError::Io(ref e)) => e.to_string(),
        _ => String::new(),
    };
    assert!(
        err_text.contains("middle-relay client frame read timeout"),
        "legacy mode must keep expected timeout reason"
    );
    // Idle-policy counters must stay untouched on the legacy path.
    assert_eq!(stats.get_relay_idle_soft_mark_total(), 0);
    assert_eq!(stats.get_relay_idle_hard_close_total(), 0);
}
#[tokio::test]
async fn adversarial_partial_frame_trickle_cannot_bypass_hard_idle_close() {
    // Adversary sends a single byte of an otherwise valid encrypted frame and
    // then goes silent: partial progress must NOT reset the hard idle timer,
    // and the incomplete frame must never be counted as received.
    let (reader, mut writer) = duplex(1024);
    let mut crypto_reader = make_crypto_reader(reader);
    let buffer_pool = Arc::new(BufferPool::new());
    let stats = Stats::new();
    let session_started_at = Instant::now();
    let forensics = make_forensics(4, session_started_at);
    let mut frame_counter = 0u64;
    let mut idle_state = RelayClientIdleState::new(session_started_at);
    let idle_policy = make_idle_policy(30, 90, 0);
    let last_downstream_activity_ms = AtomicU64::new(0);
    // Build a well-formed frame: 4-byte LE length prefix (8) + 8 payload bytes.
    let mut plaintext = Vec::with_capacity(12);
    plaintext.extend_from_slice(&8u32.to_le_bytes());
    plaintext.extend_from_slice(&[1, 2, 3, 4, 5, 6, 7, 8]);
    let encrypted = encrypt_for_reader(&plaintext);
    // Trickle exactly one ciphertext byte, then stall forever.
    writer
        .write_all(&encrypted[..1])
        .await
        .expect("must write a single trickle byte");
    let result = timeout(
        TokioDuration::from_secs(2),
        read_client_payload_with_idle_policy(
            &mut crypto_reader,
            ProtoTag::Intermediate,
            1024,
            &buffer_pool,
            &forensics,
            &mut frame_counter,
            &stats,
            &idle_policy,
            &mut idle_state,
            &last_downstream_activity_ms,
            session_started_at,
        ),
    )
    .await
    .expect("partial frame trickle test must complete");
    assert!(matches!(result, Err(ProxyError::Io(ref e)) if e.kind() == std::io::ErrorKind::TimedOut));
    assert_eq!(frame_counter, 0, "partial trickle must not count as a valid frame");
}
#[tokio::test]
async fn successful_client_frame_resets_soft_idle_mark() {
    // A session pre-marked as soft-idle that then delivers one full valid
    // frame must have the mark cleared and the frame counted and returned.
    let (reader, mut writer) = duplex(1024);
    let mut crypto_reader = make_crypto_reader(reader);
    let buffer_pool = Arc::new(BufferPool::new());
    let stats = Stats::new();
    let session_started_at = Instant::now();
    let forensics = make_forensics(5, session_started_at);
    let mut frame_counter = 0u64;
    let mut idle_state = RelayClientIdleState::new(session_started_at);
    // Pretend the soft-idle threshold already fired for this session.
    idle_state.soft_idle_marked = true;
    // Generous deadlines so the read itself cannot time out here.
    let idle_policy = make_idle_policy(200, 300, 0);
    let last_downstream_activity_ms = AtomicU64::new(0);
    // Frame layout: 4-byte LE length prefix followed by the payload bytes.
    let payload = [9u8, 8, 7, 6, 5, 4, 3, 2];
    let mut plaintext = Vec::with_capacity(4 + payload.len());
    plaintext.extend_from_slice(&(payload.len() as u32).to_le_bytes());
    plaintext.extend_from_slice(&payload);
    let encrypted = encrypt_for_reader(&plaintext);
    writer
        .write_all(&encrypted)
        .await
        .expect("must write full encrypted frame");
    let read = read_client_payload_with_idle_policy(
        &mut crypto_reader,
        ProtoTag::Intermediate,
        1024,
        &buffer_pool,
        &forensics,
        &mut frame_counter,
        &stats,
        &idle_policy,
        &mut idle_state,
        &last_downstream_activity_ms,
        session_started_at,
    )
    .await
    .expect("frame read must succeed")
    .expect("frame must be returned");
    assert_eq!(read.0.as_ref(), &payload);
    assert_eq!(frame_counter, 1);
    assert!(
        !idle_state.soft_idle_marked,
        "a valid client frame must clear soft-idle mark"
    );
}
#[tokio::test]
async fn protocol_desync_small_frame_updates_reason_counter() {
    // A 3-byte frame is below the Secure-tag minimum: the read must fail with
    // a "Frame too small" protocol error and bump the desync-close counter.
    let (reader, mut writer) = duplex(1024);
    let mut crypto_reader = make_crypto_reader(reader);
    let buffer_pool = Arc::new(BufferPool::new());
    let stats = Stats::new();
    let forensics = make_forensics(6, Instant::now());
    let mut frame_counter = 0u64;
    // Undersized frame: 4-byte LE length prefix (3) plus only 3 payload bytes.
    let mut plaintext = Vec::with_capacity(7);
    plaintext.extend_from_slice(&3u32.to_le_bytes());
    plaintext.extend_from_slice(&[1u8, 2, 3]);
    let encrypted = encrypt_for_reader(&plaintext);
    writer.write_all(&encrypted).await.expect("must write frame");
    let result = read_client_payload(
        &mut crypto_reader,
        ProtoTag::Secure,
        1024,
        TokioDuration::from_secs(1),
        &buffer_pool,
        &forensics,
        &mut frame_counter,
        &stats,
    )
    .await;
    assert!(matches!(result, Err(ProxyError::Proxy(ref msg)) if msg.contains("Frame too small")));
    assert_eq!(stats.get_relay_protocol_desync_close_total(), 1);
}
#[tokio::test]
async fn stress_many_idle_sessions_fail_closed_without_hang() {
    // 24 concurrent silent sessions, each with its own Stats: every one must
    // hard-close with TimedOut, record exactly one hard close, and count no
    // frames — and none of them may hang past the 2 s safety timeout.
    let mut tasks = Vec::with_capacity(24);
    for idx in 0..24u64 {
        tasks.push(tokio::spawn(async move {
            let (reader, _writer) = duplex(256);
            let mut crypto_reader = make_crypto_reader(reader);
            let buffer_pool = Arc::new(BufferPool::new());
            let stats = Stats::new();
            let session_started_at = Instant::now();
            // Offset conn ids so sessions are distinguishable in forensics.
            let forensics = make_forensics(100 + idx, session_started_at);
            let mut frame_counter = 0u64;
            let mut idle_state = RelayClientIdleState::new(session_started_at);
            let idle_policy = make_idle_policy(20, 50, 10);
            let last_downstream_activity_ms = AtomicU64::new(0);
            let result = timeout(
                TokioDuration::from_secs(2),
                read_client_payload_with_idle_policy(
                    &mut crypto_reader,
                    ProtoTag::Intermediate,
                    1024,
                    &buffer_pool,
                    &forensics,
                    &mut frame_counter,
                    &stats,
                    &idle_policy,
                    &mut idle_state,
                    &last_downstream_activity_ms,
                    session_started_at,
                ),
            )
            .await
            .expect("stress task must complete");
            assert!(matches!(result, Err(ProxyError::Io(ref e)) if e.kind() == std::io::ErrorKind::TimedOut));
            assert_eq!(stats.get_relay_idle_hard_close_total(), 1);
            assert_eq!(frame_counter, 0);
        }));
    }
    for task in tasks {
        task.await.expect("stress task must not panic");
    }
}
#[test]
fn pressure_evicts_oldest_idle_candidate_with_deterministic_ordering() {
    // With two idle candidates registered, a pressure event must evict the
    // oldest one first — a newer candidate asking first must be refused.
    let _guard = acquire_idle_pressure_test_lock();
    clear_relay_idle_pressure_state_for_testing();
    let stats = Stats::new();
    assert!(mark_relay_idle_candidate(10));
    assert!(mark_relay_idle_candidate(11));
    assert_eq!(oldest_relay_idle_candidate(), Some(10));
    note_relay_pressure_event();
    // The newer candidate (11) must not consume the eviction slot.
    let mut seen_for_newer = 0u64;
    assert!(
        !maybe_evict_idle_candidate_on_pressure(11, &mut seen_for_newer, &stats),
        "newer idle candidate must not be evicted while older candidate exists"
    );
    assert_eq!(oldest_relay_idle_candidate(), Some(10));
    // The oldest candidate (10) is the one that must be evicted.
    let mut seen_for_oldest = 0u64;
    assert!(
        maybe_evict_idle_candidate_on_pressure(10, &mut seen_for_oldest, &stats),
        "oldest idle candidate must be evicted first under pressure"
    );
    assert_eq!(oldest_relay_idle_candidate(), Some(11));
    assert_eq!(stats.get_relay_pressure_evict_total(), 1);
    clear_relay_idle_pressure_state_for_testing();
}
#[test]
fn pressure_does_not_evict_without_new_pressure_signal() {
    // Without a pressure event newer than the session's cursor, even the
    // oldest idle candidate must stay resident.
    let _guard = acquire_idle_pressure_test_lock();
    clear_relay_idle_pressure_state_for_testing();
    let stats = Stats::new();
    assert!(mark_relay_idle_candidate(21));
    // Start the cursor at the current sequence so no "new" pressure exists.
    let mut seen = relay_pressure_event_seq();
    assert!(
        !maybe_evict_idle_candidate_on_pressure(21, &mut seen, &stats),
        "without new pressure signal, candidate must stay"
    );
    assert_eq!(stats.get_relay_pressure_evict_total(), 0);
    assert_eq!(oldest_relay_idle_candidate(), Some(21));
    clear_relay_idle_pressure_state_for_testing();
}
#[test]
fn stress_pressure_eviction_preserves_fifo_across_many_candidates() {
    // 64 candidates marked in order; 64 pressure events must evict them in
    // exactly the same FIFO order, one per event.
    let _guard = acquire_idle_pressure_test_lock();
    clear_relay_idle_pressure_state_for_testing();
    let stats = Stats::new();
    // Each session keeps its own pressure-sequence cursor, as in production.
    let mut seen_per_conn = std::collections::HashMap::new();
    for conn_id in 1000u64..1064u64 {
        assert!(mark_relay_idle_candidate(conn_id));
        seen_per_conn.insert(conn_id, 0u64);
    }
    for expected in 1000u64..1064u64 {
        note_relay_pressure_event();
        let mut seen = *seen_per_conn
            .get(&expected)
            .expect("per-conn pressure cursor must exist");
        assert!(
            maybe_evict_idle_candidate_on_pressure(expected, &mut seen, &stats),
            "expected conn_id {expected} must be evicted next by deterministic FIFO ordering"
        );
        seen_per_conn.insert(expected, seen);
        // After each eviction the next-oldest candidate moves to the front.
        let next = if expected == 1063 {
            None
        } else {
            Some(expected + 1)
        };
        assert_eq!(oldest_relay_idle_candidate(), next);
    }
    assert_eq!(stats.get_relay_pressure_evict_total(), 64);
    clear_relay_idle_pressure_state_for_testing();
}
#[test]
fn blackhat_single_pressure_event_must_not_evict_more_than_one_candidate() {
    let _guard = acquire_idle_pressure_test_lock();
    clear_relay_idle_pressure_state_for_testing();
    let stats = Stats::new();
    for conn_id in [301u64, 302, 303] {
        assert!(mark_relay_idle_candidate(conn_id));
    }
    // A single pressure event should authorize at most one eviction globally,
    // even when every marked candidate asks in turn with a fresh cursor.
    note_relay_pressure_event();
    let mut evicted_total = 0usize;
    for conn_id in [301u64, 302, 303] {
        let mut seen = 0u64;
        if maybe_evict_idle_candidate_on_pressure(conn_id, &mut seen, &stats) {
            evicted_total += 1;
        }
    }
    assert_eq!(
        evicted_total, 1,
        "single pressure event must not cascade-evict multiple idle candidates"
    );
    clear_relay_idle_pressure_state_for_testing();
}
#[test]
fn blackhat_pressure_counter_must_track_global_budget_not_per_session_cursor() {
    // Even with independent per-session cursors (both starting at 0), one
    // pressure event grants exactly one eviction globally — the second
    // session must not replay the already-consumed budget.
    let _guard = acquire_idle_pressure_test_lock();
    clear_relay_idle_pressure_state_for_testing();
    let stats = Stats::new();
    assert!(mark_relay_idle_candidate(401));
    assert!(mark_relay_idle_candidate(402));
    let mut seen_oldest = 0u64;
    let mut seen_next = 0u64;
    note_relay_pressure_event();
    assert!(
        maybe_evict_idle_candidate_on_pressure(401, &mut seen_oldest, &stats),
        "oldest candidate must consume pressure budget first"
    );
    assert!(
        !maybe_evict_idle_candidate_on_pressure(402, &mut seen_next, &stats),
        "next candidate must not consume the same pressure budget"
    );
    assert_eq!(
        stats.get_relay_pressure_evict_total(),
        1,
        "single pressure budget must produce exactly one eviction"
    );
    clear_relay_idle_pressure_state_for_testing();
}
#[test]
fn blackhat_stale_pressure_before_idle_mark_must_not_trigger_eviction() {
    // A pressure event recorded while the candidate set was empty must not be
    // applied retroactively to a candidate marked afterwards.
    let _guard = acquire_idle_pressure_test_lock();
    clear_relay_idle_pressure_state_for_testing();
    let stats = Stats::new();
    // Pressure happened before any idle candidate existed.
    note_relay_pressure_event();
    assert!(mark_relay_idle_candidate(501));
    let mut seen = 0u64;
    assert!(
        !maybe_evict_idle_candidate_on_pressure(501, &mut seen, &stats),
        "stale pressure (before soft-idle mark) must not evict newly marked candidate"
    );
    clear_relay_idle_pressure_state_for_testing();
}
#[test]
fn blackhat_stale_pressure_must_not_evict_any_of_newly_marked_batch() {
    // A pressure event that lands before any of the batch exists must not
    // evict a single member of the batch once it is marked.
    let _guard = acquire_idle_pressure_test_lock();
    clear_relay_idle_pressure_state_for_testing();
    let stats = Stats::new();
    note_relay_pressure_event();
    let conns = [511, 512, 513];
    for conn in conns {
        assert!(mark_relay_idle_candidate(conn));
    }
    let mut evicted = 0usize;
    for conn in conns {
        // Each session starts with a fresh zeroed cursor, as in production.
        let mut cursor = 0u64;
        if maybe_evict_idle_candidate_on_pressure(conn, &mut cursor, &stats) {
            evicted += 1;
        }
    }
    assert_eq!(
        evicted, 0,
        "stale pressure event must not evict any candidate from a newly marked batch"
    );
    clear_relay_idle_pressure_state_for_testing();
}
#[test]
fn blackhat_stale_pressure_seen_without_candidates_must_be_globally_invalidated() {
    // Once any session observes pressure against an empty candidate set, the
    // budget is void for every session — not just the one that looked first.
    let _guard = acquire_idle_pressure_test_lock();
    clear_relay_idle_pressure_state_for_testing();
    let stats = Stats::new();
    note_relay_pressure_event();
    // Session A polls while the candidate set is still empty.
    let mut cursor_a = 0u64;
    let evicted_a = maybe_evict_idle_candidate_on_pressure(999_001, &mut cursor_a, &stats);
    assert!(
        !evicted_a,
        "no candidate existed, so no eviction is possible"
    );
    // A candidate shows up later; session B must find the budget already gone.
    assert!(mark_relay_idle_candidate(521));
    let mut cursor_b = 0u64;
    assert!(
        !maybe_evict_idle_candidate_on_pressure(521, &mut cursor_b, &stats),
        "once pressure is observed with empty candidate set, it must not be replayed later"
    );
    clear_relay_idle_pressure_state_for_testing();
}
#[test]
fn blackhat_stale_pressure_must_not_survive_candidate_churn() {
    // Mark-then-clear churn between the pressure event and the final mark must
    // not leave a consumable budget behind.
    let _guard = acquire_idle_pressure_test_lock();
    clear_relay_idle_pressure_state_for_testing();
    let stats = Stats::new();
    note_relay_pressure_event();
    assert!(mark_relay_idle_candidate(531));
    clear_relay_idle_candidate(531);
    assert!(mark_relay_idle_candidate(532));
    let mut cursor = 0u64;
    let evicted = maybe_evict_idle_candidate_on_pressure(532, &mut cursor, &stats);
    assert!(
        !evicted,
        "stale pressure must not survive clear+remark churn cycles"
    );
    clear_relay_idle_pressure_state_for_testing();
}
#[test]
fn blackhat_pressure_seq_saturation_must_not_disable_future_pressure_accounting() {
    // Pin the sequence counters at the numeric ceiling to probe the
    // saturation path of note_relay_pressure_event().
    let _guard = acquire_idle_pressure_test_lock();
    clear_relay_idle_pressure_state_for_testing();
    {
        let mut guard = relay_idle_candidate_registry()
            .lock()
            .expect("registry lock must be available");
        guard.pressure_event_seq = u64::MAX;
        guard.pressure_consumed_seq = u64::MAX - 1;
    }
    // A new pressure event should still be representable; saturating at MAX creates a permanent lockout.
    note_relay_pressure_event();
    assert_ne!(
        relay_pressure_event_seq(),
        u64::MAX,
        "pressure sequence saturation must not permanently freeze event progression"
    );
    clear_relay_idle_pressure_state_for_testing();
}
#[test]
fn blackhat_pressure_seq_saturation_must_not_break_multiple_distinct_events() {
    // Even with both counters pinned at u64::MAX, two back-to-back pressure
    // events must remain distinguishable by sequence number.
    let _guard = acquire_idle_pressure_test_lock();
    clear_relay_idle_pressure_state_for_testing();
    {
        let mut guard = relay_idle_candidate_registry()
            .lock()
            .expect("registry lock must be available");
        guard.pressure_event_seq = u64::MAX;
        guard.pressure_consumed_seq = u64::MAX;
    }
    let mut observe_event = || {
        note_relay_pressure_event();
        relay_pressure_event_seq()
    };
    let first = observe_event();
    let second = observe_event();
    assert!(
        second > first,
        "distinct pressure events must remain distinguishable even at sequence boundary"
    );
    clear_relay_idle_pressure_state_for_testing();
}
#[tokio::test(flavor = "multi_thread", worker_threads = 4)]
async fn integration_race_single_pressure_event_allows_at_most_one_eviction_under_parallel_claims() {
    // Race test: each round publishes one pressure event and spawns `sessions`
    // tasks that race to claim it; at most one claim may succeed per round.
    let _guard = acquire_idle_pressure_test_lock();
    clear_relay_idle_pressure_state_for_testing();
    let stats = Arc::new(Stats::new());
    let sessions = 16usize;
    let rounds = 200usize;
    let conn_ids: Vec<u64> = (10_000u64..10_000u64 + sessions as u64).collect();
    // Per-session "seen" cursors; each task operates on a copy that is written
    // back after join, so cursor state persists across rounds.
    let mut seen_per_session = vec![0u64; sessions];
    for conn_id in &conn_ids {
        assert!(mark_relay_idle_candidate(*conn_id));
    }
    for round in 0..rounds {
        note_relay_pressure_event();
        let mut joins = Vec::with_capacity(sessions);
        for (idx, conn_id) in conn_ids.iter().enumerate() {
            let mut seen = seen_per_session[idx];
            let conn_id = *conn_id;
            let stats = stats.clone();
            joins.push(tokio::spawn(async move {
                let evicted = maybe_evict_idle_candidate_on_pressure(conn_id, &mut seen, stats.as_ref());
                (idx, conn_id, seen, evicted)
            }));
        }
        let mut evicted_this_round = 0usize;
        let mut evicted_conn = None;
        for join in joins {
            let (idx, conn_id, seen, evicted) = join.await.expect("race task must not panic");
            // Persist the updated cursor for the next round.
            seen_per_session[idx] = seen;
            if evicted {
                evicted_this_round += 1;
                evicted_conn = Some(conn_id);
            }
        }
        assert!(
            evicted_this_round <= 1,
            "round {round}: one pressure event must never produce more than one eviction"
        );
        // Re-mark the evicted conn so the candidate pool stays full next round.
        if let Some(conn) = evicted_conn {
            assert!(
                mark_relay_idle_candidate(conn),
                "round {round}: evicted conn must be re-markable as idle candidate"
            );
        }
    }
    assert!(
        stats.get_relay_pressure_evict_total() <= rounds as u64,
        "eviction total must never exceed number of pressure events"
    );
    assert!(
        stats.get_relay_pressure_evict_total() > 0,
        "parallel race must still observe at least one successful eviction"
    );
    clear_relay_idle_pressure_state_for_testing();
}
#[tokio::test(flavor = "multi_thread", worker_threads = 4)]
async fn integration_race_burst_pressure_with_churn_preserves_empty_set_invalidation_and_budget() {
    // Race test with churn: every 5th round clears the entire candidate set
    // before the pressure event, which must void that round's budget; other
    // rounds behave like the single-event race (at most one eviction).
    let _guard = acquire_idle_pressure_test_lock();
    clear_relay_idle_pressure_state_for_testing();
    let stats = Arc::new(Stats::new());
    let sessions = 12usize;
    let rounds = 120usize;
    let conn_ids: Vec<u64> = (20_000u64..20_000u64 + sessions as u64).collect();
    // Per-session cursors carried across rounds (copied into tasks, written back).
    let mut seen_per_session = vec![0u64; sessions];
    for conn_id in &conn_ids {
        assert!(mark_relay_idle_candidate(*conn_id));
    }
    // Oracle for the global eviction counter: bumped only on rounds where a
    // claim actually succeeded.
    let mut expected_total_evictions = 0u64;
    for round in 0..rounds {
        let empty_phase = round % 5 == 0;
        if empty_phase {
            for conn_id in &conn_ids {
                clear_relay_idle_candidate(*conn_id);
            }
        }
        note_relay_pressure_event();
        let mut joins = Vec::with_capacity(sessions);
        for (idx, conn_id) in conn_ids.iter().enumerate() {
            let mut seen = seen_per_session[idx];
            let conn_id = *conn_id;
            let stats = stats.clone();
            joins.push(tokio::spawn(async move {
                let evicted = maybe_evict_idle_candidate_on_pressure(conn_id, &mut seen, stats.as_ref());
                (idx, conn_id, seen, evicted)
            }));
        }
        let mut evicted_this_round = 0usize;
        let mut evicted_conn = None;
        for join in joins {
            let (idx, conn_id, seen, evicted) = join.await.expect("burst race task must not panic");
            seen_per_session[idx] = seen;
            if evicted {
                evicted_this_round += 1;
                evicted_conn = Some(conn_id);
            }
        }
        if empty_phase {
            assert_eq!(
                evicted_this_round, 0,
                "round {round}: empty candidate phase must not allow stale-pressure eviction"
            );
            // Refill the candidate pool for the following non-empty rounds.
            for conn_id in &conn_ids {
                assert!(mark_relay_idle_candidate(*conn_id));
            }
        } else {
            assert!(
                evicted_this_round <= 1,
                "round {round}: pressure budget must cap at one eviction"
            );
            if let Some(conn_id) = evicted_conn {
                expected_total_evictions = expected_total_evictions.saturating_add(1);
                assert!(mark_relay_idle_candidate(conn_id));
            }
        }
    }
    assert_eq!(
        stats.get_relay_pressure_evict_total(),
        expected_total_evictions,
        "global pressure eviction counter must match observed per-round successful consumes"
    );
    clear_relay_idle_pressure_state_for_testing();
}

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,210 @@
use super::*;
use crate::error::ProxyError;
use crate::stats::Stats;
use crate::stream::BufferPool;
use std::sync::Arc;
use tokio::io::{duplex, AsyncReadExt, AsyncWriteExt};
use tokio::time::{Duration, Instant, timeout};
// ------------------------------------------------------------------
// Priority 3: Async Relay HOL Blocking Prevention (OWASP ASVS 5.1.5)
// ------------------------------------------------------------------
#[tokio::test]
async fn relay_hol_blocking_prevention_regression() {
    // Regression: a slow server->client consumer (10 bytes per 100ms) must not
    // stall the client->server direction of the bidirectional relay.
    let stats = Arc::new(Stats::new());
    let user = "hol-user";
    // Two duplex pairs: client_peer <-> relay_client and relay_server <->
    // server_peer; the relay shuttles bytes between the two inner halves.
    let (client_peer, relay_client) = duplex(65536);
    let (relay_server, server_peer) = duplex(65536);
    let (client_reader, client_writer) = tokio::io::split(relay_client);
    let (server_reader, server_writer) = tokio::io::split(relay_server);
    let (mut cp_reader, mut cp_writer) = tokio::io::split(client_peer);
    let (mut sp_reader, mut sp_writer) = tokio::io::split(server_peer);
    let relay_task = tokio::spawn(relay_bidirectional(
        client_reader,
        client_writer,
        server_reader,
        server_writer,
        8192,
        8192,
        user,
        Arc::clone(&stats),
        None,
        Arc::new(BufferPool::new()),
    ));
    let payload_size = 1024 * 10;
    let s2c_payload = vec![0x41; payload_size];
    let c2s_payload = vec![0x42; payload_size];
    // Slow consumer: pushes the S->C payload, then drains it 10 bytes per
    // 100ms, keeping the S->C direction congested for the whole test.
    let s2c_handle = tokio::spawn(async move {
        sp_writer.write_all(&s2c_payload).await.unwrap();
        let mut total_read = 0;
        let mut buf = [0u8; 10];
        while total_read < payload_size {
            let n = cp_reader.read(&mut buf).await.unwrap();
            total_read += n;
            tokio::time::sleep(Duration::from_millis(100)).await;
        }
    });
    // The C->S transfer must complete quickly despite the congested S->C side.
    let start = Instant::now();
    cp_writer.write_all(&c2s_payload).await.unwrap();
    let mut server_buf = vec![0u8; payload_size];
    sp_reader.read_exact(&mut server_buf).await.unwrap();
    let elapsed = start.elapsed();
    assert!(elapsed < Duration::from_millis(1000), "C->S must not be blocked by slow S->C (HOL blocking): {:?}", elapsed);
    assert_eq!(server_buf, c2s_payload);
    // Tear down the still-running consumer and relay.
    s2c_handle.abort();
    relay_task.abort();
}
// ------------------------------------------------------------------
// Priority 3: Data Quota Mid-Session Cutoff (OWASP ASVS 5.1.6)
// ------------------------------------------------------------------
#[tokio::test]
async fn relay_quota_mid_session_cutoff() {
    // A session with a 5000-byte quota sends 4000 bytes (within quota) and
    // then 2000 more (total 6000): the relay must abort with
    // DataQuotaExceeded and the server side must observe EOF.
    let stats = Arc::new(Stats::new());
    let user = "quota-mid-user";
    let quota = 5000;
    let (client_peer, relay_client) = duplex(8192);
    let (relay_server, server_peer) = duplex(8192);
    let (client_reader, client_writer) = tokio::io::split(relay_client);
    let (server_reader, server_writer) = tokio::io::split(relay_server);
    // `_cp_reader` must stay alive (dropping it would close the client half of
    // the duplex) but is never read, so it does not need `mut`
    // (fixes an unused_mut warning in the original).
    let (_cp_reader, mut cp_writer) = tokio::io::split(client_peer);
    let (mut sp_reader, _sp_writer) = tokio::io::split(server_peer);
    let relay_task = tokio::spawn(relay_bidirectional(
        client_reader,
        client_writer,
        server_reader,
        server_writer,
        1024,
        1024,
        user,
        Arc::clone(&stats),
        Some(quota),
        Arc::new(BufferPool::new()),
    ));
    // Send 4000 bytes (Ok)
    let buf1 = vec![0x42; 4000];
    cp_writer.write_all(&buf1).await.unwrap();
    let mut server_recv = vec![0u8; 4000];
    sp_reader.read_exact(&mut server_recv).await.unwrap();
    // Send another 2000 bytes (Total 6000 > 5000)
    let buf2 = vec![0x42; 2000];
    // The write may itself fail once the relay tears down; either outcome is fine.
    let _ = cp_writer.write_all(&buf2).await;
    // The relay must terminate promptly with the quota error.
    let relay_res = timeout(Duration::from_secs(1), relay_task).await.unwrap();
    match relay_res {
        Ok(Err(ProxyError::DataQuotaExceeded { .. })) => {
            // Expected
        }
        other => panic!("Expected DataQuotaExceeded error, got: {:?}", other),
    }
    let mut small_buf = [0u8; 1];
    let n = sp_reader.read(&mut small_buf).await.unwrap();
    assert_eq!(n, 0, "Server must see EOF after quota reached");
}
#[tokio::test]
async fn relay_chaos_half_close_crossfire_terminates_without_hang() {
    // Both peers write a payload and then half-close in staggered order; the
    // relay must terminate cleanly instead of hanging on either direction.
    let stats = Arc::new(Stats::new());
    let (mut client_peer, relay_client) = duplex(8192);
    let (relay_server, mut server_peer) = duplex(8192);
    let (client_reader, client_writer) = tokio::io::split(relay_client);
    let (server_reader, server_writer) = tokio::io::split(relay_server);
    let relay_task = tokio::spawn(relay_bidirectional(
        client_reader,
        client_writer,
        server_reader,
        server_writer,
        1024,
        1024,
        "half-close-crossfire",
        Arc::clone(&stats),
        None,
        Arc::new(BufferPool::new()),
    ));
    client_peer.write_all(b"c2s-pre-half-close").await.unwrap();
    server_peer.write_all(b"s2c-pre-half-close").await.unwrap();
    // Staggered shutdowns: client first, server 10ms later.
    client_peer.shutdown().await.unwrap();
    tokio::time::sleep(Duration::from_millis(10)).await;
    server_peer.shutdown().await.unwrap();
    // Bounded wait: a hang here would mean the relay missed a half-close.
    let done = timeout(Duration::from_secs(1), relay_task)
        .await
        .expect("relay must terminate after bilateral half-close")
        .expect("relay task must not panic");
    assert!(done.is_ok(), "relay must terminate cleanly under half-close crossfire");
}
#[tokio::test]
#[ignore = "heavy soak; run manually"]
async fn relay_soak_bidirectional_temporal_jitter_5k_rounds() {
    // Soak: 5k round trips in both directions with periodic jitter sleeps.
    // Payload bytes are derived from the round index, so any corruption or
    // reordering trips the equality asserts.
    let stats = Arc::new(Stats::new());
    let (mut client_peer, relay_client) = duplex(65536);
    let (relay_server, mut server_peer) = duplex(65536);
    let (client_reader, client_writer) = tokio::io::split(relay_client);
    let (server_reader, server_writer) = tokio::io::split(relay_server);
    let relay_task = tokio::spawn(relay_bidirectional(
        client_reader,
        client_writer,
        server_reader,
        server_writer,
        4096,
        4096,
        "soak-jitter-user",
        Arc::clone(&stats),
        None,
        Arc::new(BufferPool::new()),
    ));
    for i in 0..5_000u32 {
        // C->S: 17-byte pattern unique to this round.
        let c = [((i as u8).wrapping_mul(13)).wrapping_add(1); 17];
        client_peer.write_all(&c).await.unwrap();
        let mut c_seen = [0u8; 17];
        server_peer.read_exact(&mut c_seen).await.unwrap();
        assert_eq!(c_seen, c);
        // S->C: 23-byte pattern unique to this round.
        let s = [((i as u8).wrapping_mul(7)).wrapping_add(3); 23];
        server_peer.write_all(&s).await.unwrap();
        let mut s_seen = [0u8; 23];
        client_peer.read_exact(&mut s_seen).await.unwrap();
        assert_eq!(s_seen, s);
        // Temporal jitter: 0-2ms sleep every 10th round.
        if i % 10 == 0 {
            tokio::time::sleep(Duration::from_millis((i % 3) as u64)).await;
        }
    }
    // Dropping both peers closes the streams; the relay must wind down itself.
    drop(client_peer);
    drop(server_peer);
    let done = timeout(Duration::from_secs(2), relay_task)
        .await
        .expect("relay must stop after soak peers close")
        .expect("relay task must not panic");
    assert!(done.is_ok());
}

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,406 @@
use super::*;
use rand::{Rng, SeedableRng};
use rand::rngs::StdRng;
use std::sync::Arc;
use std::sync::atomic::{AtomicU64, Ordering};
#[test]
fn cutover_stagger_delay_is_deterministic_for_same_inputs() {
    // The same (session, generation) pair must always map to the same delay.
    let session = 0x0123_4567_89ab_cdef;
    let generation = 42;
    assert_eq!(
        cutover_stagger_delay(session, generation),
        cutover_stagger_delay(session, generation),
        "stagger delay must be deterministic for identical session/generation inputs"
    );
}
#[test]
fn cutover_stagger_delay_stays_within_budget_bounds() {
    // Black-hat model: censors trigger many cutovers and correlate disconnect timing.
    // Keep delay inside a narrow coarse window to avoid long-tail spikes.
    let generations = [0u64, 1, 2, 3, 16, 128, u32::MAX as u64, u64::MAX];
    let session_ids = [0u64, 1, 2, 0xdead_beef, 0xfeed_face_cafe_babe, u64::MAX];
    for &generation in &generations {
        for &session_id in &session_ids {
            let millis = cutover_stagger_delay(session_id, generation).as_millis();
            assert!(
                (1000..=1999).contains(&millis),
                "stagger delay must remain in fixed 1000..=1999ms budget"
            );
        }
    }
}
#[test]
fn cutover_stagger_delay_changes_with_generation_for_same_session() {
    // The same session across adjacent generations should get different delays.
    let session = 0x0123_4567_89ab_cdef;
    let delay_gen_100 = cutover_stagger_delay(session, 100);
    let delay_gen_101 = cutover_stagger_delay(session, 101);
    assert_ne!(
        delay_gen_100, delay_gen_101,
        "adjacent cutover generations should decorrelate disconnect delays"
    );
}
#[test]
fn route_runtime_set_mode_is_idempotent_for_same_mode() {
    // Re-applying the already-active mode is a no-op: no event, no generation bump.
    let runtime = RouteRuntimeController::new(RelayRouteMode::Direct);
    let before = runtime.snapshot();
    assert!(
        runtime.set_mode(RelayRouteMode::Direct).is_none(),
        "setting already-active mode must not produce a cutover event"
    );
    let after = runtime.snapshot();
    assert_eq!(
        before.generation, after.generation,
        "idempotent mode set must not bump generation"
    );
}
#[test]
fn affected_cutover_state_triggers_only_for_newer_generation() {
    // A session pinned at the current generation is unaffected; a real mode
    // change yields a newer generation the session must observe as a cutover.
    let runtime = RouteRuntimeController::new(RelayRouteMode::Direct);
    let rx = runtime.subscribe();
    let baseline = runtime.snapshot();
    assert!(
        affected_cutover_state(&rx, RelayRouteMode::Direct, baseline.generation).is_none(),
        "current generation must not be considered a cutover for existing session"
    );
    let next = runtime
        .set_mode(RelayRouteMode::Middle)
        .expect("mode change must produce cutover state");
    let observed = affected_cutover_state(&rx, RelayRouteMode::Direct, baseline.generation)
        .expect("newer generation must be observed as cutover");
    assert_eq!(observed.generation, next.generation);
    assert_eq!(observed.mode, RelayRouteMode::Middle);
}
#[test]
fn integration_watch_and_snapshot_follow_same_transition_sequence() {
    // Drive a mix of real and idempotent transitions and verify that
    // snapshot() and the watch channel always agree on mode and generation.
    let runtime = RouteRuntimeController::new(RelayRouteMode::Direct);
    let rx = runtime.subscribe();
    let mut expected_mode = RelayRouteMode::Direct;
    let mut expected_generation = 0u64;
    for target in [
        RelayRouteMode::Middle,
        RelayRouteMode::Middle,
        RelayRouteMode::Direct,
        RelayRouteMode::Direct,
        RelayRouteMode::Middle,
    ] {
        let changed = runtime.set_mode(target);
        if target == expected_mode {
            assert!(changed.is_none(), "idempotent transition must return none");
        } else {
            expected_mode = target;
            expected_generation = expected_generation.saturating_add(1);
            let emitted = changed.expect("real transition must emit cutover state");
            assert_eq!(emitted.mode, expected_mode);
            assert_eq!(emitted.generation, expected_generation);
        }
        let snapshot = runtime.snapshot();
        assert_eq!(snapshot, *rx.borrow(), "snapshot and watch state must stay aligned");
        assert_eq!(snapshot.mode, expected_mode);
        assert_eq!(snapshot.generation, expected_generation);
    }
}
#[test]
fn session_is_not_affected_when_mode_matches_even_if_generation_advanced() {
    // Generation bumps alone must not cut over a session already on the final mode.
    let current = RouteCutoverState {
        mode: RelayRouteMode::Direct,
        generation: 2,
    };
    let affected = is_session_affected_by_cutover(current, RelayRouteMode::Direct, 0);
    assert!(
        !affected,
        "session on matching final route mode should not be force-cut over on intermediate generation bumps"
    );
}
#[test]
fn cutover_predicate_rejects_equal_generation_even_if_mode_differs() {
    // Equal generations mean "already current": mode mismatch alone is no cutover.
    let current = RouteCutoverState {
        mode: RelayRouteMode::Middle,
        generation: 77,
    };
    let affected = is_session_affected_by_cutover(current, RelayRouteMode::Direct, 77);
    assert!(
        !affected,
        "equal generation must never trigger cutover regardless of mode mismatch"
    );
}
#[test]
fn adversarial_route_oscillation_only_cuts_over_sessions_with_different_final_mode() {
    // Direct -> Middle -> Direct oscillation: only sessions whose mode differs
    // from the *final* route mode may be cut over.
    let runtime = RouteRuntimeController::new(RelayRouteMode::Direct);
    let rx = runtime.subscribe();
    let session_generation = runtime.snapshot().generation;
    runtime
        .set_mode(RelayRouteMode::Middle)
        .expect("direct->middle must transition");
    runtime
        .set_mode(RelayRouteMode::Direct)
        .expect("middle->direct must transition");
    let direct_hit = affected_cutover_state(&rx, RelayRouteMode::Direct, session_generation);
    assert!(
        direct_hit.is_none(),
        "direct session should survive when final mode returns to direct"
    );
    let middle_hit = affected_cutover_state(&rx, RelayRouteMode::Middle, session_generation);
    assert!(
        middle_hit.is_some(),
        "middle session should be cut over when final mode is direct"
    );
}
#[test]
fn light_fuzz_cutover_predicate_matches_reference_oracle() {
    // Compare the predicate against a brute-force oracle over seeded random
    // inputs. RNG draw order matches the original test exactly.
    fn pick_mode(flag: bool) -> RelayRouteMode {
        if flag {
            RelayRouteMode::Direct
        } else {
            RelayRouteMode::Middle
        }
    }
    let mut rng = StdRng::seed_from_u64(0xC0DEC0DE5EED);
    for _ in 0..20_000 {
        let current = RouteCutoverState {
            mode: pick_mode(rng.random::<bool>()),
            generation: rng.random_range(0u64..1_000_000),
        };
        let session_mode = pick_mode(rng.random::<bool>());
        let session_generation = rng.random_range(0u64..1_000_000);
        let expected = current.generation > session_generation && current.mode != session_mode;
        let actual = is_session_affected_by_cutover(current, session_mode, session_generation);
        assert_eq!(
            actual, expected,
            "cutover predicate must match mode-aware generation oracle"
        );
    }
}
#[test]
fn light_fuzz_set_mode_generation_tracks_only_real_transitions() {
    // Seeded random transition storm: generation counts only real mode changes.
    fn pick_mode(flag: bool) -> RelayRouteMode {
        if flag {
            RelayRouteMode::Direct
        } else {
            RelayRouteMode::Middle
        }
    }
    let runtime = RouteRuntimeController::new(RelayRouteMode::Direct);
    let mut rng = StdRng::seed_from_u64(0x0DDC0FFE);
    let mut expected_mode = RelayRouteMode::Direct;
    let mut expected_generation = 0u64;
    for _ in 0..10_000 {
        let candidate = pick_mode(rng.random::<bool>());
        let changed = runtime.set_mode(candidate);
        if candidate != expected_mode {
            expected_mode = candidate;
            expected_generation = expected_generation.saturating_add(1);
            let next = changed.expect("mode transition must emit cutover state");
            assert_eq!(next.mode, expected_mode);
            assert_eq!(next.generation, expected_generation);
        } else {
            assert!(changed.is_none(), "idempotent set_mode must not emit cutover state");
        }
    }
    let final_state = runtime.snapshot();
    assert_eq!(final_state.mode, expected_mode);
    assert_eq!(final_state.generation, expected_generation);
}
#[test]
fn stress_snapshot_and_watch_state_remain_consistent_under_concurrent_switch_storm() {
    // Four writer threads hammer set_mode with alternating modes; once they
    // join, snapshot() and the watch channel must report identical state.
    let runtime = Arc::new(RouteRuntimeController::new(RelayRouteMode::Direct));
    std::thread::scope(|scope| {
        let mut writers = Vec::new();
        for worker in 0..4usize {
            let runtime = Arc::clone(&runtime);
            writers.push(scope.spawn(move || {
                for step in 0..20_000usize {
                    // Each worker alternates modes with a per-worker phase offset.
                    let mode = if (worker + step) % 2 == 0 {
                        RelayRouteMode::Direct
                    } else {
                        RelayRouteMode::Middle
                    };
                    let _ = runtime.set_mode(mode);
                }
            }));
        }
        for writer in writers {
            writer
                .join()
                .expect("route mode writer thread must not panic");
        }
        // Writers joined: state is quiescent; repeated reads must agree.
        let rx = runtime.subscribe();
        for _ in 0..128 {
            assert_eq!(
                runtime.snapshot(),
                *rx.borrow(),
                "snapshot and watch state must converge after concurrent set_mode churn"
            );
            std::thread::yield_now();
        }
    });
}
#[test]
fn stress_concurrent_transition_count_matches_final_generation() {
    // Six workers drive pseudo-random transitions; every accepted transition
    // (set_mode returned Some) is counted, and the final generation must equal
    // that count exactly — no lost or double-counted transitions.
    let runtime = Arc::new(RouteRuntimeController::new(RelayRouteMode::Direct));
    let successful_transitions = Arc::new(AtomicU64::new(0));
    std::thread::scope(|scope| {
        let mut workers = Vec::new();
        for worker in 0..6usize {
            let runtime = Arc::clone(&runtime);
            let successful_transitions = Arc::clone(&successful_transitions);
            workers.push(scope.spawn(move || {
                // Per-worker xorshift PRNG seeded from the worker index.
                let mut state = (worker as u64 + 1).wrapping_mul(0x9E37_79B9_7F4A_7C15);
                for _ in 0..25_000usize {
                    state ^= state << 7;
                    state ^= state >> 9;
                    state ^= state << 8;
                    let mode = if (state & 1) == 0 {
                        RelayRouteMode::Direct
                    } else {
                        RelayRouteMode::Middle
                    };
                    if runtime.set_mode(mode).is_some() {
                        successful_transitions.fetch_add(1, Ordering::Relaxed);
                    }
                }
            }));
        }
        for worker in workers {
            worker.join().expect("route mode transition worker must not panic");
        }
    });
    let final_state = runtime.snapshot();
    assert_eq!(
        final_state.generation,
        successful_transitions.load(Ordering::Relaxed),
        "final generation must equal number of accepted mode transitions"
    );
    assert_eq!(
        final_state,
        *runtime.subscribe().borrow(),
        "watch and snapshot state must match after concurrent transition accounting"
    );
}
#[test]
fn light_fuzz_cutover_stagger_delay_distribution_stays_in_fixed_window() {
    // Deterministic xorshift fuzzing keeps this test stable across runs.
    fn xorshift(state: &mut u64) -> u64 {
        *state ^= *state << 7;
        *state ^= *state >> 9;
        *state ^= *state << 8;
        *state
    }
    let mut state: u64 = 0x9E37_79B9_7F4A_7C15;
    for _ in 0..20_000 {
        let session_id = xorshift(&mut state);
        let generation = xorshift(&mut state);
        let delay = cutover_stagger_delay(session_id, generation);
        assert!(
            (1000..=1999).contains(&delay.as_millis()),
            "fuzzed inputs must always map into fixed stagger window"
        );
    }
}
#[test]
fn cutover_stagger_delay_distribution_has_no_empty_buckets_under_sequential_sessions() {
    // 250k sequential session ids must touch every one of the 1000 ms buckets.
    let generation = 4242u64;
    let mut buckets = [0usize; 1000];
    for session_id in 0..250_000u64 {
        let delay_ms = cutover_stagger_delay(session_id, generation).as_millis() as usize;
        buckets[delay_ms - 1000] += 1;
    }
    let empty_buckets = buckets.iter().filter(|&&count| count == 0).count();
    assert_eq!(
        empty_buckets, 0,
        "all 1000 delay buckets must be exercised to avoid cutover herd clustering"
    );
}
#[test]
fn light_fuzz_cutover_stagger_delay_distribution_stays_reasonably_uniform() {
    // Skew check: no bucket may exceed three times the smallest bucket.
    fn xorshift(state: &mut u64) -> u64 {
        *state ^= *state << 7;
        *state ^= *state >> 9;
        *state ^= *state << 8;
        *state
    }
    let mut state: u64 = 0x1BAD_B002_CAFE_F00D;
    let mut buckets = [0usize; 1000];
    for _ in 0..300_000usize {
        let session_id = xorshift(&mut state);
        let generation = xorshift(&mut state);
        let delay_ms = cutover_stagger_delay(session_id, generation).as_millis() as usize;
        buckets[delay_ms - 1000] += 1;
    }
    let min = *buckets.iter().min().unwrap_or(&0);
    let max = *buckets.iter().max().unwrap_or(&0);
    assert!(min > 0, "fuzzed distribution must not leave empty buckets");
    assert!(
        max <= min.saturating_mul(3),
        "bucket skew is too high for anti-herd staggering (max={max}, min={min})"
    );
}
#[test]
fn stress_cutover_stagger_delay_distribution_remains_stable_across_generations() {
    // The skew bound must hold for every sampled generation, including the
    // extremes near the u32/u64 boundaries.
    for generation in [0u64, 1, 7, 31, 255, 1024, u32::MAX as u64, u64::MAX - 1] {
        let mut buckets = [0usize; 1000];
        for session_id in 0..100_000u64 {
            let delay_ms = cutover_stagger_delay(session_id ^ 0x9E37_79B9, generation)
                .as_millis() as usize;
            buckets[delay_ms - 1000] += 1;
        }
        let (mut min, mut max) = (usize::MAX, 0usize);
        for &count in &buckets {
            min = min.min(count);
            max = max.max(count);
        }
        assert!(
            max <= min.saturating_mul(4).max(1),
            "generation={generation}: distribution collapsed (max={max}, min={min})"
        );
    }
}