diff --git a/src/maestro/listeners.rs b/src/maestro/listeners.rs index 3b2a92f..014f69c 100644 --- a/src/maestro/listeners.rs +++ b/src/maestro/listeners.rs @@ -14,6 +14,7 @@ use crate::crypto::SecureRandom; use crate::ip_tracker::UserIpTracker; use crate::proxy::ClientHandler; use crate::proxy::route_mode::{ROUTE_SWITCH_ERROR_MSG, RouteRuntimeController}; +use crate::proxy::shared_state::ProxySharedState; use crate::startup::{COMPONENT_LISTENERS_BIND, StartupTracker}; use crate::stats::beobachten::BeobachtenStore; use crate::stats::{ReplayChecker, Stats}; @@ -49,6 +50,7 @@ pub(crate) async fn bind_listeners( tls_cache: Option>, ip_tracker: Arc, beobachten: Arc, + shared: Arc, max_connections: Arc, ) -> Result> { startup_tracker @@ -224,6 +226,7 @@ pub(crate) async fn bind_listeners( let tls_cache = tls_cache.clone(); let ip_tracker = ip_tracker.clone(); let beobachten = beobachten.clone(); + let shared = shared.clone(); let max_connections_unix = max_connections.clone(); tokio::spawn(async move { @@ -284,11 +287,12 @@ pub(crate) async fn bind_listeners( let tls_cache = tls_cache.clone(); let ip_tracker = ip_tracker.clone(); let beobachten = beobachten.clone(); + let shared = shared.clone(); let proxy_protocol_enabled = config.server.proxy_protocol; tokio::spawn(async move { let _permit = permit; - if let Err(e) = crate::proxy::client::handle_client_stream( + if let Err(e) = crate::proxy::client::handle_client_stream_with_shared( stream, fake_peer, config, @@ -302,6 +306,7 @@ pub(crate) async fn bind_listeners( tls_cache, ip_tracker, beobachten, + shared, proxy_protocol_enabled, ) .await @@ -351,6 +356,7 @@ pub(crate) fn spawn_tcp_accept_loops( tls_cache: Option>, ip_tracker: Arc, beobachten: Arc, + shared: Arc, max_connections: Arc, ) { for (listener, listener_proxy_protocol) in listeners { @@ -366,6 +372,7 @@ pub(crate) fn spawn_tcp_accept_loops( let tls_cache = tls_cache.clone(); let ip_tracker = ip_tracker.clone(); let beobachten = beobachten.clone(); + let 
shared = shared.clone(); let max_connections_tcp = max_connections.clone(); tokio::spawn(async move { @@ -421,13 +428,14 @@ pub(crate) fn spawn_tcp_accept_loops( let tls_cache = tls_cache.clone(); let ip_tracker = ip_tracker.clone(); let beobachten = beobachten.clone(); + let shared = shared.clone(); let proxy_protocol_enabled = listener_proxy_protocol; let real_peer_report = Arc::new(std::sync::Mutex::new(None)); let real_peer_report_for_handler = real_peer_report.clone(); tokio::spawn(async move { let _permit = permit; - if let Err(e) = ClientHandler::new( + if let Err(e) = ClientHandler::new_with_shared( stream, peer_addr, config, @@ -441,6 +449,7 @@ pub(crate) fn spawn_tcp_accept_loops( tls_cache, ip_tracker, beobachten, + shared, proxy_protocol_enabled, real_peer_report_for_handler, ) diff --git a/src/maestro/mod.rs b/src/maestro/mod.rs index aa95cb6..dd11dc2 100644 --- a/src/maestro/mod.rs +++ b/src/maestro/mod.rs @@ -33,6 +33,7 @@ use crate::crypto::SecureRandom; use crate::ip_tracker::UserIpTracker; use crate::network::probe::{decide_network_capabilities, log_probe_result, run_probe}; use crate::proxy::route_mode::{RelayRouteMode, RouteRuntimeController}; +use crate::proxy::shared_state::ProxySharedState; use crate::startup::{ COMPONENT_API_BOOTSTRAP, COMPONENT_CONFIG_LOAD, COMPONENT_ME_POOL_CONSTRUCT, COMPONENT_ME_POOL_INIT_STAGE1, COMPONENT_ME_PROXY_CONFIG_V4, COMPONENT_ME_PROXY_CONFIG_V6, @@ -631,6 +632,7 @@ async fn run_inner( ) .await; let _admission_tx_hold = admission_tx; + let shared_state = ProxySharedState::new(); let bound = listeners::bind_listeners( &config, @@ -651,6 +653,7 @@ async fn run_inner( tls_cache.clone(), ip_tracker.clone(), beobachten.clone(), + shared_state.clone(), max_connections.clone(), ) .await?; @@ -707,6 +710,7 @@ async fn run_inner( tls_cache.clone(), ip_tracker.clone(), beobachten.clone(), + shared_state, max_connections.clone(), ); diff --git a/src/proxy/client.rs b/src/proxy/client.rs index 7472459..7942906 100644 --- 
a/src/proxy/client.rs +++ b/src/proxy/client.rs @@ -81,10 +81,15 @@ use crate::transport::socket::normalize_ip; use crate::transport::{UpstreamManager, configure_client_socket, parse_proxy_protocol}; use crate::proxy::direct_relay::handle_via_direct; -use crate::proxy::handshake::{HandshakeSuccess, handle_mtproto_handshake, handle_tls_handshake}; +use crate::proxy::handshake::{ + HandshakeSuccess, handle_mtproto_handshake_with_shared, handle_tls_handshake_with_shared, +}; +#[cfg(test)] +use crate::proxy::handshake::{handle_mtproto_handshake, handle_tls_handshake}; use crate::proxy::masking::handle_bad_client; use crate::proxy::middle_relay::handle_via_middle_proxy; use crate::proxy::route_mode::{RelayRouteMode, RouteRuntimeController}; +use crate::proxy::shared_state::ProxySharedState; fn beobachten_ttl(config: &ProxyConfig) -> Duration { const BEOBACHTEN_TTL_MAX_MINUTES: u64 = 24 * 60; @@ -342,7 +347,48 @@ fn synthetic_local_addr(port: u16) -> SocketAddr { SocketAddr::from(([0, 0, 0, 0], port)) } +#[cfg(test)] pub async fn handle_client_stream( + stream: S, + peer: SocketAddr, + config: Arc, + stats: Arc, + upstream_manager: Arc, + replay_checker: Arc, + buffer_pool: Arc, + rng: Arc, + me_pool: Option>, + route_runtime: Arc, + tls_cache: Option>, + ip_tracker: Arc, + beobachten: Arc, + proxy_protocol_enabled: bool, +) -> Result<()> +where + S: AsyncRead + AsyncWrite + Unpin + Send + 'static, +{ + handle_client_stream_with_shared( + stream, + peer, + config, + stats, + upstream_manager, + replay_checker, + buffer_pool, + rng, + me_pool, + route_runtime, + tls_cache, + ip_tracker, + beobachten, + ProxySharedState::new(), + proxy_protocol_enabled, + ) + .await +} + +#[allow(clippy::too_many_arguments)] +pub async fn handle_client_stream_with_shared( mut stream: S, peer: SocketAddr, config: Arc, @@ -356,6 +402,7 @@ pub async fn handle_client_stream( tls_cache: Option>, ip_tracker: Arc, beobachten: Arc, + shared: Arc, proxy_protocol_enabled: bool, ) -> Result<()> where 
@@ -550,9 +597,10 @@ where let (read_half, write_half) = tokio::io::split(stream); - let (mut tls_reader, tls_writer, tls_user) = match handle_tls_handshake( + let (mut tls_reader, tls_writer, tls_user) = match handle_tls_handshake_with_shared( &handshake, read_half, write_half, real_peer, &config, &replay_checker, &rng, tls_cache.clone(), + shared.as_ref(), ).await { HandshakeResult::Success(result) => result, HandshakeResult::BadClient { reader, writer } => { @@ -578,9 +626,10 @@ where let mtproto_handshake: [u8; HANDSHAKE_LEN] = mtproto_data[..].try_into() .map_err(|_| ProxyError::InvalidHandshake("Short MTProto handshake".into()))?; - let (crypto_reader, crypto_writer, success) = match handle_mtproto_handshake( + let (crypto_reader, crypto_writer, success) = match handle_mtproto_handshake_with_shared( &mtproto_handshake, tls_reader, tls_writer, real_peer, &config, &replay_checker, true, Some(tls_user.as_str()), + shared.as_ref(), ).await { HandshakeResult::Success(result) => result, HandshakeResult::BadClient { reader, writer } => { @@ -614,11 +663,12 @@ where }; Ok(HandshakeOutcome::NeedsRelay(Box::pin( - RunningClientHandler::handle_authenticated_static( + RunningClientHandler::handle_authenticated_static_with_shared( crypto_reader, crypto_writer, success, upstream_manager, stats, config, buffer_pool, rng, me_pool, route_runtime.clone(), local_addr, real_peer, ip_tracker.clone(), + shared.clone(), ), ))) } else { @@ -644,9 +694,10 @@ where let (read_half, write_half) = tokio::io::split(stream); - let (crypto_reader, crypto_writer, success) = match handle_mtproto_handshake( + let (crypto_reader, crypto_writer, success) = match handle_mtproto_handshake_with_shared( &handshake, read_half, write_half, real_peer, &config, &replay_checker, false, None, + shared.as_ref(), ).await { HandshakeResult::Success(result) => result, HandshakeResult::BadClient { reader, writer } => { @@ -665,7 +716,7 @@ where }; Ok(HandshakeOutcome::NeedsRelay(Box::pin( - 
RunningClientHandler::handle_authenticated_static( + RunningClientHandler::handle_authenticated_static_with_shared( crypto_reader, crypto_writer, success, @@ -679,6 +730,7 @@ where local_addr, real_peer, ip_tracker.clone(), + shared.clone(), ) ))) } @@ -731,10 +783,12 @@ pub struct RunningClientHandler { tls_cache: Option>, ip_tracker: Arc, beobachten: Arc, + shared: Arc, proxy_protocol_enabled: bool, } impl ClientHandler { + #[cfg(test)] pub fn new( stream: TcpStream, peer: SocketAddr, @@ -751,6 +805,45 @@ impl ClientHandler { beobachten: Arc, proxy_protocol_enabled: bool, real_peer_report: Arc>>, + ) -> RunningClientHandler { + Self::new_with_shared( + stream, + peer, + config, + stats, + upstream_manager, + replay_checker, + buffer_pool, + rng, + me_pool, + route_runtime, + tls_cache, + ip_tracker, + beobachten, + ProxySharedState::new(), + proxy_protocol_enabled, + real_peer_report, + ) + } + + #[allow(clippy::too_many_arguments)] + pub fn new_with_shared( + stream: TcpStream, + peer: SocketAddr, + config: Arc, + stats: Arc, + upstream_manager: Arc, + replay_checker: Arc, + buffer_pool: Arc, + rng: Arc, + me_pool: Option>, + route_runtime: Arc, + tls_cache: Option>, + ip_tracker: Arc, + beobachten: Arc, + shared: Arc, + proxy_protocol_enabled: bool, + real_peer_report: Arc>>, ) -> RunningClientHandler { let normalized_peer = normalize_ip(peer); RunningClientHandler { @@ -769,6 +862,7 @@ impl ClientHandler { tls_cache, ip_tracker, beobachten, + shared, proxy_protocol_enabled, } } @@ -1058,7 +1152,7 @@ impl RunningClientHandler { let (read_half, write_half) = self.stream.into_split(); - let (mut tls_reader, tls_writer, tls_user) = match handle_tls_handshake( + let (mut tls_reader, tls_writer, tls_user) = match handle_tls_handshake_with_shared( &handshake, read_half, write_half, @@ -1067,6 +1161,7 @@ impl RunningClientHandler { &replay_checker, &self.rng, self.tls_cache.clone(), + self.shared.as_ref(), ) .await { @@ -1095,7 +1190,7 @@ impl RunningClientHandler { 
.try_into() .map_err(|_| ProxyError::InvalidHandshake("Short MTProto handshake".into()))?; - let (crypto_reader, crypto_writer, success) = match handle_mtproto_handshake( + let (crypto_reader, crypto_writer, success) = match handle_mtproto_handshake_with_shared( &mtproto_handshake, tls_reader, tls_writer, @@ -1104,6 +1199,7 @@ impl RunningClientHandler { &replay_checker, true, Some(tls_user.as_str()), + self.shared.as_ref(), ) .await { @@ -1140,7 +1236,7 @@ impl RunningClientHandler { }; Ok(HandshakeOutcome::NeedsRelay(Box::pin( - Self::handle_authenticated_static( + Self::handle_authenticated_static_with_shared( crypto_reader, crypto_writer, success, @@ -1154,6 +1250,7 @@ impl RunningClientHandler { local_addr, peer, self.ip_tracker, + self.shared, ), ))) } @@ -1192,7 +1289,7 @@ impl RunningClientHandler { let (read_half, write_half) = self.stream.into_split(); - let (crypto_reader, crypto_writer, success) = match handle_mtproto_handshake( + let (crypto_reader, crypto_writer, success) = match handle_mtproto_handshake_with_shared( &handshake, read_half, write_half, @@ -1201,6 +1298,7 @@ impl RunningClientHandler { &replay_checker, false, None, + self.shared.as_ref(), ) .await { @@ -1221,7 +1319,7 @@ impl RunningClientHandler { }; Ok(HandshakeOutcome::NeedsRelay(Box::pin( - Self::handle_authenticated_static( + Self::handle_authenticated_static_with_shared( crypto_reader, crypto_writer, success, @@ -1235,6 +1333,7 @@ impl RunningClientHandler { local_addr, peer, self.ip_tracker, + self.shared, ), ))) } @@ -1243,6 +1342,7 @@ impl RunningClientHandler { /// Two modes: /// - Direct: TCP relay to TG DC (existing behavior) /// - Middle Proxy: RPC multiplex through ME pool (new — supports CDN DCs) + #[cfg(test)] async fn handle_authenticated_static( client_reader: CryptoReader, client_writer: CryptoWriter, @@ -1258,6 +1358,45 @@ impl RunningClientHandler { peer_addr: SocketAddr, ip_tracker: Arc, ) -> Result<()> + where + R: AsyncRead + Unpin + Send + 'static, + W: 
AsyncWrite + Unpin + Send + 'static, + { + Self::handle_authenticated_static_with_shared( + client_reader, + client_writer, + success, + upstream_manager, + stats, + config, + buffer_pool, + rng, + me_pool, + route_runtime, + local_addr, + peer_addr, + ip_tracker, + ProxySharedState::new(), + ) + .await + } + + async fn handle_authenticated_static_with_shared( + client_reader: CryptoReader, + client_writer: CryptoWriter, + success: HandshakeSuccess, + upstream_manager: Arc, + stats: Arc, + config: Arc, + buffer_pool: Arc, + rng: Arc, + me_pool: Option>, + route_runtime: Arc, + local_addr: SocketAddr, + peer_addr: SocketAddr, + ip_tracker: Arc, + _shared: Arc, + ) -> Result<()> where R: AsyncRead + Unpin + Send + 'static, W: AsyncWrite + Unpin + Send + 'static, @@ -1299,6 +1438,7 @@ impl RunningClientHandler { route_runtime.subscribe(), route_snapshot, session_id, + _shared, ) .await } else { diff --git a/src/proxy/handshake.rs b/src/proxy/handshake.rs index 0f3be02..9c41e91 100644 --- a/src/proxy/handshake.rs +++ b/src/proxy/handshake.rs @@ -4,13 +4,16 @@ use dashmap::DashMap; use dashmap::mapref::entry::Entry; +#[cfg(test)] use std::collections::HashSet; +#[cfg(test)] use std::collections::hash_map::RandomState; use std::hash::{BuildHasher, Hash, Hasher}; use std::net::SocketAddr; use std::net::{IpAddr, Ipv6Addr}; use std::sync::Arc; -use std::sync::{Mutex, OnceLock}; +#[cfg(test)] +use std::sync::Mutex; use std::time::{Duration, Instant}; use tokio::io::{AsyncRead, AsyncWrite, AsyncWriteExt}; use tracing::{debug, info, trace, warn}; @@ -21,15 +24,15 @@ use crate::crypto::{AesCtr, SecureRandom, sha256}; use crate::error::{HandshakeResult, ProxyError}; use crate::protocol::constants::*; use crate::protocol::tls; +use crate::proxy::shared_state::ProxySharedState; use crate::stats::ReplayChecker; use crate::stream::{CryptoReader, CryptoWriter, FakeTlsReader, FakeTlsWriter}; use crate::tls_front::{TlsFrontCache, emulator}; +#[cfg(test)] use rand::RngExt; const 
ACCESS_SECRET_BYTES: usize = 16; -static INVALID_SECRET_WARNED: OnceLock>> = OnceLock::new(); const UNKNOWN_SNI_WARN_COOLDOWN_SECS: u64 = 5; -static UNKNOWN_SNI_WARN_NEXT_ALLOWED: OnceLock>> = OnceLock::new(); #[cfg(test)] const WARNED_SECRET_MAX_ENTRIES: usize = 64; #[cfg(not(test))] @@ -55,48 +58,30 @@ const AUTH_PROBE_BACKOFF_MAX_MS: u64 = 16; const AUTH_PROBE_BACKOFF_MAX_MS: u64 = 1_000; #[derive(Clone, Copy)] -struct AuthProbeState { +pub(crate) struct AuthProbeState { fail_streak: u32, blocked_until: Instant, last_seen: Instant, } #[derive(Clone, Copy)] -struct AuthProbeSaturationState { +pub(crate) struct AuthProbeSaturationState { fail_streak: u32, blocked_until: Instant, last_seen: Instant, } - -static AUTH_PROBE_STATE: OnceLock> = OnceLock::new(); -static AUTH_PROBE_SATURATION_STATE: OnceLock>> = - OnceLock::new(); -static AUTH_PROBE_EVICTION_HASHER: OnceLock = OnceLock::new(); - -fn auth_probe_state_map() -> &'static DashMap { - AUTH_PROBE_STATE.get_or_init(DashMap::new) -} - -fn auth_probe_saturation_state() -> &'static Mutex> { - AUTH_PROBE_SATURATION_STATE.get_or_init(|| Mutex::new(None)) -} - -fn auth_probe_saturation_state_lock() --> std::sync::MutexGuard<'static, Option> { - auth_probe_saturation_state() +fn unknown_sni_warn_state_lock_in( + shared: &ProxySharedState, +) -> std::sync::MutexGuard<'_, Option> { + shared + .handshake + .unknown_sni_warn_next_allowed .lock() .unwrap_or_else(|poisoned| poisoned.into_inner()) } -fn unknown_sni_warn_state_lock() -> std::sync::MutexGuard<'static, Option> { - UNKNOWN_SNI_WARN_NEXT_ALLOWED - .get_or_init(|| Mutex::new(None)) - .lock() - .unwrap_or_else(|poisoned| poisoned.into_inner()) -} - -fn should_emit_unknown_sni_warn(now: Instant) -> bool { - let mut guard = unknown_sni_warn_state_lock(); +fn should_emit_unknown_sni_warn_in(shared: &ProxySharedState, now: Instant) -> bool { + let mut guard = unknown_sni_warn_state_lock_in(shared); if let Some(next_allowed) = *guard && now < next_allowed { @@ -133,15 
+118,16 @@ fn auth_probe_state_expired(state: &AuthProbeState, now: Instant) -> bool { now.duration_since(state.last_seen) > retention } -fn auth_probe_eviction_offset(peer_ip: IpAddr, now: Instant) -> usize { - let hasher_state = AUTH_PROBE_EVICTION_HASHER.get_or_init(RandomState::new); +fn auth_probe_eviction_offset_in(shared: &ProxySharedState, peer_ip: IpAddr, now: Instant) -> usize { + let hasher_state = &shared.handshake.auth_probe_eviction_hasher; let mut hasher = hasher_state.build_hasher(); peer_ip.hash(&mut hasher); now.hash(&mut hasher); hasher.finish() as usize } -fn auth_probe_scan_start_offset( +fn auth_probe_scan_start_offset_in( + shared: &ProxySharedState, peer_ip: IpAddr, now: Instant, state_len: usize, @@ -151,12 +137,12 @@ fn auth_probe_scan_start_offset( return 0; } - auth_probe_eviction_offset(peer_ip, now) % state_len + auth_probe_eviction_offset_in(shared, peer_ip, now) % state_len } -fn auth_probe_is_throttled(peer_ip: IpAddr, now: Instant) -> bool { +fn auth_probe_is_throttled_in(shared: &ProxySharedState, peer_ip: IpAddr, now: Instant) -> bool { let peer_ip = normalize_auth_probe_ip(peer_ip); - let state = auth_probe_state_map(); + let state = &shared.handshake.auth_probe; let Some(entry) = state.get(&peer_ip) else { return false; }; @@ -168,9 +154,13 @@ fn auth_probe_is_throttled(peer_ip: IpAddr, now: Instant) -> bool { now < entry.blocked_until } -fn auth_probe_saturation_grace_exhausted(peer_ip: IpAddr, now: Instant) -> bool { +fn auth_probe_saturation_grace_exhausted_in( + shared: &ProxySharedState, + peer_ip: IpAddr, + now: Instant, +) -> bool { let peer_ip = normalize_auth_probe_ip(peer_ip); - let state = auth_probe_state_map(); + let state = &shared.handshake.auth_probe; let Some(entry) = state.get(&peer_ip) else { return false; }; @@ -183,20 +173,28 @@ fn auth_probe_saturation_grace_exhausted(peer_ip: IpAddr, now: Instant) -> bool entry.fail_streak >= AUTH_PROBE_BACKOFF_START_FAILS + AUTH_PROBE_SATURATION_GRACE_FAILS } -fn 
auth_probe_should_apply_preauth_throttle(peer_ip: IpAddr, now: Instant) -> bool { - if !auth_probe_is_throttled(peer_ip, now) { +fn auth_probe_should_apply_preauth_throttle_in( + shared: &ProxySharedState, + peer_ip: IpAddr, + now: Instant, +) -> bool { + if !auth_probe_is_throttled_in(shared, peer_ip, now) { return false; } - if !auth_probe_saturation_is_throttled(now) { + if !auth_probe_saturation_is_throttled_in(shared, now) { return true; } - auth_probe_saturation_grace_exhausted(peer_ip, now) + auth_probe_saturation_grace_exhausted_in(shared, peer_ip, now) } -fn auth_probe_saturation_is_throttled(now: Instant) -> bool { - let mut guard = auth_probe_saturation_state_lock(); +fn auth_probe_saturation_is_throttled_in(shared: &ProxySharedState, now: Instant) -> bool { + let mut guard = shared + .handshake + .auth_probe_saturation + .lock() + .unwrap_or_else(|poisoned| poisoned.into_inner()); let Some(state) = guard.as_mut() else { return false; @@ -214,8 +212,12 @@ fn auth_probe_saturation_is_throttled(now: Instant) -> bool { false } -fn auth_probe_note_saturation(now: Instant) { - let mut guard = auth_probe_saturation_state_lock(); +fn auth_probe_note_saturation_in(shared: &ProxySharedState, now: Instant) { + let mut guard = shared + .handshake + .auth_probe_saturation + .lock() + .unwrap_or_else(|poisoned| poisoned.into_inner()); match guard.as_mut() { Some(state) @@ -237,13 +239,14 @@ fn auth_probe_note_saturation(now: Instant) { } } -fn auth_probe_record_failure(peer_ip: IpAddr, now: Instant) { +fn auth_probe_record_failure_in(shared: &ProxySharedState, peer_ip: IpAddr, now: Instant) { let peer_ip = normalize_auth_probe_ip(peer_ip); - let state = auth_probe_state_map(); - auth_probe_record_failure_with_state(state, peer_ip, now); + let state = &shared.handshake.auth_probe; + auth_probe_record_failure_with_state_in(shared, state, peer_ip, now); } -fn auth_probe_record_failure_with_state( +fn auth_probe_record_failure_with_state_in( + shared: &ProxySharedState, 
state: &DashMap, peer_ip: IpAddr, now: Instant, @@ -277,7 +280,7 @@ fn auth_probe_record_failure_with_state( while state.len() >= AUTH_PROBE_TRACK_MAX_ENTRIES { rounds += 1; if rounds > 8 { - auth_probe_note_saturation(now); + auth_probe_note_saturation_in(shared, now); let mut eviction_candidate: Option<(IpAddr, u32, Instant)> = None; for entry in state.iter().take(AUTH_PROBE_PRUNE_SCAN_LIMIT) { let key = *entry.key(); @@ -320,7 +323,7 @@ fn auth_probe_record_failure_with_state( } } else { let start_offset = - auth_probe_scan_start_offset(peer_ip, now, state_len, scan_limit); + auth_probe_scan_start_offset_in(shared, peer_ip, now, state_len, scan_limit); let mut scanned = 0usize; for entry in state.iter().skip(start_offset) { let key = *entry.key(); @@ -369,11 +372,11 @@ fn auth_probe_record_failure_with_state( } let Some((evict_key, _, _)) = eviction_candidate else { - auth_probe_note_saturation(now); + auth_probe_note_saturation_in(shared, now); return; }; state.remove(&evict_key); - auth_probe_note_saturation(now); + auth_probe_note_saturation_in(shared, now); } } @@ -387,89 +390,58 @@ fn auth_probe_record_failure_with_state( } } -fn auth_probe_record_success(peer_ip: IpAddr) { +fn auth_probe_record_success_in(shared: &ProxySharedState, peer_ip: IpAddr) { let peer_ip = normalize_auth_probe_ip(peer_ip); - let state = auth_probe_state_map(); + let state = &shared.handshake.auth_probe; state.remove(&peer_ip); } #[cfg(test)] -fn clear_auth_probe_state_for_testing() { - if let Some(state) = AUTH_PROBE_STATE.get() { - state.clear(); - } - if AUTH_PROBE_SATURATION_STATE.get().is_some() { - let mut guard = auth_probe_saturation_state_lock(); - *guard = None; - } +pub(crate) fn auth_probe_record_failure_for_testing( + shared: &ProxySharedState, + peer_ip: IpAddr, + now: Instant, +) { + auth_probe_record_failure_in(shared, peer_ip, now); } #[cfg(test)] -fn auth_probe_fail_streak_for_testing(peer_ip: IpAddr) -> Option { +pub(crate) fn 
auth_probe_fail_streak_for_testing_in_shared( + shared: &ProxySharedState, + peer_ip: IpAddr, +) -> Option { let peer_ip = normalize_auth_probe_ip(peer_ip); - let state = AUTH_PROBE_STATE.get()?; - state.get(&peer_ip).map(|entry| entry.fail_streak) + shared + .handshake + .auth_probe + .get(&peer_ip) + .map(|entry| entry.fail_streak) } #[cfg(test)] -fn auth_probe_is_throttled_for_testing(peer_ip: IpAddr) -> bool { - auth_probe_is_throttled(peer_ip, Instant::now()) -} - -#[cfg(test)] -fn auth_probe_saturation_is_throttled_for_testing() -> bool { - auth_probe_saturation_is_throttled(Instant::now()) -} - -#[cfg(test)] -fn auth_probe_saturation_is_throttled_at_for_testing(now: Instant) -> bool { - auth_probe_saturation_is_throttled(now) -} - -#[cfg(test)] -fn auth_probe_test_lock() -> &'static Mutex<()> { - static TEST_LOCK: OnceLock> = OnceLock::new(); - TEST_LOCK.get_or_init(|| Mutex::new(())) -} - -#[cfg(test)] -fn unknown_sni_warn_test_lock() -> &'static Mutex<()> { - static TEST_LOCK: OnceLock> = OnceLock::new(); - TEST_LOCK.get_or_init(|| Mutex::new(())) -} - -#[cfg(test)] -fn clear_unknown_sni_warn_state_for_testing() { - if UNKNOWN_SNI_WARN_NEXT_ALLOWED.get().is_some() { - let mut guard = unknown_sni_warn_state_lock(); - *guard = None; +pub(crate) fn clear_auth_probe_state_for_testing_in_shared(shared: &ProxySharedState) { + shared.handshake.auth_probe.clear(); + match shared.handshake.auth_probe_saturation.lock() { + Ok(mut saturation) => { + *saturation = None; + } + Err(poisoned) => { + let mut saturation = poisoned.into_inner(); + *saturation = None; + shared.handshake.auth_probe_saturation.clear_poison(); + } } } -#[cfg(test)] -fn should_emit_unknown_sni_warn_for_testing(now: Instant) -> bool { - should_emit_unknown_sni_warn(now) -} - -#[cfg(test)] -fn clear_warned_secrets_for_testing() { - if let Some(warned) = INVALID_SECRET_WARNED.get() - && let Ok(mut guard) = warned.lock() - { - guard.clear(); - } -} - -#[cfg(test)] -fn warned_secrets_test_lock() -> 
&'static Mutex<()> { - static TEST_LOCK: OnceLock> = OnceLock::new(); - TEST_LOCK.get_or_init(|| Mutex::new(())) -} - -fn warn_invalid_secret_once(name: &str, reason: &str, expected: usize, got: Option) { +fn warn_invalid_secret_once_in( + shared: &ProxySharedState, + name: &str, + reason: &str, + expected: usize, + got: Option, +) { let key = (name.to_string(), reason.to_string()); - let warned = INVALID_SECRET_WARNED.get_or_init(|| Mutex::new(HashSet::new())); - let should_warn = match warned.lock() { + let should_warn = match shared.handshake.invalid_secret_warned.lock() { Ok(mut guard) => { if !guard.contains(&key) && guard.len() >= WARNED_SECRET_MAX_ENTRIES { false @@ -502,11 +474,12 @@ fn warn_invalid_secret_once(name: &str, reason: &str, expected: usize, got: Opti } } -fn decode_user_secret(name: &str, secret_hex: &str) -> Option> { +fn decode_user_secret(shared: &ProxySharedState, name: &str, secret_hex: &str) -> Option> { match hex::decode(secret_hex) { Ok(bytes) if bytes.len() == ACCESS_SECRET_BYTES => Some(bytes), Ok(bytes) => { - warn_invalid_secret_once( + warn_invalid_secret_once_in( + shared, name, "invalid_length", ACCESS_SECRET_BYTES, @@ -515,7 +488,7 @@ fn decode_user_secret(name: &str, secret_hex: &str) -> Option> { None } Err(_) => { - warn_invalid_secret_once(name, "invalid_hex", ACCESS_SECRET_BYTES, None); + warn_invalid_secret_once_in(shared, name, "invalid_hex", ACCESS_SECRET_BYTES, None); None } } @@ -543,7 +516,8 @@ fn mode_enabled_for_proto(config: &ProxyConfig, proto_tag: ProtoTag, is_tls: boo } } -fn decode_user_secrets( +fn decode_user_secrets_in( + shared: &ProxySharedState, config: &ProxyConfig, preferred_user: Option<&str>, ) -> Vec<(String, Vec)> { @@ -551,7 +525,7 @@ fn decode_user_secrets( if let Some(preferred) = preferred_user && let Some(secret_hex) = config.access.users.get(preferred) - && let Some(bytes) = decode_user_secret(preferred, secret_hex) + && let Some(bytes) = decode_user_secret(shared, preferred, secret_hex) { 
secrets.push((preferred.to_string(), bytes)); } @@ -560,7 +534,7 @@ fn decode_user_secrets( if preferred_user.is_some_and(|preferred| preferred == name.as_str()) { continue; } - if let Some(bytes) = decode_user_secret(name, secret_hex) { + if let Some(bytes) = decode_user_secret(shared, name, secret_hex) { secrets.push((name.clone(), bytes)); } } @@ -568,6 +542,86 @@ fn decode_user_secrets( secrets } +#[cfg(test)] +pub(crate) fn auth_probe_state_for_testing_in_shared( + shared: &ProxySharedState, +) -> &DashMap { + &shared.handshake.auth_probe +} + +#[cfg(test)] +pub(crate) fn auth_probe_saturation_state_for_testing_in_shared( + shared: &ProxySharedState, +) -> &Mutex> { + &shared.handshake.auth_probe_saturation +} + +#[cfg(test)] +pub(crate) fn auth_probe_saturation_state_lock_for_testing_in_shared( + shared: &ProxySharedState, +) -> std::sync::MutexGuard<'_, Option> { + shared + .handshake + .auth_probe_saturation + .lock() + .unwrap_or_else(|poisoned| poisoned.into_inner()) +} + +#[cfg(test)] +pub(crate) fn clear_unknown_sni_warn_state_for_testing_in_shared(shared: &ProxySharedState) { + let mut guard = shared + .handshake + .unknown_sni_warn_next_allowed + .lock() + .unwrap_or_else(|poisoned| poisoned.into_inner()); + *guard = None; +} + +#[cfg(test)] +pub(crate) fn should_emit_unknown_sni_warn_for_testing_in_shared( + shared: &ProxySharedState, + now: Instant, +) -> bool { + should_emit_unknown_sni_warn_in(shared, now) +} + +#[cfg(test)] +pub(crate) fn clear_warned_secrets_for_testing_in_shared(shared: &ProxySharedState) { + if let Ok(mut guard) = shared.handshake.invalid_secret_warned.lock() { + guard.clear(); + } +} + +#[cfg(test)] +pub(crate) fn warned_secrets_for_testing_in_shared( + shared: &ProxySharedState, +) -> &Mutex> { + &shared.handshake.invalid_secret_warned +} + +#[cfg(test)] +pub(crate) fn auth_probe_is_throttled_for_testing_in_shared( + shared: &ProxySharedState, + peer_ip: IpAddr, +) -> bool { + auth_probe_is_throttled_in(shared, peer_ip, 
Instant::now()) +} + +#[cfg(test)] +pub(crate) fn auth_probe_saturation_is_throttled_for_testing_in_shared( + shared: &ProxySharedState, +) -> bool { + auth_probe_saturation_is_throttled_in(shared, Instant::now()) +} + +#[cfg(test)] +pub(crate) fn auth_probe_saturation_is_throttled_at_for_testing_in_shared( + shared: &ProxySharedState, + now: Instant, +) -> bool { + auth_probe_saturation_is_throttled_in(shared, now) +} + #[inline] fn find_matching_tls_domain<'a>(config: &'a ProxyConfig, sni: &str) -> Option<&'a str> { if config.censorship.tls_domain.eq_ignore_ascii_case(sni) { @@ -635,6 +689,7 @@ impl Drop for HandshakeSuccess { } /// Handle fake TLS handshake +#[cfg(test)] pub async fn handle_tls_handshake( handshake: &[u8], reader: R, @@ -645,6 +700,65 @@ pub async fn handle_tls_handshake( rng: &SecureRandom, tls_cache: Option>, ) -> HandshakeResult<(FakeTlsReader, FakeTlsWriter, String), R, W> +where + R: AsyncRead + Unpin, + W: AsyncWrite + Unpin, +{ + let shared = ProxySharedState::new(); + handle_tls_handshake_impl( + handshake, + reader, + writer, + peer, + config, + replay_checker, + rng, + tls_cache, + shared.as_ref(), + ) + .await +} + +pub async fn handle_tls_handshake_with_shared( + handshake: &[u8], + reader: R, + writer: W, + peer: SocketAddr, + config: &ProxyConfig, + replay_checker: &ReplayChecker, + rng: &SecureRandom, + tls_cache: Option>, + shared: &ProxySharedState, +) -> HandshakeResult<(FakeTlsReader, FakeTlsWriter, String), R, W> +where + R: AsyncRead + Unpin, + W: AsyncWrite + Unpin, +{ + handle_tls_handshake_impl( + handshake, + reader, + writer, + peer, + config, + replay_checker, + rng, + tls_cache, + shared, + ) + .await +} + +async fn handle_tls_handshake_impl( + handshake: &[u8], + reader: R, + mut writer: W, + peer: SocketAddr, + config: &ProxyConfig, + replay_checker: &ReplayChecker, + rng: &SecureRandom, + tls_cache: Option>, + shared: &ProxySharedState, +) -> HandshakeResult<(FakeTlsReader, FakeTlsWriter, String), R, W> where R: 
AsyncRead + Unpin, W: AsyncWrite + Unpin, @@ -652,14 +766,14 @@ where debug!(peer = %peer, handshake_len = handshake.len(), "Processing TLS handshake"); let throttle_now = Instant::now(); - if auth_probe_should_apply_preauth_throttle(peer.ip(), throttle_now) { + if auth_probe_should_apply_preauth_throttle_in(shared, peer.ip(), throttle_now) { maybe_apply_server_hello_delay(config).await; debug!(peer = %peer, "TLS handshake rejected by pre-auth probe throttle"); return HandshakeResult::BadClient { reader, writer }; } if handshake.len() < tls::TLS_DIGEST_POS + tls::TLS_DIGEST_LEN + 1 { - auth_probe_record_failure(peer.ip(), Instant::now()); + auth_probe_record_failure_in(shared, peer.ip(), Instant::now()); maybe_apply_server_hello_delay(config).await; debug!(peer = %peer, "TLS handshake too short"); return HandshakeResult::BadClient { reader, writer }; @@ -695,11 +809,11 @@ where }; if client_sni.is_some() && matched_tls_domain.is_none() && preferred_user_hint.is_none() { - auth_probe_record_failure(peer.ip(), Instant::now()); + auth_probe_record_failure_in(shared, peer.ip(), Instant::now()); maybe_apply_server_hello_delay(config).await; let sni = client_sni.as_deref().unwrap_or_default(); let log_now = Instant::now(); - if should_emit_unknown_sni_warn(log_now) { + if should_emit_unknown_sni_warn_in(shared, log_now) { warn!( peer = %peer, sni = %sni, @@ -722,7 +836,7 @@ where }; } - let secrets = decode_user_secrets(config, preferred_user_hint); + let secrets = decode_user_secrets_in(shared, config, preferred_user_hint); let validation = match tls::validate_tls_handshake_with_replay_window( handshake, @@ -732,7 +846,7 @@ where ) { Some(v) => v, None => { - auth_probe_record_failure(peer.ip(), Instant::now()); + auth_probe_record_failure_in(shared, peer.ip(), Instant::now()); maybe_apply_server_hello_delay(config).await; debug!( peer = %peer, @@ -746,7 +860,7 @@ where // Reject known replay digests before expensive cache/domain/ALPN policy work. 
let digest_half = &validation.digest[..tls::TLS_DIGEST_HALF_LEN]; if replay_checker.check_tls_digest(digest_half) { - auth_probe_record_failure(peer.ip(), Instant::now()); + auth_probe_record_failure_in(shared, peer.ip(), Instant::now()); maybe_apply_server_hello_delay(config).await; warn!(peer = %peer, "TLS replay attack detected (duplicate digest)"); return HandshakeResult::BadClient { reader, writer }; @@ -827,7 +941,7 @@ where "TLS handshake successful" ); - auth_probe_record_success(peer.ip()); + auth_probe_record_success_in(shared, peer.ip()); HandshakeResult::Success(( FakeTlsReader::new(reader), @@ -837,6 +951,7 @@ where } /// Handle MTProto obfuscation handshake +#[cfg(test)] pub async fn handle_mtproto_handshake( handshake: &[u8; HANDSHAKE_LEN], reader: R, @@ -847,6 +962,65 @@ pub async fn handle_mtproto_handshake( is_tls: bool, preferred_user: Option<&str>, ) -> HandshakeResult<(CryptoReader, CryptoWriter, HandshakeSuccess), R, W> +where + R: AsyncRead + Unpin + Send, + W: AsyncWrite + Unpin + Send, +{ + let shared = ProxySharedState::new(); + handle_mtproto_handshake_impl( + handshake, + reader, + writer, + peer, + config, + replay_checker, + is_tls, + preferred_user, + shared.as_ref(), + ) + .await +} + +pub async fn handle_mtproto_handshake_with_shared( + handshake: &[u8; HANDSHAKE_LEN], + reader: R, + writer: W, + peer: SocketAddr, + config: &ProxyConfig, + replay_checker: &ReplayChecker, + is_tls: bool, + preferred_user: Option<&str>, + shared: &ProxySharedState, +) -> HandshakeResult<(CryptoReader, CryptoWriter, HandshakeSuccess), R, W> +where + R: AsyncRead + Unpin + Send, + W: AsyncWrite + Unpin + Send, +{ + handle_mtproto_handshake_impl( + handshake, + reader, + writer, + peer, + config, + replay_checker, + is_tls, + preferred_user, + shared, + ) + .await +} + +async fn handle_mtproto_handshake_impl( + handshake: &[u8; HANDSHAKE_LEN], + reader: R, + writer: W, + peer: SocketAddr, + config: &ProxyConfig, + replay_checker: &ReplayChecker, + 
is_tls: bool, + preferred_user: Option<&str>, + shared: &ProxySharedState, +) -> HandshakeResult<(CryptoReader, CryptoWriter, HandshakeSuccess), R, W> where R: AsyncRead + Unpin + Send, W: AsyncWrite + Unpin + Send, @@ -862,7 +1036,7 @@ where ); let throttle_now = Instant::now(); - if auth_probe_should_apply_preauth_throttle(peer.ip(), throttle_now) { + if auth_probe_should_apply_preauth_throttle_in(shared, peer.ip(), throttle_now) { maybe_apply_server_hello_delay(config).await; debug!(peer = %peer, "MTProto handshake rejected by pre-auth probe throttle"); return HandshakeResult::BadClient { reader, writer }; @@ -872,7 +1046,7 @@ where let enc_prekey_iv: Vec = dec_prekey_iv.iter().rev().copied().collect(); - let decoded_users = decode_user_secrets(config, preferred_user); + let decoded_users = decode_user_secrets_in(shared, config, preferred_user); for (user, secret) in decoded_users { let dec_prekey = &dec_prekey_iv[..PREKEY_LEN]; @@ -932,7 +1106,7 @@ where // entry from the cache. We accept the cost of performing the full // authentication check first to avoid poisoning the replay cache. 
if replay_checker.check_and_add_handshake(dec_prekey_iv) { - auth_probe_record_failure(peer.ip(), Instant::now()); + auth_probe_record_failure_in(shared, peer.ip(), Instant::now()); maybe_apply_server_hello_delay(config).await; warn!(peer = %peer, user = %user, "MTProto replay attack detected"); return HandshakeResult::BadClient { reader, writer }; @@ -959,7 +1133,7 @@ where "MTProto handshake successful" ); - auth_probe_record_success(peer.ip()); + auth_probe_record_success_in(shared, peer.ip()); let max_pending = config.general.crypto_pending_buffer; return HandshakeResult::Success(( @@ -969,7 +1143,7 @@ where )); } - auth_probe_record_failure(peer.ip(), Instant::now()); + auth_probe_record_failure_in(shared, peer.ip(), Instant::now()); maybe_apply_server_hello_delay(config).await; debug!(peer = %peer, "MTProto handshake: no matching user found"); HandshakeResult::BadClient { reader, writer } diff --git a/src/proxy/middle_relay.rs b/src/proxy/middle_relay.rs index 104cedf..6d3fd1a 100644 --- a/src/proxy/middle_relay.rs +++ b/src/proxy/middle_relay.rs @@ -1,14 +1,16 @@ -use std::collections::hash_map::RandomState; +#[cfg(test)] +use std::collections::hash_map::DefaultHasher; use std::collections::{BTreeSet, HashMap}; #[cfg(test)] use std::future::Future; use std::hash::{BuildHasher, Hash}; +#[cfg(test)] +use std::hash::Hasher; use std::net::{IpAddr, SocketAddr}; use std::sync::atomic::{AtomicU64, Ordering}; -use std::sync::{Arc, Mutex, OnceLock}; +use std::sync::Arc; use std::time::{Duration, Instant}; -use dashmap::DashMap; use tokio::io::{AsyncRead, AsyncReadExt, AsyncWrite, AsyncWriteExt}; use tokio::sync::{mpsc, oneshot, watch}; use tokio::time::timeout; @@ -19,6 +21,7 @@ use crate::crypto::SecureRandom; use crate::error::{ProxyError, Result}; use crate::protocol::constants::{secure_padding_len, *}; use crate::proxy::handshake::HandshakeSuccess; +use crate::proxy::shared_state::ProxySharedState; use crate::proxy::route_mode::{ ROUTE_SWITCH_ERROR_MSG, 
RelayRouteMode, RouteCutoverState, affected_cutover_state, cutover_stagger_delay, @@ -51,19 +54,9 @@ const ME_D2C_FLUSH_BATCH_MAX_BYTES_MIN: usize = 4096; const ME_D2C_FRAME_BUF_SHRINK_HYSTERESIS_FACTOR: usize = 2; const ME_D2C_SINGLE_WRITE_COALESCE_MAX_BYTES: usize = 128 * 1024; const QUOTA_RESERVE_SPIN_RETRIES: usize = 32; -static DESYNC_DEDUP: OnceLock> = OnceLock::new(); -static DESYNC_DEDUP_PREVIOUS: OnceLock> = OnceLock::new(); -static DESYNC_HASHER: OnceLock = OnceLock::new(); -static DESYNC_FULL_CACHE_LAST_EMIT_AT: OnceLock>> = OnceLock::new(); -static DESYNC_DEDUP_ROTATION_STATE: OnceLock> = OnceLock::new(); -// Invariant for async callers: -// this std::sync::Mutex is allowed only because critical sections are short, -// synchronous, and MUST never cross an `.await`. -static RELAY_IDLE_CANDIDATE_REGISTRY: OnceLock> = OnceLock::new(); -static RELAY_IDLE_MARK_SEQ: AtomicU64 = AtomicU64::new(0); #[derive(Default)] -struct DesyncDedupRotationState { +pub(crate) struct DesyncDedupRotationState { current_started_at: Option, } @@ -80,7 +73,7 @@ struct RelayForensicsState { } #[derive(Default)] -struct RelayIdleCandidateRegistry { +pub(crate) struct RelayIdleCandidateRegistry { by_conn_id: HashMap, ordered: BTreeSet<(u64, u64)>, pressure_event_seq: u64, @@ -93,20 +86,14 @@ struct RelayIdleCandidateMeta { mark_pressure_seq: u64, } -fn relay_idle_candidate_registry() -> &'static Mutex { - RELAY_IDLE_CANDIDATE_REGISTRY.get_or_init(|| Mutex::new(RelayIdleCandidateRegistry::default())) -} - -fn relay_idle_candidate_registry_lock() -> std::sync::MutexGuard<'static, RelayIdleCandidateRegistry> -{ - // Keep lock scope narrow and synchronous: callers must drop guard before any `.await`. 
- let registry = relay_idle_candidate_registry(); +fn relay_idle_candidate_registry_lock_in( + shared: &ProxySharedState, +) -> std::sync::MutexGuard<'_, RelayIdleCandidateRegistry> { + let registry = &shared.middle_relay.relay_idle_registry; match registry.lock() { Ok(guard) => guard, Err(poisoned) => { let mut guard = poisoned.into_inner(); - // Fail closed after panic while holding registry lock: drop all - // candidates and pressure cursors to avoid stale cross-session state. *guard = RelayIdleCandidateRegistry::default(); registry.clear_poison(); guard @@ -114,14 +101,16 @@ fn relay_idle_candidate_registry_lock() -> std::sync::MutexGuard<'static, RelayI } } -fn mark_relay_idle_candidate(conn_id: u64) -> bool { - let mut guard = relay_idle_candidate_registry_lock(); +fn mark_relay_idle_candidate_in(shared: &ProxySharedState, conn_id: u64) -> bool { + let mut guard = relay_idle_candidate_registry_lock_in(shared); if guard.by_conn_id.contains_key(&conn_id) { return false; } - let mark_order_seq = RELAY_IDLE_MARK_SEQ + let mark_order_seq = shared + .middle_relay + .relay_idle_mark_seq .fetch_add(1, Ordering::Relaxed) .saturating_add(1); let meta = RelayIdleCandidateMeta { @@ -133,36 +122,31 @@ fn mark_relay_idle_candidate(conn_id: u64) -> bool { true } -fn clear_relay_idle_candidate(conn_id: u64) { - let mut guard = relay_idle_candidate_registry_lock(); +fn clear_relay_idle_candidate_in(shared: &ProxySharedState, conn_id: u64) { + let mut guard = relay_idle_candidate_registry_lock_in(shared); if let Some(meta) = guard.by_conn_id.remove(&conn_id) { guard.ordered.remove(&(meta.mark_order_seq, conn_id)); } } -#[cfg(test)] -fn oldest_relay_idle_candidate() -> Option { - let guard = relay_idle_candidate_registry_lock(); - guard.ordered.iter().next().map(|(_, conn_id)| *conn_id) -} - -fn note_relay_pressure_event() { - let mut guard = relay_idle_candidate_registry_lock(); +fn note_relay_pressure_event_in(shared: &ProxySharedState) { + let mut guard = 
relay_idle_candidate_registry_lock_in(shared); guard.pressure_event_seq = guard.pressure_event_seq.wrapping_add(1); } -fn relay_pressure_event_seq() -> u64 { - let guard = relay_idle_candidate_registry_lock(); +fn relay_pressure_event_seq_in(shared: &ProxySharedState) -> u64 { + let guard = relay_idle_candidate_registry_lock_in(shared); guard.pressure_event_seq } -fn maybe_evict_idle_candidate_on_pressure( +fn maybe_evict_idle_candidate_on_pressure_in( + shared: &ProxySharedState, conn_id: u64, seen_pressure_seq: &mut u64, stats: &Stats, ) -> bool { - let mut guard = relay_idle_candidate_registry_lock(); + let mut guard = relay_idle_candidate_registry_lock_in(shared); let latest_pressure_seq = guard.pressure_event_seq; if latest_pressure_seq == *seen_pressure_seq { @@ -192,7 +176,6 @@ fn maybe_evict_idle_candidate_on_pressure( return false; }; - // Pressure events that happened before candidate soft-mark are stale for this candidate. if latest_pressure_seq == candidate_meta.mark_pressure_seq { return false; } @@ -205,15 +188,6 @@ fn maybe_evict_idle_candidate_on_pressure( true } -#[cfg(test)] -fn clear_relay_idle_pressure_state_for_testing() { - if RELAY_IDLE_CANDIDATE_REGISTRY.get().is_some() { - let mut guard = relay_idle_candidate_registry_lock(); - *guard = RelayIdleCandidateRegistry::default(); - } - RELAY_IDLE_MARK_SEQ.store(0, Ordering::Relaxed); -} - #[derive(Clone, Copy)] struct MeD2cFlushPolicy { max_frames: usize, @@ -235,31 +209,41 @@ struct RelayClientIdlePolicy { impl RelayClientIdlePolicy { fn from_config(config: &ProxyConfig) -> Self { + let frame_read_timeout = + Duration::from_secs(config.timeouts.relay_client_idle_hard_secs.max(1)); + if !config.timeouts.relay_idle_policy_v2_enabled { + return Self::disabled(frame_read_timeout); + } + + let soft_idle = Duration::from_secs(config.timeouts.relay_client_idle_soft_secs.max(1)); + let hard_idle = Duration::from_secs(config.timeouts.relay_client_idle_hard_secs.max(1)); + let 
grace_after_downstream_activity = Duration::from_secs( + config + .timeouts + .relay_idle_grace_after_downstream_activity_secs, + ); + Self { - enabled: config.timeouts.relay_idle_policy_v2_enabled, - soft_idle: Duration::from_secs(config.timeouts.relay_client_idle_soft_secs.max(1)), - hard_idle: Duration::from_secs(config.timeouts.relay_client_idle_hard_secs.max(1)), - grace_after_downstream_activity: Duration::from_secs( - config - .timeouts - .relay_idle_grace_after_downstream_activity_secs, - ), - legacy_frame_read_timeout: Duration::from_secs(config.timeouts.client_handshake.max(1)), + enabled: true, + soft_idle, + hard_idle, + grace_after_downstream_activity, + legacy_frame_read_timeout: frame_read_timeout, } } - #[cfg(test)] fn disabled(frame_read_timeout: Duration) -> Self { Self { enabled: false, - soft_idle: Duration::from_secs(0), - hard_idle: Duration::from_secs(0), - grace_after_downstream_activity: Duration::from_secs(0), + soft_idle: frame_read_timeout, + hard_idle: frame_read_timeout, + grace_after_downstream_activity: Duration::ZERO, legacy_frame_read_timeout: frame_read_timeout, } } } +#[derive(Clone, Copy)] struct RelayClientIdleState { last_client_frame_at: Instant, soft_idle_marked: bool, @@ -303,24 +287,39 @@ impl MeD2cFlushPolicy { } } +#[cfg(test)] fn hash_value(value: &T) -> u64 { - let state = DESYNC_HASHER.get_or_init(RandomState::new); - state.hash_one(value) + let mut hasher = DefaultHasher::new(); + value.hash(&mut hasher); + hasher.finish() } +fn hash_value_in(shared: &ProxySharedState, value: &T) -> u64 { + shared.middle_relay.desync_hasher.hash_one(value) +} + +#[cfg(test)] fn hash_ip(ip: IpAddr) -> u64 { hash_value(&ip) } -fn should_emit_full_desync(key: u64, all_full: bool, now: Instant) -> bool { +fn hash_ip_in(shared: &ProxySharedState, ip: IpAddr) -> u64 { + hash_value_in(shared, &ip) +} + +fn should_emit_full_desync_in( + shared: &ProxySharedState, + key: u64, + all_full: bool, + now: Instant, +) -> bool { if all_full { return 
true; } - let dedup_current = DESYNC_DEDUP.get_or_init(DashMap::new); - let dedup_previous = DESYNC_DEDUP_PREVIOUS.get_or_init(DashMap::new); - let rotation_state = - DESYNC_DEDUP_ROTATION_STATE.get_or_init(|| Mutex::new(DesyncDedupRotationState::default())); + let dedup_current = &shared.middle_relay.desync_dedup; + let dedup_previous = &shared.middle_relay.desync_dedup_previous; + let rotation_state = &shared.middle_relay.desync_dedup_rotation_state; let mut state = match rotation_state.lock() { Ok(guard) => guard, @@ -366,8 +365,6 @@ fn should_emit_full_desync(key: u64, all_full: bool, now: Instant) -> bool { None => true, }; if within_window { - // Keep the original timestamp when promoting from previous bucket, - // so dedup expiry remains tied to first-seen time. dedup_current.insert(key, seen_at); return false; } @@ -375,8 +372,6 @@ fn should_emit_full_desync(key: u64, all_full: bool, now: Instant) -> bool { } if dedup_current.len() >= DESYNC_DEDUP_MAX_ENTRIES { - // Bounded eviction path: rotate buckets instead of scanning/evicting - // arbitrary entries from a saturated single map. 
dedup_previous.clear(); for entry in dedup_current.iter() { dedup_previous.insert(*entry.key(), *entry.value()); @@ -384,15 +379,15 @@ fn should_emit_full_desync(key: u64, all_full: bool, now: Instant) -> bool { dedup_current.clear(); state.current_started_at = Some(now); dedup_current.insert(key, now); - should_emit_full_desync_full_cache(now) + should_emit_full_desync_full_cache_in(shared, now) } else { dedup_current.insert(key, now); true } } -fn should_emit_full_desync_full_cache(now: Instant) -> bool { - let gate = DESYNC_FULL_CACHE_LAST_EMIT_AT.get_or_init(|| Mutex::new(None)); +fn should_emit_full_desync_full_cache_in(shared: &ProxySharedState, now: Instant) -> bool { + let gate = &shared.middle_relay.desync_full_cache_last_emit_at; let Ok(mut last_emit_at) = gate.lock() else { return false; }; @@ -417,46 +412,6 @@ fn should_emit_full_desync_full_cache(now: Instant) -> bool { } } -#[cfg(test)] -fn clear_desync_dedup_for_testing() { - if let Some(dedup) = DESYNC_DEDUP.get() { - dedup.clear(); - } - if let Some(dedup_previous) = DESYNC_DEDUP_PREVIOUS.get() { - dedup_previous.clear(); - } - if let Some(rotation_state) = DESYNC_DEDUP_ROTATION_STATE.get() { - match rotation_state.lock() { - Ok(mut guard) => { - *guard = DesyncDedupRotationState::default(); - } - Err(poisoned) => { - let mut guard = poisoned.into_inner(); - *guard = DesyncDedupRotationState::default(); - rotation_state.clear_poison(); - } - } - } - if let Some(last_emit_at) = DESYNC_FULL_CACHE_LAST_EMIT_AT.get() { - match last_emit_at.lock() { - Ok(mut guard) => { - *guard = None; - } - Err(poisoned) => { - let mut guard = poisoned.into_inner(); - *guard = None; - last_emit_at.clear_poison(); - } - } - } -} - -#[cfg(test)] -fn desync_dedup_test_lock() -> &'static Mutex<()> { - static TEST_LOCK: OnceLock> = OnceLock::new(); - TEST_LOCK.get_or_init(|| Mutex::new(())) -} - fn desync_forensics_len_bytes(len: usize) -> ([u8; 4], bool) { match u32::try_from(len) { Ok(value) => (value.to_le_bytes(), 
false), @@ -464,7 +419,8 @@ fn desync_forensics_len_bytes(len: usize) -> ([u8; 4], bool) { } } -fn report_desync_frame_too_large( +fn report_desync_frame_too_large_in( + shared: &ProxySharedState, state: &RelayForensicsState, proto_tag: ProtoTag, frame_counter: u64, @@ -482,13 +438,13 @@ fn report_desync_frame_too_large( .map(|b| matches!(b[0], b'G' | b'P' | b'H' | b'C' | b'D')) .unwrap_or(false); let now = Instant::now(); - let dedup_key = hash_value(&( + let dedup_key = hash_value_in(shared, &( state.user.as_str(), state.peer_hash, proto_tag, DESYNC_ERROR_CLASS, )); - let emit_full = should_emit_full_desync(dedup_key, state.desync_all_full, now); + let emit_full = should_emit_full_desync_in(shared, dedup_key, state.desync_all_full, now); let duration_ms = state.started_at.elapsed().as_millis() as u64; let bytes_me2c = state.bytes_me2c.load(Ordering::Relaxed); @@ -557,6 +513,29 @@ fn report_desync_frame_too_large( )) } +#[cfg(test)] +fn report_desync_frame_too_large( + state: &RelayForensicsState, + proto_tag: ProtoTag, + frame_counter: u64, + max_frame: usize, + len: usize, + raw_len_bytes: Option<[u8; 4]>, + stats: &Stats, +) -> ProxyError { + let shared = ProxySharedState::new(); + report_desync_frame_too_large_in( + shared.as_ref(), + state, + proto_tag, + frame_counter, + max_frame, + len, + raw_len_bytes, + stats, + ) +} + fn should_yield_c2me_sender(sent_since_yield: usize, has_backlog: bool) -> bool { has_backlog && sent_since_yield >= C2ME_SENDER_FAIRNESS_BUDGET } @@ -629,19 +608,263 @@ fn observe_me_d2c_flush_event( } #[cfg(test)] -fn relay_idle_pressure_test_guard() -> &'static Mutex<()> { - static TEST_LOCK: OnceLock> = OnceLock::new(); - TEST_LOCK.get_or_init(|| Mutex::new(())) +pub(crate) fn mark_relay_idle_candidate_for_testing(shared: &ProxySharedState, conn_id: u64) -> bool { + let registry = &shared.middle_relay.relay_idle_registry; + let mut guard = match registry.lock() { + Ok(guard) => guard, + Err(poisoned) => { + let mut guard = 
poisoned.into_inner(); + *guard = RelayIdleCandidateRegistry::default(); + registry.clear_poison(); + guard + } + }; + + if guard.by_conn_id.contains_key(&conn_id) { + return false; + } + + let mark_order_seq = shared + .middle_relay + .relay_idle_mark_seq + .fetch_add(1, Ordering::Relaxed); + let mark_pressure_seq = guard.pressure_event_seq; + let meta = RelayIdleCandidateMeta { + mark_order_seq, + mark_pressure_seq, + }; + guard.by_conn_id.insert(conn_id, meta); + guard.ordered.insert((mark_order_seq, conn_id)); + true } #[cfg(test)] -pub(crate) fn relay_idle_pressure_test_scope() -> std::sync::MutexGuard<'static, ()> { - relay_idle_pressure_test_guard() - .lock() - .unwrap_or_else(|poisoned| poisoned.into_inner()) +pub(crate) fn oldest_relay_idle_candidate_for_testing(shared: &ProxySharedState) -> Option { + let registry = &shared.middle_relay.relay_idle_registry; + let guard = match registry.lock() { + Ok(guard) => guard, + Err(poisoned) => { + let mut guard = poisoned.into_inner(); + *guard = RelayIdleCandidateRegistry::default(); + registry.clear_poison(); + guard + } + }; + guard.ordered.iter().next().map(|(_, conn_id)| *conn_id) } -async fn enqueue_c2me_command( +#[cfg(test)] +pub(crate) fn clear_relay_idle_candidate_for_testing(shared: &ProxySharedState, conn_id: u64) { + let registry = &shared.middle_relay.relay_idle_registry; + let mut guard = match registry.lock() { + Ok(guard) => guard, + Err(poisoned) => { + let mut guard = poisoned.into_inner(); + *guard = RelayIdleCandidateRegistry::default(); + registry.clear_poison(); + guard + } + }; + if let Some(meta) = guard.by_conn_id.remove(&conn_id) { + guard.ordered.remove(&(meta.mark_order_seq, conn_id)); + } +} + +#[cfg(test)] +pub(crate) fn clear_relay_idle_pressure_state_for_testing_in_shared(shared: &ProxySharedState) { + if let Ok(mut guard) = shared.middle_relay.relay_idle_registry.lock() { + *guard = RelayIdleCandidateRegistry::default(); + } + shared + .middle_relay + .relay_idle_mark_seq + 
.store(0, Ordering::Relaxed); +} + +#[cfg(test)] +pub(crate) fn note_relay_pressure_event_for_testing(shared: &ProxySharedState) { + note_relay_pressure_event_in(shared); +} + +#[cfg(test)] +pub(crate) fn relay_pressure_event_seq_for_testing(shared: &ProxySharedState) -> u64 { + relay_pressure_event_seq_in(shared) +} + +#[cfg(test)] +pub(crate) fn relay_idle_mark_seq_for_testing(shared: &ProxySharedState) -> u64 { + shared.middle_relay.relay_idle_mark_seq.load(Ordering::Relaxed) +} + +#[cfg(test)] +pub(crate) fn maybe_evict_idle_candidate_on_pressure_for_testing( + shared: &ProxySharedState, + conn_id: u64, + seen_pressure_seq: &mut u64, + stats: &Stats, +) -> bool { + maybe_evict_idle_candidate_on_pressure_in(shared, conn_id, seen_pressure_seq, stats) +} + +#[cfg(test)] +pub(crate) fn set_relay_pressure_state_for_testing( + shared: &ProxySharedState, + pressure_event_seq: u64, + pressure_consumed_seq: u64, +) { + let registry = &shared.middle_relay.relay_idle_registry; + let mut guard = match registry.lock() { + Ok(guard) => guard, + Err(poisoned) => { + let mut guard = poisoned.into_inner(); + *guard = RelayIdleCandidateRegistry::default(); + registry.clear_poison(); + guard + } + }; + guard.pressure_event_seq = pressure_event_seq; + guard.pressure_consumed_seq = pressure_consumed_seq; +} + +#[cfg(test)] +pub(crate) fn should_emit_full_desync_for_testing( + shared: &ProxySharedState, + key: u64, + all_full: bool, + now: Instant, +) -> bool { + if all_full { + return true; + } + + let dedup_current = &shared.middle_relay.desync_dedup; + let dedup_previous = &shared.middle_relay.desync_dedup_previous; + + let Ok(mut state) = shared.middle_relay.desync_dedup_rotation_state.lock() else { + return false; + }; + + let rotate_now = match state.current_started_at { + Some(current_started_at) => match now.checked_duration_since(current_started_at) { + Some(elapsed) => elapsed >= DESYNC_DEDUP_WINDOW, + None => true, + }, + None => true, + }; + if rotate_now { + 
dedup_previous.clear(); + for entry in dedup_current.iter() { + dedup_previous.insert(*entry.key(), *entry.value()); + } + dedup_current.clear(); + state.current_started_at = Some(now); + } + + if let Some(seen_at) = dedup_current.get(&key).map(|entry| *entry.value()) { + let within_window = match now.checked_duration_since(seen_at) { + Some(elapsed) => elapsed < DESYNC_DEDUP_WINDOW, + None => true, + }; + if within_window { + return false; + } + dedup_current.insert(key, now); + return true; + } + + if let Some(seen_at) = dedup_previous.get(&key).map(|entry| *entry.value()) { + let within_window = match now.checked_duration_since(seen_at) { + Some(elapsed) => elapsed < DESYNC_DEDUP_WINDOW, + None => true, + }; + if within_window { + dedup_current.insert(key, seen_at); + return false; + } + dedup_previous.remove(&key); + } + + if dedup_current.len() >= DESYNC_DEDUP_MAX_ENTRIES { + dedup_previous.clear(); + for entry in dedup_current.iter() { + dedup_previous.insert(*entry.key(), *entry.value()); + } + dedup_current.clear(); + state.current_started_at = Some(now); + dedup_current.insert(key, now); + let Ok(mut last_emit_at) = shared.middle_relay.desync_full_cache_last_emit_at.lock() else { + return false; + }; + return match *last_emit_at { + None => { + *last_emit_at = Some(now); + true + } + Some(last) => { + let Some(elapsed) = now.checked_duration_since(last) else { + *last_emit_at = Some(now); + return true; + }; + if elapsed >= DESYNC_FULL_CACHE_EMIT_MIN_INTERVAL { + *last_emit_at = Some(now); + true + } else { + false + } + } + }; + } + + dedup_current.insert(key, now); + true +} + +#[cfg(test)] +pub(crate) fn clear_desync_dedup_for_testing_in_shared(shared: &ProxySharedState) { + shared.middle_relay.desync_dedup.clear(); + shared.middle_relay.desync_dedup_previous.clear(); + if let Ok(mut rotation_state) = shared.middle_relay.desync_dedup_rotation_state.lock() { + *rotation_state = DesyncDedupRotationState::default(); + } + if let Ok(mut last_emit_at) = 
shared.middle_relay.desync_full_cache_last_emit_at.lock() { + *last_emit_at = None; + } +} + +#[cfg(test)] +pub(crate) fn desync_dedup_len_for_testing(shared: &ProxySharedState) -> usize { + shared.middle_relay.desync_dedup.len() +} + +#[cfg(test)] +pub(crate) fn desync_dedup_insert_for_testing(shared: &ProxySharedState, key: u64, at: Instant) { + shared.middle_relay.desync_dedup.insert(key, at); +} + +#[cfg(test)] +pub(crate) fn desync_dedup_get_for_testing( + shared: &ProxySharedState, + key: u64, +) -> Option { + shared + .middle_relay + .desync_dedup + .get(&key) + .map(|entry| *entry.value()) +} + +#[cfg(test)] +pub(crate) fn desync_dedup_keys_for_testing(shared: &ProxySharedState) -> std::collections::HashSet { + shared + .middle_relay + .desync_dedup + .iter() + .map(|entry| *entry.key()) + .collect() +} + +async fn enqueue_c2me_command_in( + shared: &ProxySharedState, tx: &mpsc::Sender, cmd: C2MeCommand, send_timeout: Option, @@ -653,7 +876,7 @@ async fn enqueue_c2me_command( Err(mpsc::error::TrySendError::Full(cmd)) => { stats.increment_me_c2me_send_full_total(); stats.increment_me_c2me_send_high_water_total(); - note_relay_pressure_event(); + note_relay_pressure_event_in(shared); // Cooperative yield reduces burst catch-up when the per-conn queue is near saturation. 
if tx.capacity() <= C2ME_SOFT_PRESSURE_MIN_FREE_SLOTS { tokio::task::yield_now().await; @@ -682,6 +905,17 @@ async fn enqueue_c2me_command( } } +#[cfg(test)] +async fn enqueue_c2me_command( + tx: &mpsc::Sender, + cmd: C2MeCommand, + send_timeout: Option, + stats: &Stats, +) -> std::result::Result<(), mpsc::error::SendError> { + let shared = ProxySharedState::new(); + enqueue_c2me_command_in(shared.as_ref(), tx, cmd, send_timeout, stats).await +} + #[cfg(test)] async fn run_relay_test_step_timeout(context: &'static str, fut: F) -> T where @@ -705,6 +939,7 @@ pub(crate) async fn handle_via_middle_proxy( mut route_rx: watch::Receiver, route_snapshot: RouteCutoverState, session_id: u64, + shared: Arc, ) -> Result<()> where R: AsyncRead + Unpin + Send + 'static, @@ -735,7 +970,7 @@ where conn_id, user: user.clone(), peer, - peer_hash: hash_ip(peer.ip()), + peer_hash: hash_ip_in(shared.as_ref(), peer.ip()), started_at: Instant::now(), bytes_c2me: 0, bytes_me2c: bytes_me2c.clone(), @@ -1184,10 +1419,11 @@ where let mut client_closed = false; let mut frame_counter: u64 = 0; let mut route_watch_open = true; - let mut seen_pressure_seq = relay_pressure_event_seq(); + let mut seen_pressure_seq = relay_pressure_event_seq_in(shared.as_ref()); loop { if relay_idle_policy.enabled - && maybe_evict_idle_candidate_on_pressure( + && maybe_evict_idle_candidate_on_pressure_in( + shared.as_ref(), conn_id, &mut seen_pressure_seq, stats.as_ref(), @@ -1199,7 +1435,8 @@ where user = %user, "Middle-relay pressure eviction for idle-candidate session" ); - let _ = enqueue_c2me_command( + let _ = enqueue_c2me_command_in( + shared.as_ref(), &c2me_tx, C2MeCommand::Close, c2me_send_timeout, @@ -1224,7 +1461,8 @@ where "Cutover affected middle session, closing client connection" ); tokio::time::sleep(delay).await; - let _ = enqueue_c2me_command( + let _ = enqueue_c2me_command_in( + shared.as_ref(), &c2me_tx, C2MeCommand::Close, c2me_send_timeout, @@ -1241,7 +1479,7 @@ where route_watch_open = 
false; } } - payload_result = read_client_payload_with_idle_policy( + payload_result = read_client_payload_with_idle_policy_in( &mut crypto_reader, proto_tag, frame_limit, @@ -1249,6 +1487,7 @@ where &forensics, &mut frame_counter, &stats, + shared.as_ref(), &relay_idle_policy, &mut relay_idle_state, last_downstream_activity_ms.as_ref(), @@ -1288,7 +1527,8 @@ where flags |= RPC_FLAG_NOT_ENCRYPTED; } // Keep client read loop lightweight: route heavy ME send path via a dedicated task. - if enqueue_c2me_command( + if enqueue_c2me_command_in( + shared.as_ref(), &c2me_tx, C2MeCommand::Data { payload, flags }, c2me_send_timeout, @@ -1304,7 +1544,8 @@ where Ok(None) => { debug!(conn_id, "Client EOF"); client_closed = true; - let _ = enqueue_c2me_command( + let _ = enqueue_c2me_command_in( + shared.as_ref(), &c2me_tx, C2MeCommand::Close, c2me_send_timeout, @@ -1359,7 +1600,7 @@ where frames_ok = frame_counter, "ME relay cleanup" ); - clear_relay_idle_candidate(conn_id); + clear_relay_idle_candidate_in(shared.as_ref(), conn_id); me_pool.registry().unregister(conn_id).await; buffer_pool.trim_to(buffer_pool.max_buffers().min(64)); let pool_snapshot = buffer_pool.stats(); @@ -1371,7 +1612,7 @@ where result } -async fn read_client_payload_with_idle_policy( +async fn read_client_payload_with_idle_policy_in( client_reader: &mut CryptoReader, proto_tag: ProtoTag, max_frame: usize, @@ -1379,6 +1620,7 @@ async fn read_client_payload_with_idle_policy( forensics: &RelayForensicsState, frame_counter: &mut u64, stats: &Stats, + shared: &ProxySharedState, idle_policy: &RelayClientIdlePolicy, idle_state: &mut RelayClientIdleState, last_downstream_activity_ms: &AtomicU64, @@ -1398,6 +1640,7 @@ where session_started_at: Instant, forensics: &RelayForensicsState, stats: &Stats, + shared: &ProxySharedState, read_label: &'static str, ) -> Result<()> where @@ -1433,7 +1676,7 @@ where let hard_deadline = hard_deadline(idle_policy, idle_state, session_started_at, downstream_ms); if now >= 
hard_deadline { - clear_relay_idle_candidate(forensics.conn_id); + clear_relay_idle_candidate_in(shared, forensics.conn_id); stats.increment_relay_idle_hard_close_total(); let client_idle_secs = now .saturating_duration_since(idle_state.last_client_frame_at) @@ -1471,7 +1714,7 @@ where >= idle_policy.soft_idle { idle_state.soft_idle_marked = true; - if mark_relay_idle_candidate(forensics.conn_id) { + if mark_relay_idle_candidate_in(shared, forensics.conn_id) { stats.increment_relay_idle_soft_mark_total(); } info!( @@ -1541,6 +1784,7 @@ where session_started_at, forensics, stats, + shared, "abridged.first_len_byte", ) .await @@ -1564,6 +1808,7 @@ where session_started_at, forensics, stats, + shared, "abridged.extended_len", ) .await?; @@ -1588,6 +1833,7 @@ where session_started_at, forensics, stats, + shared, "len_prefix", ) .await @@ -1644,7 +1890,8 @@ where } if len > max_frame { - return Err(report_desync_frame_too_large( + return Err(report_desync_frame_too_large_in( + shared, forensics, proto_tag, *frame_counter, @@ -1686,6 +1933,7 @@ where session_started_at, forensics, stats, + shared, "payload", ) .await?; @@ -1697,11 +1945,46 @@ where *frame_counter += 1; idle_state.on_client_frame(Instant::now()); idle_state.tiny_frame_debt = idle_state.tiny_frame_debt.saturating_sub(1); - clear_relay_idle_candidate(forensics.conn_id); + clear_relay_idle_candidate_in(shared, forensics.conn_id); return Ok(Some((payload, quickack))); } } +#[cfg(test)] +async fn read_client_payload_with_idle_policy( + client_reader: &mut CryptoReader, + proto_tag: ProtoTag, + max_frame: usize, + buffer_pool: &Arc, + forensics: &RelayForensicsState, + frame_counter: &mut u64, + stats: &Stats, + idle_policy: &RelayClientIdlePolicy, + idle_state: &mut RelayClientIdleState, + last_downstream_activity_ms: &AtomicU64, + session_started_at: Instant, +) -> Result> +where + R: AsyncRead + Unpin + Send + 'static, +{ + let shared = ProxySharedState::new(); + read_client_payload_with_idle_policy_in( + 
client_reader, + proto_tag, + max_frame, + buffer_pool, + forensics, + frame_counter, + stats, + shared.as_ref(), + idle_policy, + idle_state, + last_downstream_activity_ms, + session_started_at, + ) + .await +} + #[cfg(test)] async fn read_client_payload_legacy( client_reader: &mut CryptoReader, @@ -1717,10 +2000,11 @@ where R: AsyncRead + Unpin + Send + 'static, { let now = Instant::now(); + let shared = ProxySharedState::new(); let mut idle_state = RelayClientIdleState::new(now); let last_downstream_activity_ms = AtomicU64::new(0); let idle_policy = RelayClientIdlePolicy::disabled(frame_read_timeout); - read_client_payload_with_idle_policy( + read_client_payload_with_idle_policy_in( client_reader, proto_tag, max_frame, @@ -1728,6 +2012,7 @@ where forensics, frame_counter, stats, + shared.as_ref(), &idle_policy, &mut idle_state, &last_downstream_activity_ms, diff --git a/src/proxy/mod.rs b/src/proxy/mod.rs index cdeb151..c4ce09c 100644 --- a/src/proxy/mod.rs +++ b/src/proxy/mod.rs @@ -67,6 +67,7 @@ pub mod middle_relay; pub mod relay; pub mod route_mode; pub mod session_eviction; +pub mod shared_state; pub use client::ClientHandler; #[allow(unused_imports)] @@ -79,3 +80,11 @@ pub use relay::*; #[cfg(test)] #[path = "tests/test_harness_common.rs"] mod test_harness_common; + +#[cfg(test)] +#[path = "tests/proxy_shared_state_isolation_tests.rs"] +mod proxy_shared_state_isolation_tests; + +#[cfg(test)] +#[path = "tests/proxy_shared_state_parallel_execution_tests.rs"] +mod proxy_shared_state_parallel_execution_tests; diff --git a/src/proxy/shared_state.rs b/src/proxy/shared_state.rs new file mode 100644 index 0000000..2928b82 --- /dev/null +++ b/src/proxy/shared_state.rs @@ -0,0 +1,57 @@ +use std::collections::HashSet; +use std::collections::hash_map::RandomState; +use std::net::IpAddr; +use std::sync::atomic::AtomicU64; +use std::sync::{Arc, Mutex}; +use std::time::Instant; + +use dashmap::DashMap; + +use crate::proxy::handshake::{AuthProbeState, 
AuthProbeSaturationState}; +use crate::proxy::middle_relay::{DesyncDedupRotationState, RelayIdleCandidateRegistry}; + +pub(crate) struct HandshakeSharedState { + pub(crate) auth_probe: DashMap, + pub(crate) auth_probe_saturation: Mutex>, + pub(crate) auth_probe_eviction_hasher: RandomState, + pub(crate) invalid_secret_warned: Mutex>, + pub(crate) unknown_sni_warn_next_allowed: Mutex>, +} + +pub(crate) struct MiddleRelaySharedState { + pub(crate) desync_dedup: DashMap, + pub(crate) desync_dedup_previous: DashMap, + pub(crate) desync_hasher: RandomState, + pub(crate) desync_full_cache_last_emit_at: Mutex>, + pub(crate) desync_dedup_rotation_state: Mutex, + pub(crate) relay_idle_registry: Mutex, + pub(crate) relay_idle_mark_seq: AtomicU64, +} + +pub(crate) struct ProxySharedState { + pub(crate) handshake: HandshakeSharedState, + pub(crate) middle_relay: MiddleRelaySharedState, +} + +impl ProxySharedState { + pub(crate) fn new() -> Arc { + Arc::new(Self { + handshake: HandshakeSharedState { + auth_probe: DashMap::new(), + auth_probe_saturation: Mutex::new(None), + auth_probe_eviction_hasher: RandomState::new(), + invalid_secret_warned: Mutex::new(HashSet::new()), + unknown_sni_warn_next_allowed: Mutex::new(None), + }, + middle_relay: MiddleRelaySharedState { + desync_dedup: DashMap::new(), + desync_dedup_previous: DashMap::new(), + desync_hasher: RandomState::new(), + desync_full_cache_last_emit_at: Mutex::new(None), + desync_dedup_rotation_state: Mutex::new(DesyncDedupRotationState::default()), + relay_idle_registry: Mutex::new(RelayIdleCandidateRegistry::default()), + relay_idle_mark_seq: AtomicU64::new(0), + }, + }) + } +} diff --git a/src/proxy/tests/handshake_advanced_clever_tests.rs b/src/proxy/tests/handshake_advanced_clever_tests.rs index 76347c4..4a521d8 100644 --- a/src/proxy/tests/handshake_advanced_clever_tests.rs +++ b/src/proxy/tests/handshake_advanced_clever_tests.rs @@ -7,12 +7,6 @@ use std::time::{Duration, Instant}; // --- Helpers --- -fn 
auth_probe_test_guard() -> std::sync::MutexGuard<'static, ()> { - auth_probe_test_lock() - .lock() - .unwrap_or_else(|poisoned| poisoned.into_inner()) -} - fn test_config_with_secret_hex(secret_hex: &str) -> ProxyConfig { let mut cfg = ProxyConfig::default(); cfg.access.users.clear(); @@ -147,8 +141,8 @@ fn make_valid_tls_client_hello_with_alpn( #[tokio::test] async fn tls_minimum_viable_length_boundary() { - let _guard = auth_probe_test_guard(); - clear_auth_probe_state_for_testing(); + let shared = ProxySharedState::new(); + clear_auth_probe_state_for_testing_in_shared(shared.as_ref()); let secret = [0x11u8; 16]; let config = test_config_with_secret_hex("11111111111111111111111111111111"); @@ -200,8 +194,8 @@ async fn tls_minimum_viable_length_boundary() { #[tokio::test] async fn mtproto_extreme_dc_index_serialization() { - let _guard = auth_probe_test_guard(); - clear_auth_probe_state_for_testing(); + let shared = ProxySharedState::new(); + clear_auth_probe_state_for_testing_in_shared(shared.as_ref()); let secret_hex = "22222222222222222222222222222222"; let config = test_config_with_secret_hex(secret_hex); @@ -241,8 +235,8 @@ async fn mtproto_extreme_dc_index_serialization() { #[tokio::test] async fn alpn_strict_case_and_padding_rejection() { - let _guard = auth_probe_test_guard(); - clear_auth_probe_state_for_testing(); + let shared = ProxySharedState::new(); + clear_auth_probe_state_for_testing_in_shared(shared.as_ref()); let secret = [0x33u8; 16]; let mut config = test_config_with_secret_hex("33333333333333333333333333333333"); @@ -297,8 +291,8 @@ fn ipv4_mapped_ipv6_bucketing_anomaly() { #[tokio::test] async fn mtproto_invalid_ciphertext_does_not_poison_replay_cache() { - let _guard = auth_probe_test_guard(); - clear_auth_probe_state_for_testing(); + let shared = ProxySharedState::new(); + clear_auth_probe_state_for_testing_in_shared(shared.as_ref()); let secret_hex = "55555555555555555555555555555555"; let config = test_config_with_secret_hex(secret_hex); 
@@ -341,8 +335,8 @@ async fn mtproto_invalid_ciphertext_does_not_poison_replay_cache() { #[tokio::test] async fn tls_invalid_session_does_not_poison_replay_cache() { - let _guard = auth_probe_test_guard(); - clear_auth_probe_state_for_testing(); + let shared = ProxySharedState::new(); + clear_auth_probe_state_for_testing_in_shared(shared.as_ref()); let secret = [0x66u8; 16]; let config = test_config_with_secret_hex("66666666666666666666666666666666"); @@ -387,8 +381,8 @@ async fn tls_invalid_session_does_not_poison_replay_cache() { #[tokio::test] async fn server_hello_delay_timing_neutrality_on_hmac_failure() { - let _guard = auth_probe_test_guard(); - clear_auth_probe_state_for_testing(); + let shared = ProxySharedState::new(); + clear_auth_probe_state_for_testing_in_shared(shared.as_ref()); let secret = [0x77u8; 16]; let mut config = test_config_with_secret_hex("77777777777777777777777777777777"); @@ -425,8 +419,8 @@ async fn server_hello_delay_timing_neutrality_on_hmac_failure() { #[tokio::test] async fn server_hello_delay_inversion_resilience() { - let _guard = auth_probe_test_guard(); - clear_auth_probe_state_for_testing(); + let shared = ProxySharedState::new(); + clear_auth_probe_state_for_testing_in_shared(shared.as_ref()); let secret = [0x88u8; 16]; let mut config = test_config_with_secret_hex("88888888888888888888888888888888"); @@ -462,10 +456,9 @@ async fn server_hello_delay_inversion_resilience() { #[tokio::test] async fn mixed_valid_and_invalid_user_secrets_configuration() { - let _guard = auth_probe_test_guard(); - clear_auth_probe_state_for_testing(); - let _warn_guard = warned_secrets_test_lock().lock().unwrap(); - clear_warned_secrets_for_testing(); + let shared = ProxySharedState::new(); + clear_auth_probe_state_for_testing_in_shared(shared.as_ref()); + clear_warned_secrets_for_testing_in_shared(shared.as_ref()); let mut config = ProxyConfig::default(); config.access.ignore_time_skew = true; @@ -513,8 +506,8 @@ async fn 
mixed_valid_and_invalid_user_secrets_configuration() { #[tokio::test] async fn tls_emulation_fallback_when_cache_missing() { - let _guard = auth_probe_test_guard(); - clear_auth_probe_state_for_testing(); + let shared = ProxySharedState::new(); + clear_auth_probe_state_for_testing_in_shared(shared.as_ref()); let secret = [0xAAu8; 16]; let mut config = test_config_with_secret_hex("aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"); @@ -547,8 +540,8 @@ async fn tls_emulation_fallback_when_cache_missing() { #[tokio::test] async fn classic_mode_over_tls_transport_protocol_confusion() { - let _guard = auth_probe_test_guard(); - clear_auth_probe_state_for_testing(); + let shared = ProxySharedState::new(); + clear_auth_probe_state_for_testing_in_shared(shared.as_ref()); let secret_hex = "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb"; let mut config = test_config_with_secret_hex(secret_hex); @@ -608,8 +601,8 @@ fn generate_tg_nonce_never_emits_reserved_bytes() { #[tokio::test(flavor = "multi_thread", worker_threads = 4)] async fn dashmap_concurrent_saturation_stress() { - let _guard = auth_probe_test_guard(); - clear_auth_probe_state_for_testing(); + let shared = ProxySharedState::new(); + clear_auth_probe_state_for_testing_in_shared(shared.as_ref()); let ip_a: IpAddr = "192.0.2.13".parse().unwrap(); let ip_b: IpAddr = "198.51.100.13".parse().unwrap(); @@ -617,9 +610,10 @@ async fn dashmap_concurrent_saturation_stress() { for i in 0..100 { let target_ip = if i % 2 == 0 { ip_a } else { ip_b }; + let shared = shared.clone(); tasks.push(tokio::spawn(async move { for _ in 0..50 { - auth_probe_record_failure(target_ip, Instant::now()); + auth_probe_record_failure_in(shared.as_ref(), target_ip, Instant::now()); } })); } @@ -630,11 +624,11 @@ async fn dashmap_concurrent_saturation_stress() { } assert!( - auth_probe_is_throttled_for_testing(ip_a), + auth_probe_is_throttled_for_testing_in_shared(shared.as_ref(), ip_a), "IP A must be throttled after concurrent stress" ); assert!( - 
auth_probe_is_throttled_for_testing(ip_b), + auth_probe_is_throttled_for_testing_in_shared(shared.as_ref(), ip_b), "IP B must be throttled after concurrent stress" ); } @@ -661,15 +655,15 @@ fn prototag_invalid_bytes_fail_closed() { #[test] fn auth_probe_eviction_hash_collision_stress() { - let _guard = auth_probe_test_guard(); - clear_auth_probe_state_for_testing(); + let shared = ProxySharedState::new(); + clear_auth_probe_state_for_testing_in_shared(shared.as_ref()); - let state = auth_probe_state_map(); + let state = auth_probe_state_for_testing_in_shared(shared.as_ref()); let now = Instant::now(); for i in 0..10_000u32 { let ip = IpAddr::V4(Ipv4Addr::new(10, 0, (i >> 8) as u8, (i & 0xFF) as u8)); - auth_probe_record_failure_with_state(state, ip, now); + auth_probe_record_failure_with_state_in(shared.as_ref(), state, ip, now); } assert!( diff --git a/src/proxy/tests/handshake_adversarial_tests.rs b/src/proxy/tests/handshake_adversarial_tests.rs index 93832f7..14d8fdd 100644 --- a/src/proxy/tests/handshake_adversarial_tests.rs +++ b/src/proxy/tests/handshake_adversarial_tests.rs @@ -44,12 +44,6 @@ fn make_valid_mtproto_handshake( handshake } -fn auth_probe_test_guard() -> std::sync::MutexGuard<'static, ()> { - auth_probe_test_lock() - .lock() - .unwrap_or_else(|poisoned| poisoned.into_inner()) -} - fn test_config_with_secret_hex(secret_hex: &str) -> ProxyConfig { let mut cfg = ProxyConfig::default(); cfg.access.users.clear(); @@ -67,8 +61,8 @@ fn test_config_with_secret_hex(secret_hex: &str) -> ProxyConfig { #[tokio::test] async fn mtproto_handshake_bit_flip_anywhere_rejected() { - let _guard = auth_probe_test_guard(); - clear_auth_probe_state_for_testing(); + let shared = ProxySharedState::new(); + clear_auth_probe_state_for_testing_in_shared(shared.as_ref()); let secret_hex = "11223344556677889900aabbccddeeff"; let base = make_valid_mtproto_handshake(secret_hex, ProtoTag::Secure, 2); @@ -181,26 +175,26 @@ async fn mtproto_handshake_timing_neutrality_mocked() { 
#[tokio::test] async fn auth_probe_throttle_saturation_stress() { - let _guard = auth_probe_test_guard(); - clear_auth_probe_state_for_testing(); + let shared = ProxySharedState::new(); + clear_auth_probe_state_for_testing_in_shared(shared.as_ref()); let now = Instant::now(); // Record enough failures for one IP to trigger backoff let target_ip = IpAddr::V4(Ipv4Addr::new(1, 1, 1, 1)); for _ in 0..AUTH_PROBE_BACKOFF_START_FAILS { - auth_probe_record_failure(target_ip, now); + auth_probe_record_failure_in(shared.as_ref(), target_ip, now); } - assert!(auth_probe_is_throttled(target_ip, now)); + assert!(auth_probe_is_throttled_in(shared.as_ref(), target_ip, now)); // Stress test with many unique IPs for i in 0..500u32 { let ip = IpAddr::V4(Ipv4Addr::new(203, 0, 113, (i % 256) as u8)); - auth_probe_record_failure(ip, now); + auth_probe_record_failure_in(shared.as_ref(), ip, now); } - let tracked = AUTH_PROBE_STATE.get().map(|state| state.len()).unwrap_or(0); + let tracked = auth_probe_state_for_testing_in_shared(shared.as_ref()).len(); assert!( tracked <= AUTH_PROBE_TRACK_MAX_ENTRIES, "auth probe state grew past hard cap: {tracked} > {AUTH_PROBE_TRACK_MAX_ENTRIES}" @@ -209,8 +203,8 @@ async fn auth_probe_throttle_saturation_stress() { #[tokio::test] async fn mtproto_handshake_abridged_prefix_rejected() { - let _guard = auth_probe_test_guard(); - clear_auth_probe_state_for_testing(); + let shared = ProxySharedState::new(); + clear_auth_probe_state_for_testing_in_shared(shared.as_ref()); let mut handshake = [0x5Au8; HANDSHAKE_LEN]; handshake[0] = 0xef; // Abridged prefix @@ -235,8 +229,8 @@ async fn mtproto_handshake_abridged_prefix_rejected() { #[tokio::test] async fn mtproto_handshake_preferred_user_mismatch_continues() { - let _guard = auth_probe_test_guard(); - clear_auth_probe_state_for_testing(); + let shared = ProxySharedState::new(); + clear_auth_probe_state_for_testing_in_shared(shared.as_ref()); let secret1_hex = "11111111111111111111111111111111"; let 
secret2_hex = "22222222222222222222222222222222"; @@ -278,8 +272,8 @@ async fn mtproto_handshake_preferred_user_mismatch_continues() { #[tokio::test] async fn mtproto_handshake_concurrent_flood_stability() { - let _guard = auth_probe_test_guard(); - clear_auth_probe_state_for_testing(); + let shared = ProxySharedState::new(); + clear_auth_probe_state_for_testing_in_shared(shared.as_ref()); let secret_hex = "00112233445566778899aabbccddeeff"; let base = make_valid_mtproto_handshake(secret_hex, ProtoTag::Secure, 1); @@ -320,8 +314,8 @@ async fn mtproto_handshake_concurrent_flood_stability() { #[tokio::test] async fn mtproto_replay_is_rejected_across_distinct_peers() { - let _guard = auth_probe_test_guard(); - clear_auth_probe_state_for_testing(); + let shared = ProxySharedState::new(); + clear_auth_probe_state_for_testing_in_shared(shared.as_ref()); let secret_hex = "0123456789abcdeffedcba9876543210"; let handshake = make_valid_mtproto_handshake(secret_hex, ProtoTag::Secure, 2); @@ -360,8 +354,8 @@ async fn mtproto_replay_is_rejected_across_distinct_peers() { #[tokio::test] async fn mtproto_blackhat_mutation_corpus_never_panics_and_stays_fail_closed() { - let _guard = auth_probe_test_guard(); - clear_auth_probe_state_for_testing(); + let shared = ProxySharedState::new(); + clear_auth_probe_state_for_testing_in_shared(shared.as_ref()); let secret_hex = "89abcdef012345670123456789abcdef"; let base = make_valid_mtproto_handshake(secret_hex, ProtoTag::Secure, 2); @@ -405,27 +399,27 @@ async fn mtproto_blackhat_mutation_corpus_never_panics_and_stays_fail_closed() { #[tokio::test] async fn auth_probe_success_clears_throttled_peer_state() { - let _guard = auth_probe_test_guard(); - clear_auth_probe_state_for_testing(); + let shared = ProxySharedState::new(); + clear_auth_probe_state_for_testing_in_shared(shared.as_ref()); let target_ip = IpAddr::V4(Ipv4Addr::new(203, 0, 113, 90)); let now = Instant::now(); for _ in 0..AUTH_PROBE_BACKOFF_START_FAILS { - 
auth_probe_record_failure(target_ip, now); + auth_probe_record_failure_in(shared.as_ref(), target_ip, now); } - assert!(auth_probe_is_throttled(target_ip, now)); + assert!(auth_probe_is_throttled_in(shared.as_ref(), target_ip, now)); - auth_probe_record_success(target_ip); + auth_probe_record_success_in(shared.as_ref(), target_ip); assert!( - !auth_probe_is_throttled(target_ip, now + Duration::from_millis(1)), + !auth_probe_is_throttled_in(shared.as_ref(), target_ip, now + Duration::from_millis(1)), "successful auth must clear per-peer throttle state" ); } #[tokio::test] async fn mtproto_invalid_storm_over_cap_keeps_probe_map_hard_bounded() { - let _guard = auth_probe_test_guard(); - clear_auth_probe_state_for_testing(); + let shared = ProxySharedState::new(); + clear_auth_probe_state_for_testing_in_shared(shared.as_ref()); let secret_hex = "00112233445566778899aabbccddeeff"; let mut invalid = make_valid_mtproto_handshake(secret_hex, ProtoTag::Secure, 2); @@ -458,7 +452,7 @@ async fn mtproto_invalid_storm_over_cap_keeps_probe_map_hard_bounded() { assert!(matches!(res, HandshakeResult::BadClient { .. 
})); } - let tracked = AUTH_PROBE_STATE.get().map(|state| state.len()).unwrap_or(0); + let tracked = auth_probe_state_for_testing_in_shared(shared.as_ref()).len(); assert!( tracked <= AUTH_PROBE_TRACK_MAX_ENTRIES, "probe map must remain bounded under invalid storm: {tracked}" @@ -467,8 +461,8 @@ async fn mtproto_invalid_storm_over_cap_keeps_probe_map_hard_bounded() { #[tokio::test] async fn mtproto_property_style_multi_bit_mutations_fail_closed_or_auth_only() { - let _guard = auth_probe_test_guard(); - clear_auth_probe_state_for_testing(); + let shared = ProxySharedState::new(); + clear_auth_probe_state_for_testing_in_shared(shared.as_ref()); let secret_hex = "f0e1d2c3b4a5968778695a4b3c2d1e0f"; let base = make_valid_mtproto_handshake(secret_hex, ProtoTag::Secure, 2); @@ -520,8 +514,8 @@ async fn mtproto_property_style_multi_bit_mutations_fail_closed_or_auth_only() { #[tokio::test] #[ignore = "heavy soak; run manually"] async fn mtproto_blackhat_20k_mutation_soak_never_panics() { - let _guard = auth_probe_test_guard(); - clear_auth_probe_state_for_testing(); + let shared = ProxySharedState::new(); + clear_auth_probe_state_for_testing_in_shared(shared.as_ref()); let secret_hex = "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"; let base = make_valid_mtproto_handshake(secret_hex, ProtoTag::Secure, 2); diff --git a/src/proxy/tests/handshake_auth_probe_eviction_bias_security_tests.rs b/src/proxy/tests/handshake_auth_probe_eviction_bias_security_tests.rs index 77cea19..f6192f3 100644 --- a/src/proxy/tests/handshake_auth_probe_eviction_bias_security_tests.rs +++ b/src/proxy/tests/handshake_auth_probe_eviction_bias_security_tests.rs @@ -3,15 +3,9 @@ use std::collections::HashSet; use std::net::{IpAddr, Ipv4Addr}; use std::time::{Duration, Instant}; -fn auth_probe_test_guard() -> std::sync::MutexGuard<'static, ()> { - auth_probe_test_lock() - .lock() - .unwrap_or_else(|poisoned| poisoned.into_inner()) -} - #[test] fn adversarial_large_state_offsets_escape_first_scan_window() { - let 
_guard = auth_probe_test_guard(); + let shared = ProxySharedState::new(); let base = Instant::now(); let state_len = 65_536usize; let scan_limit = 1_024usize; @@ -25,7 +19,7 @@ fn adversarial_large_state_offsets_escape_first_scan_window() { ((i.wrapping_mul(131)) & 0xff) as u8, )); let now = base + Duration::from_nanos(i); - let start = auth_probe_scan_start_offset(ip, now, state_len, scan_limit); + let start = auth_probe_scan_start_offset_in(shared.as_ref(), ip, now, state_len, scan_limit); if start >= scan_limit { saw_offset_outside_first_window = true; break; @@ -40,7 +34,7 @@ fn adversarial_large_state_offsets_escape_first_scan_window() { #[test] fn stress_large_state_offsets_cover_many_scan_windows() { - let _guard = auth_probe_test_guard(); + let shared = ProxySharedState::new(); let base = Instant::now(); let state_len = 65_536usize; let scan_limit = 1_024usize; @@ -54,7 +48,7 @@ fn stress_large_state_offsets_cover_many_scan_windows() { ((i.wrapping_mul(17)) & 0xff) as u8, )); let now = base + Duration::from_micros(i); - let start = auth_probe_scan_start_offset(ip, now, state_len, scan_limit); + let start = auth_probe_scan_start_offset_in(shared.as_ref(), ip, now, state_len, scan_limit); covered_windows.insert(start / scan_limit); } @@ -68,7 +62,7 @@ fn stress_large_state_offsets_cover_many_scan_windows() { #[test] fn light_fuzz_offset_always_stays_inside_state_len() { - let _guard = auth_probe_test_guard(); + let shared = ProxySharedState::new(); let mut seed = 0xC0FF_EE12_3456_789Au64; let base = Instant::now(); @@ -86,7 +80,7 @@ fn light_fuzz_offset_always_stays_inside_state_len() { let state_len = ((seed >> 16) as usize % 200_000).saturating_add(1); let scan_limit = ((seed >> 40) as usize % 2_048).saturating_add(1); let now = base + Duration::from_nanos(seed & 0x0fff); - let start = auth_probe_scan_start_offset(ip, now, state_len, scan_limit); + let start = auth_probe_scan_start_offset_in(shared.as_ref(), ip, now, state_len, scan_limit); assert!( start < 
state_len, diff --git a/src/proxy/tests/handshake_auth_probe_hardening_adversarial_tests.rs b/src/proxy/tests/handshake_auth_probe_hardening_adversarial_tests.rs index d8fac4f..5268b2f 100644 --- a/src/proxy/tests/handshake_auth_probe_hardening_adversarial_tests.rs +++ b/src/proxy/tests/handshake_auth_probe_hardening_adversarial_tests.rs @@ -2,68 +2,62 @@ use super::*; use std::net::{IpAddr, Ipv4Addr}; use std::time::{Duration, Instant}; -fn auth_probe_test_guard() -> std::sync::MutexGuard<'static, ()> { - auth_probe_test_lock() - .lock() - .unwrap_or_else(|poisoned| poisoned.into_inner()) -} - #[test] fn positive_preauth_throttle_activates_after_failure_threshold() { - let _guard = auth_probe_test_guard(); - clear_auth_probe_state_for_testing(); + let shared = ProxySharedState::new(); + clear_auth_probe_state_for_testing_in_shared(shared.as_ref()); let ip = IpAddr::V4(Ipv4Addr::new(198, 51, 100, 20)); let now = Instant::now(); for _ in 0..AUTH_PROBE_BACKOFF_START_FAILS { - auth_probe_record_failure(ip, now); + auth_probe_record_failure_in(shared.as_ref(), ip, now); } assert!( - auth_probe_is_throttled(ip, now), + auth_probe_is_throttled_in(shared.as_ref(), ip, now), "peer must be throttled once fail streak reaches threshold" ); } #[test] fn negative_unrelated_peer_remains_unthrottled() { - let _guard = auth_probe_test_guard(); - clear_auth_probe_state_for_testing(); + let shared = ProxySharedState::new(); + clear_auth_probe_state_for_testing_in_shared(shared.as_ref()); let attacker = IpAddr::V4(Ipv4Addr::new(203, 0, 113, 12)); let benign = IpAddr::V4(Ipv4Addr::new(203, 0, 113, 13)); let now = Instant::now(); for _ in 0..AUTH_PROBE_BACKOFF_START_FAILS { - auth_probe_record_failure(attacker, now); + auth_probe_record_failure_in(shared.as_ref(), attacker, now); } - assert!(auth_probe_is_throttled(attacker, now)); + assert!(auth_probe_is_throttled_in(shared.as_ref(), attacker, now)); assert!( - !auth_probe_is_throttled(benign, now), + 
!auth_probe_is_throttled_in(shared.as_ref(), benign, now), "throttle state must stay scoped to normalized peer key" ); } #[test] fn edge_expired_entry_is_pruned_and_no_longer_throttled() { - let _guard = auth_probe_test_guard(); - clear_auth_probe_state_for_testing(); + let shared = ProxySharedState::new(); + clear_auth_probe_state_for_testing_in_shared(shared.as_ref()); let ip = IpAddr::V4(Ipv4Addr::new(192, 0, 2, 41)); let base = Instant::now(); for _ in 0..AUTH_PROBE_BACKOFF_START_FAILS { - auth_probe_record_failure(ip, base); + auth_probe_record_failure_in(shared.as_ref(), ip, base); } let expired_at = base + Duration::from_secs(AUTH_PROBE_TRACK_RETENTION_SECS + 1); assert!( - !auth_probe_is_throttled(ip, expired_at), + !auth_probe_is_throttled_in(shared.as_ref(), ip, expired_at), "expired entries must not keep throttling peers" ); - let state = auth_probe_state_map(); + let state = auth_probe_state_for_testing_in_shared(shared.as_ref()); assert!( state.get(&normalize_auth_probe_ip(ip)).is_none(), "expired lookup should prune stale state" @@ -72,36 +66,36 @@ fn edge_expired_entry_is_pruned_and_no_longer_throttled() { #[test] fn adversarial_saturation_grace_requires_extra_failures_before_preauth_throttle() { - let _guard = auth_probe_test_guard(); - clear_auth_probe_state_for_testing(); + let shared = ProxySharedState::new(); + clear_auth_probe_state_for_testing_in_shared(shared.as_ref()); let ip = IpAddr::V4(Ipv4Addr::new(198, 18, 0, 7)); let now = Instant::now(); for _ in 0..AUTH_PROBE_BACKOFF_START_FAILS { - auth_probe_record_failure(ip, now); + auth_probe_record_failure_in(shared.as_ref(), ip, now); } - auth_probe_note_saturation(now); + auth_probe_note_saturation_in(shared.as_ref(), now); assert!( - !auth_probe_should_apply_preauth_throttle(ip, now), + !auth_probe_should_apply_preauth_throttle_in(shared.as_ref(), ip, now), "during global saturation, peer must receive configured grace window" ); for _ in 0..AUTH_PROBE_SATURATION_GRACE_FAILS { - 
auth_probe_record_failure(ip, now + Duration::from_millis(1)); + auth_probe_record_failure_in(shared.as_ref(), ip, now + Duration::from_millis(1)); } assert!( - auth_probe_should_apply_preauth_throttle(ip, now + Duration::from_millis(1)), + auth_probe_should_apply_preauth_throttle_in(shared.as_ref(), ip, now + Duration::from_millis(1)), "after grace failures are exhausted, preauth throttle must activate" ); } #[test] fn integration_over_cap_insertion_keeps_probe_map_bounded() { - let _guard = auth_probe_test_guard(); - clear_auth_probe_state_for_testing(); + let shared = ProxySharedState::new(); + clear_auth_probe_state_for_testing_in_shared(shared.as_ref()); let now = Instant::now(); for idx in 0..(AUTH_PROBE_TRACK_MAX_ENTRIES + 1024) { @@ -111,10 +105,10 @@ fn integration_over_cap_insertion_keeps_probe_map_bounded() { ((idx / 256) % 256) as u8, (idx % 256) as u8, )); - auth_probe_record_failure(ip, now); + auth_probe_record_failure_in(shared.as_ref(), ip, now); } - let tracked = auth_probe_state_map().len(); + let tracked = auth_probe_state_for_testing_in_shared(shared.as_ref()).len(); assert!( tracked <= AUTH_PROBE_TRACK_MAX_ENTRIES, "probe map must remain hard bounded under insertion storm" @@ -123,8 +117,8 @@ fn integration_over_cap_insertion_keeps_probe_map_bounded() { #[test] fn light_fuzz_randomized_failures_preserve_cap_and_nonzero_streaks() { - let _guard = auth_probe_test_guard(); - clear_auth_probe_state_for_testing(); + let shared = ProxySharedState::new(); + clear_auth_probe_state_for_testing_in_shared(shared.as_ref()); let mut seed = 0x4D53_5854_6F66_6175u64; let now = Instant::now(); @@ -140,10 +134,10 @@ fn light_fuzz_randomized_failures_preserve_cap_and_nonzero_streaks() { (seed >> 8) as u8, seed as u8, )); - auth_probe_record_failure(ip, now + Duration::from_millis((seed & 0x3f) as u64)); + auth_probe_record_failure_in(shared.as_ref(), ip, now + Duration::from_millis((seed & 0x3f) as u64)); } - let state = auth_probe_state_map(); + let state = 
auth_probe_state_for_testing_in_shared(shared.as_ref()); assert!(state.len() <= AUTH_PROBE_TRACK_MAX_ENTRIES); for entry in state.iter() { assert!(entry.value().fail_streak > 0); @@ -152,13 +146,14 @@ fn light_fuzz_randomized_failures_preserve_cap_and_nonzero_streaks() { #[tokio::test(flavor = "multi_thread", worker_threads = 4)] async fn stress_parallel_failure_flood_keeps_state_hard_capped() { - let _guard = auth_probe_test_guard(); - clear_auth_probe_state_for_testing(); + let shared = ProxySharedState::new(); + clear_auth_probe_state_for_testing_in_shared(shared.as_ref()); let start = Instant::now(); let mut tasks = Vec::new(); for worker in 0..8u8 { + let shared = shared.clone(); tasks.push(tokio::spawn(async move { for i in 0..4096u32 { let ip = IpAddr::V4(Ipv4Addr::new( @@ -167,7 +162,7 @@ async fn stress_parallel_failure_flood_keeps_state_hard_capped() { ((i >> 8) & 0xff) as u8, (i & 0xff) as u8, )); - auth_probe_record_failure(ip, start + Duration::from_millis((i % 4) as u64)); + auth_probe_record_failure_in(shared.as_ref(), ip, start + Duration::from_millis((i % 4) as u64)); } })); } @@ -176,12 +171,12 @@ async fn stress_parallel_failure_flood_keeps_state_hard_capped() { task.await.expect("stress worker must not panic"); } - let tracked = auth_probe_state_map().len(); + let tracked = auth_probe_state_for_testing_in_shared(shared.as_ref()).len(); assert!( tracked <= AUTH_PROBE_TRACK_MAX_ENTRIES, "parallel failure flood must not exceed cap" ); let probe = IpAddr::V4(Ipv4Addr::new(172, 3, 4, 5)); - let _ = auth_probe_is_throttled(probe, start + Duration::from_millis(2)); + let _ = auth_probe_is_throttled_in(shared.as_ref(), probe, start + Duration::from_millis(2)); } diff --git a/src/proxy/tests/handshake_auth_probe_scan_budget_security_tests.rs b/src/proxy/tests/handshake_auth_probe_scan_budget_security_tests.rs index c91a215..0fb3b68 100644 --- a/src/proxy/tests/handshake_auth_probe_scan_budget_security_tests.rs +++ 
b/src/proxy/tests/handshake_auth_probe_scan_budget_security_tests.rs @@ -2,20 +2,14 @@ use super::*; use std::net::{IpAddr, Ipv4Addr}; use std::time::{Duration, Instant}; -fn auth_probe_test_guard() -> std::sync::MutexGuard<'static, ()> { - auth_probe_test_lock() - .lock() - .unwrap_or_else(|poisoned| poisoned.into_inner()) -} - #[test] fn edge_zero_state_len_yields_zero_start_offset() { - let _guard = auth_probe_test_guard(); + let shared = ProxySharedState::new(); let ip = IpAddr::V4(Ipv4Addr::new(198, 51, 100, 44)); let now = Instant::now(); assert_eq!( - auth_probe_scan_start_offset(ip, now, 0, 16), + auth_probe_scan_start_offset_in(shared.as_ref(), ip, now, 0, 16), 0, "empty map must not produce non-zero scan offset" ); @@ -23,7 +17,7 @@ fn edge_zero_state_len_yields_zero_start_offset() { #[test] fn adversarial_large_state_must_allow_start_offset_outside_scan_budget_window() { - let _guard = auth_probe_test_guard(); + let shared = ProxySharedState::new(); let base = Instant::now(); let scan_limit = 16usize; let state_len = 65_536usize; @@ -37,7 +31,7 @@ fn adversarial_large_state_must_allow_start_offset_outside_scan_budget_window() (i & 0xff) as u8, )); let now = base + Duration::from_micros(i as u64); - let start = auth_probe_scan_start_offset(ip, now, state_len, scan_limit); + let start = auth_probe_scan_start_offset_in(shared.as_ref(), ip, now, state_len, scan_limit); assert!( start < state_len, "start offset must stay within state length; start={start}, len={state_len}" @@ -56,12 +50,12 @@ fn adversarial_large_state_must_allow_start_offset_outside_scan_budget_window() #[test] fn positive_state_smaller_than_scan_limit_caps_to_state_len() { - let _guard = auth_probe_test_guard(); + let shared = ProxySharedState::new(); let ip = IpAddr::V4(Ipv4Addr::new(192, 0, 2, 17)); let now = Instant::now(); for state_len in 1..32usize { - let start = auth_probe_scan_start_offset(ip, now, state_len, 64); + let start = auth_probe_scan_start_offset_in(shared.as_ref(), ip, 
now, state_len, 64); assert!( start < state_len, "start offset must never exceed state length when scan limit is larger" @@ -71,7 +65,7 @@ fn positive_state_smaller_than_scan_limit_caps_to_state_len() { #[test] fn light_fuzz_scan_offset_budget_never_exceeds_effective_window() { - let _guard = auth_probe_test_guard(); + let shared = ProxySharedState::new(); let mut seed = 0x5A41_5356_4C32_3236u64; let base = Instant::now(); @@ -89,7 +83,7 @@ fn light_fuzz_scan_offset_budget_never_exceeds_effective_window() { let state_len = ((seed >> 8) as usize % 131_072).saturating_add(1); let scan_limit = ((seed >> 32) as usize % 512).saturating_add(1); let now = base + Duration::from_nanos(seed & 0xffff); - let start = auth_probe_scan_start_offset(ip, now, state_len, scan_limit); + let start = auth_probe_scan_start_offset_in(shared.as_ref(), ip, now, state_len, scan_limit); assert!( start < state_len, diff --git a/src/proxy/tests/handshake_auth_probe_scan_offset_stress_tests.rs b/src/proxy/tests/handshake_auth_probe_scan_offset_stress_tests.rs index bf97990..fd08c1b 100644 --- a/src/proxy/tests/handshake_auth_probe_scan_offset_stress_tests.rs +++ b/src/proxy/tests/handshake_auth_probe_scan_offset_stress_tests.rs @@ -3,22 +3,16 @@ use std::collections::HashSet; use std::net::{IpAddr, Ipv4Addr}; use std::time::{Duration, Instant}; -fn auth_probe_test_guard() -> std::sync::MutexGuard<'static, ()> { - auth_probe_test_lock() - .lock() - .unwrap_or_else(|poisoned| poisoned.into_inner()) -} - #[test] fn positive_same_ip_moving_time_yields_diverse_scan_offsets() { - let _guard = auth_probe_test_guard(); + let shared = ProxySharedState::new(); let ip = IpAddr::V4(Ipv4Addr::new(198, 51, 100, 77)); let base = Instant::now(); let mut uniq = HashSet::new(); for i in 0..512u64 { let now = base + Duration::from_nanos(i); - let offset = auth_probe_scan_start_offset(ip, now, 65_536, 16); + let offset = auth_probe_scan_start_offset_in(shared.as_ref(), ip, now, 65_536, 16); uniq.insert(offset); } 
@@ -31,7 +25,7 @@ fn positive_same_ip_moving_time_yields_diverse_scan_offsets() { #[test] fn adversarial_many_ips_same_time_spreads_offsets_without_bias_collapse() { - let _guard = auth_probe_test_guard(); + let shared = ProxySharedState::new(); let now = Instant::now(); let mut uniq = HashSet::new(); @@ -42,7 +36,7 @@ fn adversarial_many_ips_same_time_spreads_offsets_without_bias_collapse() { i as u8, (255 - (i as u8)), )); - uniq.insert(auth_probe_scan_start_offset(ip, now, 65_536, 16)); + uniq.insert(auth_probe_scan_start_offset_in(shared.as_ref(), ip, now, 65_536, 16)); } assert!( @@ -54,12 +48,13 @@ fn adversarial_many_ips_same_time_spreads_offsets_without_bias_collapse() { #[tokio::test(flavor = "multi_thread", worker_threads = 4)] async fn stress_parallel_failure_churn_under_saturation_remains_capped_and_live() { - let _guard = auth_probe_test_guard(); - clear_auth_probe_state_for_testing(); + let shared = ProxySharedState::new(); + clear_auth_probe_state_for_testing_in_shared(shared.as_ref()); let start = Instant::now(); let mut workers = Vec::new(); for worker in 0..8u8 { + let shared = shared.clone(); workers.push(tokio::spawn(async move { for i in 0..8192u32 { let ip = IpAddr::V4(Ipv4Addr::new( @@ -68,7 +63,7 @@ async fn stress_parallel_failure_churn_under_saturation_remains_capped_and_live( ((i >> 8) & 0xff) as u8, (i & 0xff) as u8, )); - auth_probe_record_failure(ip, start + Duration::from_micros((i % 128) as u64)); + auth_probe_record_failure_in(shared.as_ref(), ip, start + Duration::from_micros((i % 128) as u64)); } })); } @@ -78,17 +73,17 @@ async fn stress_parallel_failure_churn_under_saturation_remains_capped_and_live( } assert!( - auth_probe_state_map().len() <= AUTH_PROBE_TRACK_MAX_ENTRIES, + auth_probe_state_for_testing_in_shared(shared.as_ref()).len() <= AUTH_PROBE_TRACK_MAX_ENTRIES, "state must remain hard-capped under parallel saturation churn" ); let probe = IpAddr::V4(Ipv4Addr::new(10, 4, 1, 1)); - let _ = 
auth_probe_should_apply_preauth_throttle(probe, start + Duration::from_millis(1)); + let _ = auth_probe_should_apply_preauth_throttle_in(shared.as_ref(), probe, start + Duration::from_millis(1)); } #[test] fn light_fuzz_scan_offset_stays_within_window_for_randomized_inputs() { - let _guard = auth_probe_test_guard(); + let shared = ProxySharedState::new(); let mut seed = 0xA55A_1357_2468_9BDFu64; let base = Instant::now(); @@ -107,7 +102,7 @@ fn light_fuzz_scan_offset_stays_within_window_for_randomized_inputs() { let scan_limit = ((seed >> 40) as usize % 1024).saturating_add(1); let now = base + Duration::from_nanos(seed & 0x1fff); - let offset = auth_probe_scan_start_offset(ip, now, state_len, scan_limit); + let offset = auth_probe_scan_start_offset_in(shared.as_ref(), ip, now, state_len, scan_limit); assert!( offset < state_len, "scan offset must always remain inside state length" diff --git a/src/proxy/tests/handshake_baseline_invariant_tests.rs b/src/proxy/tests/handshake_baseline_invariant_tests.rs index 40b03a0..0cab662 100644 --- a/src/proxy/tests/handshake_baseline_invariant_tests.rs +++ b/src/proxy/tests/handshake_baseline_invariant_tests.rs @@ -36,16 +36,10 @@ fn make_valid_tls_handshake(secret: &[u8], timestamp: u32) -> Vec { handshake } -fn test_lock_guard() -> std::sync::MutexGuard<'static, ()> { - auth_probe_test_lock() - .lock() - .unwrap_or_else(|poisoned| poisoned.into_inner()) -} - #[tokio::test] async fn handshake_baseline_probe_always_falls_back_to_masking() { - let _guard = test_lock_guard(); - clear_auth_probe_state_for_testing(); + let shared = ProxySharedState::new(); + clear_auth_probe_state_for_testing_in_shared(shared.as_ref()); let cfg = test_config_with_secret_hex("11111111111111111111111111111111"); let replay_checker = ReplayChecker::new(64, Duration::from_secs(60)); @@ -70,8 +64,8 @@ async fn handshake_baseline_probe_always_falls_back_to_masking() { #[tokio::test] async fn 
handshake_baseline_invalid_secret_triggers_fallback_not_error_response() { - let _guard = test_lock_guard(); - clear_auth_probe_state_for_testing(); + let shared = ProxySharedState::new(); + clear_auth_probe_state_for_testing_in_shared(shared.as_ref()); let good_secret = [0x22u8; 16]; let bad_cfg = test_config_with_secret_hex("33333333333333333333333333333333"); @@ -97,8 +91,8 @@ async fn handshake_baseline_invalid_secret_triggers_fallback_not_error_response( #[tokio::test] async fn handshake_baseline_auth_probe_streak_increments_per_ip() { - let _guard = test_lock_guard(); - clear_auth_probe_state_for_testing(); + let shared = ProxySharedState::new(); + clear_auth_probe_state_for_testing_in_shared(shared.as_ref()); let cfg = test_config_with_secret_hex("44444444444444444444444444444444"); let replay_checker = ReplayChecker::new(64, Duration::from_secs(60)); @@ -109,7 +103,7 @@ async fn handshake_baseline_auth_probe_streak_increments_per_ip() { let bad_probe = b"\x16\x03\x01\x00"; for expected in 1..=3 { - let res = handle_tls_handshake( + let res = handle_tls_handshake_with_shared( bad_probe, tokio::io::empty(), tokio::io::sink(), @@ -118,43 +112,44 @@ async fn handshake_baseline_auth_probe_streak_increments_per_ip() { &replay_checker, &rng, None, + shared.as_ref(), ) .await; assert!(matches!(res, HandshakeResult::BadClient { .. 
})); - assert_eq!(auth_probe_fail_streak_for_testing(peer.ip()), Some(expected)); - assert_eq!(auth_probe_fail_streak_for_testing(untouched_ip), None); + assert_eq!(auth_probe_fail_streak_for_testing_in_shared(shared.as_ref(), peer.ip()), Some(expected)); + assert_eq!(auth_probe_fail_streak_for_testing_in_shared(shared.as_ref(), untouched_ip), None); } } #[test] fn handshake_baseline_saturation_fires_at_compile_time_threshold() { - let _guard = test_lock_guard(); - clear_auth_probe_state_for_testing(); + let shared = ProxySharedState::new(); + clear_auth_probe_state_for_testing_in_shared(shared.as_ref()); let ip = IpAddr::V4(Ipv4Addr::new(198, 51, 100, 33)); let now = Instant::now(); for _ in 0..AUTH_PROBE_BACKOFF_START_FAILS.saturating_sub(1) { - auth_probe_record_failure(ip, now); + auth_probe_record_failure_in(shared.as_ref(), ip, now); } - assert!(!auth_probe_is_throttled(ip, now)); + assert!(!auth_probe_is_throttled_in(shared.as_ref(), ip, now)); - auth_probe_record_failure(ip, now); - assert!(auth_probe_is_throttled(ip, now)); + auth_probe_record_failure_in(shared.as_ref(), ip, now); + assert!(auth_probe_is_throttled_in(shared.as_ref(), ip, now)); } #[test] fn handshake_baseline_repeated_probes_streak_monotonic() { - let _guard = test_lock_guard(); - clear_auth_probe_state_for_testing(); + let shared = ProxySharedState::new(); + clear_auth_probe_state_for_testing_in_shared(shared.as_ref()); let ip = IpAddr::V4(Ipv4Addr::new(203, 0, 113, 42)); let now = Instant::now(); let mut prev = 0u32; for _ in 0..100 { - auth_probe_record_failure(ip, now); - let current = auth_probe_fail_streak_for_testing(ip).unwrap_or(0); + auth_probe_record_failure_in(shared.as_ref(), ip, now); + let current = auth_probe_fail_streak_for_testing_in_shared(shared.as_ref(), ip).unwrap_or(0); assert!(current >= prev, "streak must be monotonic"); prev = current; } @@ -162,14 +157,14 @@ fn handshake_baseline_repeated_probes_streak_monotonic() { #[test] fn 
handshake_baseline_throttled_ip_incurs_backoff_delay() { - let _guard = test_lock_guard(); - clear_auth_probe_state_for_testing(); + let shared = ProxySharedState::new(); + clear_auth_probe_state_for_testing_in_shared(shared.as_ref()); let ip = IpAddr::V4(Ipv4Addr::new(198, 51, 100, 44)); let now = Instant::now(); for _ in 0..AUTH_PROBE_BACKOFF_START_FAILS { - auth_probe_record_failure(ip, now); + auth_probe_record_failure_in(shared.as_ref(), ip, now); } let delay = auth_probe_backoff(AUTH_PROBE_BACKOFF_START_FAILS); @@ -178,14 +173,14 @@ fn handshake_baseline_throttled_ip_incurs_backoff_delay() { let before_expiry = now + delay.saturating_sub(Duration::from_millis(1)); let after_expiry = now + delay + Duration::from_millis(1); - assert!(auth_probe_is_throttled(ip, before_expiry)); - assert!(!auth_probe_is_throttled(ip, after_expiry)); + assert!(auth_probe_is_throttled_in(shared.as_ref(), ip, before_expiry)); + assert!(!auth_probe_is_throttled_in(shared.as_ref(), ip, after_expiry)); } #[tokio::test] async fn handshake_baseline_malformed_probe_frames_fail_closed_to_masking() { - let _guard = test_lock_guard(); - clear_auth_probe_state_for_testing(); + let shared = ProxySharedState::new(); + clear_auth_probe_state_for_testing_in_shared(shared.as_ref()); let cfg = test_config_with_secret_hex("55555555555555555555555555555555"); let replay_checker = ReplayChecker::new(64, Duration::from_secs(60)); diff --git a/src/proxy/tests/handshake_fuzz_security_tests.rs b/src/proxy/tests/handshake_fuzz_security_tests.rs index efb596b..c56b184 100644 --- a/src/proxy/tests/handshake_fuzz_security_tests.rs +++ b/src/proxy/tests/handshake_fuzz_security_tests.rs @@ -67,16 +67,10 @@ fn test_config_with_secret_hex(secret_hex: &str) -> ProxyConfig { cfg } -fn auth_probe_test_guard() -> MutexGuard<'static, ()> { - auth_probe_test_lock() - .lock() - .unwrap_or_else(|poisoned| poisoned.into_inner()) -} - #[tokio::test] async fn 
mtproto_handshake_duplicate_digest_is_replayed_on_second_attempt() { - let _guard = auth_probe_test_guard(); - clear_auth_probe_state_for_testing(); + let shared = ProxySharedState::new(); + clear_auth_probe_state_for_testing_in_shared(shared.as_ref()); let secret_hex = "11223344556677889900aabbccddeeff"; let base = make_valid_mtproto_handshake(secret_hex, ProtoTag::Secure, 2); @@ -110,13 +104,13 @@ async fn mtproto_handshake_duplicate_digest_is_replayed_on_second_attempt() { .await; assert!(matches!(second, HandshakeResult::BadClient { .. })); - clear_auth_probe_state_for_testing(); + clear_auth_probe_state_for_testing_in_shared(shared.as_ref()); } #[tokio::test] async fn mtproto_handshake_fuzz_corpus_never_panics_and_stays_fail_closed() { - let _guard = auth_probe_test_guard(); - clear_auth_probe_state_for_testing(); + let shared = ProxySharedState::new(); + clear_auth_probe_state_for_testing_in_shared(shared.as_ref()); let secret_hex = "00112233445566778899aabbccddeeff"; let base = make_valid_mtproto_handshake(secret_hex, ProtoTag::Secure, 1); @@ -178,13 +172,13 @@ async fn mtproto_handshake_fuzz_corpus_never_panics_and_stays_fail_closed() { ); } - clear_auth_probe_state_for_testing(); + clear_auth_probe_state_for_testing_in_shared(shared.as_ref()); } #[tokio::test] async fn mtproto_handshake_mixed_corpus_never_panics_and_exact_duplicates_are_rejected() { - let _guard = auth_probe_test_guard(); - clear_auth_probe_state_for_testing(); + let shared = ProxySharedState::new(); + clear_auth_probe_state_for_testing_in_shared(shared.as_ref()); let secret_hex = "99887766554433221100ffeeddccbbaa"; let base = make_valid_mtproto_handshake(secret_hex, ProtoTag::Secure, 4); @@ -274,5 +268,5 @@ async fn mtproto_handshake_mixed_corpus_never_panics_and_exact_duplicates_are_re ); } - clear_auth_probe_state_for_testing(); + clear_auth_probe_state_for_testing_in_shared(shared.as_ref()); } diff --git a/src/proxy/tests/handshake_more_clever_tests.rs 
b/src/proxy/tests/handshake_more_clever_tests.rs index 9782469..b8e5ae8 100644 --- a/src/proxy/tests/handshake_more_clever_tests.rs +++ b/src/proxy/tests/handshake_more_clever_tests.rs @@ -11,12 +11,6 @@ use tokio::sync::Barrier; // --- Helpers --- -fn auth_probe_test_guard() -> std::sync::MutexGuard<'static, ()> { - auth_probe_test_lock() - .lock() - .unwrap_or_else(|poisoned| poisoned.into_inner()) -} - fn test_config_with_secret_hex(secret_hex: &str) -> ProxyConfig { let mut cfg = ProxyConfig::default(); cfg.access.users.clear(); @@ -164,8 +158,8 @@ fn make_valid_tls_client_hello_with_sni_and_alpn( #[tokio::test] async fn server_hello_delay_bypassed_if_max_is_zero_despite_high_min() { - let _guard = auth_probe_test_guard(); - clear_auth_probe_state_for_testing(); + let shared = ProxySharedState::new(); + clear_auth_probe_state_for_testing_in_shared(shared.as_ref()); let secret = [0x1Au8; 16]; let mut config = test_config_with_secret_hex("1a1a1a1a1a1a1a1a1a1a1a1a1a1a1a1a"); @@ -201,10 +195,10 @@ async fn server_hello_delay_bypassed_if_max_is_zero_despite_high_min() { #[test] fn auth_probe_backoff_extreme_fail_streak_clamps_safely() { - let _guard = auth_probe_test_guard(); - clear_auth_probe_state_for_testing(); + let shared = ProxySharedState::new(); + clear_auth_probe_state_for_testing_in_shared(shared.as_ref()); - let state = auth_probe_state_map(); + let state = auth_probe_state_for_testing_in_shared(shared.as_ref()); let peer_ip = IpAddr::V4(Ipv4Addr::new(198, 51, 100, 99)); let now = Instant::now(); @@ -217,7 +211,7 @@ fn auth_probe_backoff_extreme_fail_streak_clamps_safely() { }, ); - auth_probe_record_failure_with_state(&state, peer_ip, now); + auth_probe_record_failure_with_state_in(shared.as_ref(), &state, peer_ip, now); let updated = state.get(&peer_ip).unwrap(); assert_eq!(updated.fail_streak, u32::MAX); @@ -270,8 +264,8 @@ fn generate_tg_nonce_cryptographic_uniqueness_and_entropy() { #[tokio::test] async fn mtproto_multi_user_decryption_isolation() { 
- let _guard = auth_probe_test_guard(); - clear_auth_probe_state_for_testing(); + let shared = ProxySharedState::new(); + clear_auth_probe_state_for_testing_in_shared(shared.as_ref()); let mut config = ProxyConfig::default(); config.general.modes.secure = true; @@ -323,10 +317,8 @@ async fn mtproto_multi_user_decryption_isolation() { #[tokio::test(flavor = "multi_thread", worker_threads = 4)] async fn invalid_secret_warning_lock_contention_and_bound() { - let _guard = warned_secrets_test_lock() - .lock() - .unwrap_or_else(|poisoned| poisoned.into_inner()); - clear_warned_secrets_for_testing(); + let shared = ProxySharedState::new(); + clear_warned_secrets_for_testing_in_shared(shared.as_ref()); let tasks = 50; let iterations_per_task = 100; @@ -335,11 +327,12 @@ async fn invalid_secret_warning_lock_contention_and_bound() { for t in 0..tasks { let b = barrier.clone(); + let shared = shared.clone(); handles.push(tokio::spawn(async move { b.wait().await; for i in 0..iterations_per_task { let user_name = format!("contention_user_{}_{}", t, i); - warn_invalid_secret_once(&user_name, "invalid_hex", ACCESS_SECRET_BYTES, None); + warn_invalid_secret_once_in(shared.as_ref(), &user_name, "invalid_hex", ACCESS_SECRET_BYTES, None); } })); } @@ -348,7 +341,7 @@ async fn invalid_secret_warning_lock_contention_and_bound() { handle.await.unwrap(); } - let warned = INVALID_SECRET_WARNED.get().unwrap(); + let warned = warned_secrets_for_testing_in_shared(shared.as_ref()); let guard = warned .lock() .unwrap_or_else(|poisoned| poisoned.into_inner()); @@ -362,8 +355,8 @@ async fn invalid_secret_warning_lock_contention_and_bound() { #[tokio::test(flavor = "multi_thread", worker_threads = 4)] async fn mtproto_strict_concurrent_replay_race_condition() { - let _guard = auth_probe_test_guard(); - clear_auth_probe_state_for_testing(); + let shared = ProxySharedState::new(); + clear_auth_probe_state_for_testing_in_shared(shared.as_ref()); let secret_hex = "4A4A4A4A4A4A4A4A4A4A4A4A4A4A4A4A"; 
let config = Arc::new(test_config_with_secret_hex(secret_hex)); @@ -428,8 +421,8 @@ async fn mtproto_strict_concurrent_replay_race_condition() { #[tokio::test] async fn tls_alpn_zero_length_protocol_handled_safely() { - let _guard = auth_probe_test_guard(); - clear_auth_probe_state_for_testing(); + let shared = ProxySharedState::new(); + clear_auth_probe_state_for_testing_in_shared(shared.as_ref()); let secret = [0x5Bu8; 16]; let mut config = test_config_with_secret_hex("5b5b5b5b5b5b5b5b5b5b5b5b5b5b5b5b"); @@ -461,8 +454,8 @@ async fn tls_alpn_zero_length_protocol_handled_safely() { #[tokio::test] async fn tls_sni_massive_hostname_does_not_panic() { - let _guard = auth_probe_test_guard(); - clear_auth_probe_state_for_testing(); + let shared = ProxySharedState::new(); + clear_auth_probe_state_for_testing_in_shared(shared.as_ref()); let secret = [0x6Cu8; 16]; let config = test_config_with_secret_hex("6c6c6c6c6c6c6c6c6c6c6c6c6c6c6c6c"); @@ -497,8 +490,8 @@ async fn tls_sni_massive_hostname_does_not_panic() { #[tokio::test] async fn tls_progressive_truncation_fuzzing_no_panics() { - let _guard = auth_probe_test_guard(); - clear_auth_probe_state_for_testing(); + let shared = ProxySharedState::new(); + clear_auth_probe_state_for_testing_in_shared(shared.as_ref()); let secret = [0x7Du8; 16]; let config = test_config_with_secret_hex("7d7d7d7d7d7d7d7d7d7d7d7d7d7d7d7d"); @@ -535,8 +528,8 @@ async fn tls_progressive_truncation_fuzzing_no_panics() { #[tokio::test] async fn mtproto_pure_entropy_fuzzing_no_panics() { - let _guard = auth_probe_test_guard(); - clear_auth_probe_state_for_testing(); + let shared = ProxySharedState::new(); + clear_auth_probe_state_for_testing_in_shared(shared.as_ref()); let config = test_config_with_secret_hex("8e8e8e8e8e8e8e8e8e8e8e8e8e8e8e8e"); let replay_checker = ReplayChecker::new(128, Duration::from_secs(60)); @@ -569,10 +562,8 @@ async fn mtproto_pure_entropy_fuzzing_no_panics() { #[test] fn decode_user_secret_odd_length_hex_rejection() { - 
let _guard = warned_secrets_test_lock() - .lock() - .unwrap_or_else(|poisoned| poisoned.into_inner()); - clear_warned_secrets_for_testing(); + let shared = ProxySharedState::new(); + clear_warned_secrets_for_testing_in_shared(shared.as_ref()); let mut config = ProxyConfig::default(); config.access.users.clear(); @@ -581,7 +572,7 @@ fn decode_user_secret_odd_length_hex_rejection() { "1234567890123456789012345678901".to_string(), ); - let decoded = decode_user_secrets(&config, None); + let decoded = decode_user_secrets_in(shared.as_ref(), &config, None); assert!( decoded.is_empty(), "Odd-length hex string must be gracefully rejected by hex::decode without unwrapping" @@ -590,10 +581,10 @@ fn decode_user_secret_odd_length_hex_rejection() { #[test] fn saturation_grace_pre_existing_high_fail_streak_immediate_throttle() { - let _guard = auth_probe_test_guard(); - clear_auth_probe_state_for_testing(); + let shared = ProxySharedState::new(); + clear_auth_probe_state_for_testing_in_shared(shared.as_ref()); - let state = auth_probe_state_map(); + let state = auth_probe_state_for_testing_in_shared(shared.as_ref()); let peer_ip = IpAddr::V4(Ipv4Addr::new(198, 51, 100, 112)); let now = Instant::now(); @@ -608,7 +599,7 @@ fn saturation_grace_pre_existing_high_fail_streak_immediate_throttle() { ); { - let mut guard = auth_probe_saturation_state_lock(); + let mut guard = auth_probe_saturation_state_lock_for_testing_in_shared(shared.as_ref()); *guard = Some(AuthProbeSaturationState { fail_streak: AUTH_PROBE_BACKOFF_START_FAILS, blocked_until: now + Duration::from_secs(5), @@ -616,7 +607,7 @@ fn saturation_grace_pre_existing_high_fail_streak_immediate_throttle() { }); } - let is_throttled = auth_probe_should_apply_preauth_throttle(peer_ip, now); + let is_throttled = auth_probe_should_apply_preauth_throttle_in(shared.as_ref(), peer_ip, now); assert!( is_throttled, "A peer with a pre-existing high fail streak must be immediately throttled when saturation begins, receiving no unearned 
grace period" @@ -625,21 +616,21 @@ fn saturation_grace_pre_existing_high_fail_streak_immediate_throttle() { #[test] fn auth_probe_saturation_note_resets_retention_window() { - let _guard = auth_probe_test_guard(); - clear_auth_probe_state_for_testing(); + let shared = ProxySharedState::new(); + clear_auth_probe_state_for_testing_in_shared(shared.as_ref()); let base_time = Instant::now(); - auth_probe_note_saturation(base_time); + auth_probe_note_saturation_in(shared.as_ref(), base_time); let later = base_time + Duration::from_secs(AUTH_PROBE_TRACK_RETENTION_SECS - 1); - auth_probe_note_saturation(later); + auth_probe_note_saturation_in(shared.as_ref(), later); let check_time = base_time + Duration::from_secs(AUTH_PROBE_TRACK_RETENTION_SECS + 5); // This call may return false if backoff has elapsed, but it must not clear // the saturation state because `later` refreshed last_seen. - let _ = auth_probe_saturation_is_throttled_at_for_testing(check_time); - let guard = auth_probe_saturation_state_lock(); + let _ = auth_probe_saturation_is_throttled_at_for_testing_in_shared(shared.as_ref(), check_time); + let guard = auth_probe_saturation_state_lock_for_testing_in_shared(shared.as_ref()); assert!( guard.is_some(), "Ongoing saturation notes must refresh last_seen so saturation state remains retained past the original window" diff --git a/src/proxy/tests/handshake_real_bug_stress_tests.rs b/src/proxy/tests/handshake_real_bug_stress_tests.rs index 1e27ed5..8c81061 100644 --- a/src/proxy/tests/handshake_real_bug_stress_tests.rs +++ b/src/proxy/tests/handshake_real_bug_stress_tests.rs @@ -6,12 +6,6 @@ use std::sync::Arc; use std::time::{Duration, Instant}; use tokio::sync::Barrier; -fn auth_probe_test_guard() -> std::sync::MutexGuard<'static, ()> { - auth_probe_test_lock() - .lock() - .unwrap_or_else(|poisoned| poisoned.into_inner()) -} - fn test_config_with_secret_hex(secret_hex: &str) -> ProxyConfig { let mut cfg = ProxyConfig::default(); cfg.access.users.clear(); @@ 
-127,8 +121,8 @@ fn make_valid_mtproto_handshake( #[tokio::test] async fn tls_alpn_reject_does_not_pollute_replay_cache() { - let _guard = auth_probe_test_guard(); - clear_auth_probe_state_for_testing(); + let shared = ProxySharedState::new(); + clear_auth_probe_state_for_testing_in_shared(shared.as_ref()); let secret = [0x11u8; 16]; let mut config = test_config_with_secret_hex("11111111111111111111111111111111"); @@ -164,8 +158,8 @@ async fn tls_alpn_reject_does_not_pollute_replay_cache() { #[tokio::test] async fn tls_truncated_session_id_len_fails_closed_without_panic() { - let _guard = auth_probe_test_guard(); - clear_auth_probe_state_for_testing(); + let shared = ProxySharedState::new(); + clear_auth_probe_state_for_testing_in_shared(shared.as_ref()); let config = test_config_with_secret_hex("33333333333333333333333333333333"); let replay_checker = ReplayChecker::new(128, Duration::from_secs(60)); @@ -193,10 +187,10 @@ async fn tls_truncated_session_id_len_fails_closed_without_panic() { #[test] fn auth_probe_eviction_identical_timestamps_keeps_map_bounded() { - let _guard = auth_probe_test_guard(); - clear_auth_probe_state_for_testing(); + let shared = ProxySharedState::new(); + clear_auth_probe_state_for_testing_in_shared(shared.as_ref()); - let state = auth_probe_state_map(); + let state = auth_probe_state_for_testing_in_shared(shared.as_ref()); let same = Instant::now(); for i in 0..AUTH_PROBE_TRACK_MAX_ENTRIES { @@ -212,7 +206,7 @@ fn auth_probe_eviction_identical_timestamps_keeps_map_bounded() { } let new_ip = IpAddr::V4(Ipv4Addr::new(192, 168, 21, 21)); - auth_probe_record_failure_with_state(state, new_ip, same + Duration::from_millis(1)); + auth_probe_record_failure_with_state_in(shared.as_ref(), state, new_ip, same + Duration::from_millis(1)); assert_eq!(state.len(), AUTH_PROBE_TRACK_MAX_ENTRIES); assert!(state.contains_key(&new_ip)); @@ -220,21 +214,21 @@ fn auth_probe_eviction_identical_timestamps_keeps_map_bounded() { #[test] fn 
clear_auth_probe_state_recovers_from_poisoned_saturation_lock() { - let _guard = auth_probe_test_guard(); - clear_auth_probe_state_for_testing(); + let shared = ProxySharedState::new(); + clear_auth_probe_state_for_testing_in_shared(shared.as_ref()); - let saturation = auth_probe_saturation_state(); + let shared_for_poison = shared.clone(); let poison_thread = std::thread::spawn(move || { - let _hold = saturation + let _hold = auth_probe_saturation_state_for_testing_in_shared(shared_for_poison.as_ref()) .lock() .unwrap_or_else(|poisoned| poisoned.into_inner()); panic!("intentional poison for regression coverage"); }); let _ = poison_thread.join(); - clear_auth_probe_state_for_testing(); + clear_auth_probe_state_for_testing_in_shared(shared.as_ref()); - let guard = auth_probe_saturation_state() + let guard = auth_probe_saturation_state_for_testing_in_shared(shared.as_ref()) .lock() .unwrap_or_else(|poisoned| poisoned.into_inner()); assert!(guard.is_none()); @@ -242,12 +236,9 @@ fn clear_auth_probe_state_recovers_from_poisoned_saturation_lock() { #[tokio::test] async fn mtproto_invalid_length_secret_is_ignored_and_valid_user_still_auths() { - let _probe_guard = auth_probe_test_guard(); - let _warn_guard = warned_secrets_test_lock() - .lock() - .unwrap_or_else(|poisoned| poisoned.into_inner()); - clear_auth_probe_state_for_testing(); - clear_warned_secrets_for_testing(); + let shared = ProxySharedState::new(); + clear_auth_probe_state_for_testing_in_shared(shared.as_ref()); + clear_warned_secrets_for_testing_in_shared(shared.as_ref()); let mut config = ProxyConfig::default(); config.general.modes.secure = true; @@ -285,14 +276,14 @@ async fn mtproto_invalid_length_secret_is_ignored_and_valid_user_still_auths() { #[tokio::test(flavor = "multi_thread", worker_threads = 4)] async fn saturation_grace_exhaustion_under_concurrency_keeps_peer_throttled() { - let _guard = auth_probe_test_guard(); - clear_auth_probe_state_for_testing(); + let shared = ProxySharedState::new(); 
+ clear_auth_probe_state_for_testing_in_shared(shared.as_ref()); let peer_ip = IpAddr::V4(Ipv4Addr::new(198, 51, 100, 80)); let now = Instant::now(); { - let mut guard = auth_probe_saturation_state() + let mut guard = auth_probe_saturation_state_for_testing_in_shared(shared.as_ref()) .lock() .unwrap_or_else(|poisoned| poisoned.into_inner()); *guard = Some(AuthProbeSaturationState { @@ -302,7 +293,7 @@ async fn saturation_grace_exhaustion_under_concurrency_keeps_peer_throttled() { }); } - let state = auth_probe_state_map(); + let state = auth_probe_state_for_testing_in_shared(shared.as_ref()); state.insert( peer_ip, AuthProbeState { @@ -318,9 +309,10 @@ async fn saturation_grace_exhaustion_under_concurrency_keeps_peer_throttled() { for _ in 0..tasks { let b = barrier.clone(); + let shared = shared.clone(); handles.push(tokio::spawn(async move { b.wait().await; - auth_probe_record_failure(peer_ip, Instant::now()); + auth_probe_record_failure_in(shared.as_ref(), peer_ip, Instant::now()); })); } @@ -333,7 +325,7 @@ async fn saturation_grace_exhaustion_under_concurrency_keeps_peer_throttled() { final_state.fail_streak >= AUTH_PROBE_BACKOFF_START_FAILS + AUTH_PROBE_SATURATION_GRACE_FAILS ); - assert!(auth_probe_should_apply_preauth_throttle( + assert!(auth_probe_should_apply_preauth_throttle_in(shared.as_ref(), peer_ip, Instant::now() )); diff --git a/src/proxy/tests/handshake_saturation_poison_security_tests.rs b/src/proxy/tests/handshake_saturation_poison_security_tests.rs index 4c2ca5d..d7e1106 100644 --- a/src/proxy/tests/handshake_saturation_poison_security_tests.rs +++ b/src/proxy/tests/handshake_saturation_poison_security_tests.rs @@ -1,46 +1,39 @@ use super::*; use std::time::{Duration, Instant}; -fn auth_probe_test_guard() -> std::sync::MutexGuard<'static, ()> { - auth_probe_test_lock() - .lock() - .unwrap_or_else(|poisoned| poisoned.into_inner()) -} - -fn poison_saturation_mutex() { - let saturation = auth_probe_saturation_state(); - let poison_thread = 
std::thread::spawn(move || { +fn poison_saturation_mutex(shared: &ProxySharedState) { + let saturation = auth_probe_saturation_state_for_testing_in_shared(shared); + let _ = std::panic::catch_unwind(std::panic::AssertUnwindSafe(|| { let _guard = saturation .lock() .expect("saturation mutex must be lockable for poison setup"); panic!("intentional poison for saturation mutex resilience test"); - }); - let _ = poison_thread.join(); + })); } #[test] fn auth_probe_saturation_note_recovers_after_mutex_poison() { - let _guard = auth_probe_test_guard(); - clear_auth_probe_state_for_testing(); - poison_saturation_mutex(); + let shared = ProxySharedState::new(); + clear_auth_probe_state_for_testing_in_shared(shared.as_ref()); + poison_saturation_mutex(shared.as_ref()); let now = Instant::now(); - auth_probe_note_saturation(now); + auth_probe_note_saturation_in(shared.as_ref(), now); assert!( - auth_probe_saturation_is_throttled_at_for_testing(now), + auth_probe_saturation_is_throttled_at_for_testing_in_shared(shared.as_ref(), now), "poisoned saturation mutex must not disable saturation throttling" ); } #[test] fn auth_probe_saturation_check_recovers_after_mutex_poison() { - let _guard = auth_probe_test_guard(); - clear_auth_probe_state_for_testing(); - poison_saturation_mutex(); + let shared = ProxySharedState::new(); + clear_auth_probe_state_for_testing_in_shared(shared.as_ref()); + poison_saturation_mutex(shared.as_ref()); { - let mut guard = auth_probe_saturation_state_lock(); + let mut guard = auth_probe_saturation_state_lock_for_testing_in_shared(shared.as_ref()); *guard = Some(AuthProbeSaturationState { fail_streak: AUTH_PROBE_BACKOFF_START_FAILS, blocked_until: Instant::now() + Duration::from_millis(10), @@ -49,23 +42,23 @@ fn auth_probe_saturation_check_recovers_after_mutex_poison() { } assert!( - auth_probe_saturation_is_throttled_for_testing(), + auth_probe_saturation_is_throttled_for_testing_in_shared(shared.as_ref()), "throttle check must recover poisoned 
saturation mutex and stay fail-closed" ); } #[test] fn clear_auth_probe_state_clears_saturation_even_if_poisoned() { - let _guard = auth_probe_test_guard(); - clear_auth_probe_state_for_testing(); - poison_saturation_mutex(); + let shared = ProxySharedState::new(); + clear_auth_probe_state_for_testing_in_shared(shared.as_ref()); + poison_saturation_mutex(shared.as_ref()); - auth_probe_note_saturation(Instant::now()); - assert!(auth_probe_saturation_is_throttled_for_testing()); + auth_probe_note_saturation_in(shared.as_ref(), Instant::now()); + assert!(auth_probe_saturation_is_throttled_for_testing_in_shared(shared.as_ref())); - clear_auth_probe_state_for_testing(); + clear_auth_probe_state_for_testing_in_shared(shared.as_ref()); assert!( - !auth_probe_saturation_is_throttled_for_testing(), + !auth_probe_saturation_is_throttled_for_testing_in_shared(shared.as_ref()), "clear helper must clear saturation state even after poison" ); } diff --git a/src/proxy/tests/handshake_security_tests.rs b/src/proxy/tests/handshake_security_tests.rs index 0e43d35..7479772 100644 --- a/src/proxy/tests/handshake_security_tests.rs +++ b/src/proxy/tests/handshake_security_tests.rs @@ -697,10 +697,8 @@ async fn invalid_tls_probe_does_not_pollute_replay_cache() { #[tokio::test] async fn empty_decoded_secret_is_rejected() { - let _guard = warned_secrets_test_lock() - .lock() - .unwrap_or_else(|poisoned| poisoned.into_inner()); - clear_warned_secrets_for_testing(); + let shared = ProxySharedState::new(); + clear_warned_secrets_for_testing_in_shared(shared.as_ref()); let config = test_config_with_secret_hex(""); let replay_checker = ReplayChecker::new(128, Duration::from_secs(60)); let rng = SecureRandom::new(); @@ -724,10 +722,8 @@ async fn empty_decoded_secret_is_rejected() { #[tokio::test] async fn wrong_length_decoded_secret_is_rejected() { - let _guard = warned_secrets_test_lock() - .lock() - .unwrap_or_else(|poisoned| poisoned.into_inner()); - clear_warned_secrets_for_testing(); + let 
shared = ProxySharedState::new(); + clear_warned_secrets_for_testing_in_shared(shared.as_ref()); let config = test_config_with_secret_hex("aa"); let replay_checker = ReplayChecker::new(128, Duration::from_secs(60)); let rng = SecureRandom::new(); @@ -777,14 +773,10 @@ async fn invalid_mtproto_probe_does_not_pollute_replay_cache() { #[tokio::test] async fn mixed_secret_lengths_keep_valid_user_authenticating() { - let _probe_guard = auth_probe_test_lock() - .lock() - .unwrap_or_else(|poisoned| poisoned.into_inner()); - let _guard = warned_secrets_test_lock() - .lock() - .unwrap_or_else(|poisoned| poisoned.into_inner()); - clear_warned_secrets_for_testing(); - clear_auth_probe_state_for_testing(); + let shared = ProxySharedState::new(); + // one fresh shared state isolates both the warned-secret and auth-probe caches + clear_warned_secrets_for_testing_in_shared(shared.as_ref()); + clear_auth_probe_state_for_testing_in_shared(shared.as_ref()); let good_secret = [0x22u8; 16]; let mut config = ProxyConfig::default(); config.access.users.clear(); @@ -864,6 +856,7 @@ async fn tls_sni_preferred_user_hint_selects_matching_identity_first() { #[test] fn stress_decode_user_secrets_keeps_preferred_user_first_in_large_set() { + let shared = ProxySharedState::new(); let mut config = ProxyConfig::default(); config.access.users.clear(); @@ -881,7 +874,7 @@ .users .insert(preferred_user.clone(), secret_hex.clone()); - let decoded = decode_user_secrets(&config, Some(preferred_user.as_str())); + let decoded = decode_user_secrets_in(shared.as_ref(), &config, Some(preferred_user.as_str())); assert_eq!( decoded.len(), config.access.users.len(), @@ -1264,6 +1257,7 @@ async fn timing_matrix_tls_classes_under_fixed_delay_budget() { const ITER: usize = 48; const BUCKET_MS: u128 = 10; + let shared = ProxySharedState::new(); let secret = [0x77u8; 16]; let mut config = test_config_with_secret_hex("77777777777777777777777777777777"); config.censorship.alpn_enforce
= true; @@ -1289,7 +1283,7 @@ async fn timing_matrix_tls_classes_under_fixed_delay_budget() { for (class, probe) in classes { let mut samples_ms = Vec::with_capacity(ITER); for idx in 0..ITER { - clear_auth_probe_state_for_testing(); + clear_auth_probe_state_for_testing_in_shared(shared.as_ref()); let replay_checker = ReplayChecker::new(4096, Duration::from_secs(60)); let peer: SocketAddr = SocketAddr::from((base_ip, 44_000 + idx as u16)); let started = Instant::now(); @@ -1411,17 +1405,13 @@ fn mode_policy_matrix_is_stable_for_all_tag_transport_mode_combinations() { #[test] fn invalid_secret_warning_keys_do_not_collide_on_colon_boundaries() { - let _guard = warned_secrets_test_lock() - .lock() - .unwrap_or_else(|poisoned| poisoned.into_inner()); - clear_warned_secrets_for_testing(); + let shared = ProxySharedState::new(); + clear_warned_secrets_for_testing_in_shared(shared.as_ref()); - warn_invalid_secret_once("a:b", "c", ACCESS_SECRET_BYTES, Some(1)); - warn_invalid_secret_once("a", "b:c", ACCESS_SECRET_BYTES, Some(2)); + warn_invalid_secret_once_in(shared.as_ref(), "a:b", "c", ACCESS_SECRET_BYTES, Some(1)); + warn_invalid_secret_once_in(shared.as_ref(), "a", "b:c", ACCESS_SECRET_BYTES, Some(2)); - let warned = INVALID_SECRET_WARNED - .get() - .expect("warned set must be initialized"); + let warned = warned_secrets_for_testing_in_shared(shared.as_ref()); let guard = warned.lock().expect("warned set lock must be available"); assert_eq!( guard.len(), @@ -1432,19 +1422,15 @@ fn invalid_secret_warning_keys_do_not_collide_on_colon_boundaries() { #[test] fn invalid_secret_warning_cache_is_bounded() { - let _guard = warned_secrets_test_lock() - .lock() - .unwrap_or_else(|poisoned| poisoned.into_inner()); - clear_warned_secrets_for_testing(); + let shared = ProxySharedState::new(); + clear_warned_secrets_for_testing_in_shared(shared.as_ref()); for idx in 0..(WARNED_SECRET_MAX_ENTRIES + 32) { let user = format!("warned_user_{idx}"); - warn_invalid_secret_once(&user, 
"invalid_length", ACCESS_SECRET_BYTES, Some(idx)); + warn_invalid_secret_once_in(shared.as_ref(), &user, "invalid_length", ACCESS_SECRET_BYTES, Some(idx)); } - let warned = INVALID_SECRET_WARNED - .get() - .expect("warned set must be initialized"); + let warned = warned_secrets_for_testing_in_shared(shared.as_ref()); let guard = warned.lock().expect("warned set lock must be available"); assert_eq!( guard.len(), @@ -1455,10 +1441,8 @@ fn invalid_secret_warning_cache_is_bounded() { #[tokio::test] async fn repeated_invalid_tls_probes_trigger_pre_auth_throttle() { - let _guard = auth_probe_test_lock() - .lock() - .unwrap_or_else(|poisoned| poisoned.into_inner()); - clear_auth_probe_state_for_testing(); + let shared = ProxySharedState::new(); + clear_auth_probe_state_for_testing_in_shared(shared.as_ref()); let config = test_config_with_secret_hex("11111111111111111111111111111111"); let replay_checker = ReplayChecker::new(128, Duration::from_secs(60)); @@ -1469,7 +1453,7 @@ async fn repeated_invalid_tls_probes_trigger_pre_auth_throttle() { invalid[tls::TLS_DIGEST_POS + tls::TLS_DIGEST_LEN] = 32; for _ in 0..AUTH_PROBE_BACKOFF_START_FAILS { - let result = handle_tls_handshake( + let result = handle_tls_handshake_with_shared( &invalid, tokio::io::empty(), tokio::io::sink(), @@ -1478,13 +1462,14 @@ async fn repeated_invalid_tls_probes_trigger_pre_auth_throttle() { &replay_checker, &rng, None, + shared.as_ref(), ) .await; assert!(matches!(result, HandshakeResult::BadClient { .. 
})); } assert!( - auth_probe_fail_streak_for_testing(peer.ip()) + auth_probe_fail_streak_for_testing_in_shared(shared.as_ref(), peer.ip()) .is_some_and(|streak| streak >= AUTH_PROBE_BACKOFF_START_FAILS), "invalid probe burst must grow pre-auth failure streak to backoff threshold" ); @@ -1492,10 +1477,8 @@ async fn repeated_invalid_tls_probes_trigger_pre_auth_throttle() { #[tokio::test] async fn successful_tls_handshake_clears_pre_auth_failure_streak() { - let _guard = auth_probe_test_lock() - .lock() - .unwrap_or_else(|poisoned| poisoned.into_inner()); - clear_auth_probe_state_for_testing(); + let shared = ProxySharedState::new(); + clear_auth_probe_state_for_testing_in_shared(shared.as_ref()); let secret = [0x23u8; 16]; let config = test_config_with_secret_hex("23232323232323232323232323232323"); @@ -1507,7 +1490,7 @@ async fn successful_tls_handshake_clears_pre_auth_failure_streak() { invalid[tls::TLS_DIGEST_POS + tls::TLS_DIGEST_LEN] = 32; for expected in 1..AUTH_PROBE_BACKOFF_START_FAILS { - let result = handle_tls_handshake( + let result = handle_tls_handshake_with_shared( &invalid, tokio::io::empty(), tokio::io::sink(), @@ -1516,18 +1499,19 @@ async fn successful_tls_handshake_clears_pre_auth_failure_streak() { &replay_checker, &rng, None, + shared.as_ref(), ) .await; assert!(matches!(result, HandshakeResult::BadClient { .. 
})); assert_eq!( - auth_probe_fail_streak_for_testing(peer.ip()), + auth_probe_fail_streak_for_testing_in_shared(shared.as_ref(), peer.ip()), Some(expected), "failure streak must grow before a successful authentication" ); } let valid = make_valid_tls_handshake(&secret, 0); - let success = handle_tls_handshake( + let success = handle_tls_handshake_with_shared( &valid, tokio::io::empty(), tokio::io::sink(), @@ -1536,12 +1520,13 @@ async fn successful_tls_handshake_clears_pre_auth_failure_streak() { &replay_checker, &rng, None, + shared.as_ref(), ) .await; assert!(matches!(success, HandshakeResult::Success(_))); assert_eq!( - auth_probe_fail_streak_for_testing(peer.ip()), + auth_probe_fail_streak_for_testing_in_shared(shared.as_ref(), peer.ip()), None, "successful authentication must clear accumulated pre-auth failures" ); @@ -1549,6 +1534,7 @@ async fn successful_tls_handshake_clears_pre_auth_failure_streak() { #[test] fn auth_probe_capacity_prunes_stale_entries_for_new_ips() { + let shared = ProxySharedState::new(); let state = DashMap::new(); let now = Instant::now(); let stale_seen = now - Duration::from_secs(AUTH_PROBE_TRACK_RETENTION_SECS + 1); @@ -1571,7 +1557,7 @@ fn auth_probe_capacity_prunes_stale_entries_for_new_ips() { } let newcomer = IpAddr::V4(Ipv4Addr::new(198, 51, 100, 200)); - auth_probe_record_failure_with_state(&state, newcomer, now); + auth_probe_record_failure_with_state_in(shared.as_ref(), &state, newcomer, now); assert_eq!( state.get(&newcomer).map(|entry| entry.fail_streak), @@ -1586,10 +1572,8 @@ fn auth_probe_capacity_prunes_stale_entries_for_new_ips() { #[test] fn auth_probe_capacity_fresh_full_map_still_tracks_newcomer_with_bounded_eviction() { - let _guard = auth_probe_test_lock() - .lock() - .expect("auth probe test lock must be available"); - clear_auth_probe_state_for_testing(); + let shared = ProxySharedState::new(); + clear_auth_probe_state_for_testing_in_shared(shared.as_ref()); let state = DashMap::new(); let now = Instant::now(); 
@@ -1622,7 +1606,7 @@ fn auth_probe_capacity_fresh_full_map_still_tracks_newcomer_with_bounded_evictio ); let newcomer = IpAddr::V4(Ipv4Addr::new(203, 0, 113, 55)); - auth_probe_record_failure_with_state(&state, newcomer, now); + auth_probe_record_failure_with_state_in(shared.as_ref(), &state, newcomer, now); assert!( state.get(&newcomer).is_some(), @@ -1638,7 +1622,7 @@ fn auth_probe_capacity_fresh_full_map_still_tracks_newcomer_with_bounded_evictio "auth probe map must stay at configured cap after bounded eviction" ); assert!( - auth_probe_saturation_is_throttled_at_for_testing(now), + auth_probe_saturation_is_throttled_at_for_testing_in_shared(shared.as_ref(), now), "capacity pressure should still activate coarse global pre-auth throttling" ); } @@ -1646,23 +1630,21 @@ fn auth_probe_capacity_fresh_full_map_still_tracks_newcomer_with_bounded_evictio #[test] fn unknown_sni_warn_cooldown_first_event_is_warn_and_repeated_events_are_info_until_window_expires() { - let _guard = unknown_sni_warn_test_lock() - .lock() - .unwrap_or_else(|poisoned| poisoned.into_inner()); - clear_unknown_sni_warn_state_for_testing(); + let shared = ProxySharedState::new(); + clear_unknown_sni_warn_state_for_testing_in_shared(shared.as_ref()); let now = Instant::now(); assert!( - should_emit_unknown_sni_warn_for_testing(now), + should_emit_unknown_sni_warn_for_testing_in_shared(shared.as_ref(), now), "first unknown SNI event must be eligible for WARN emission" ); assert!( - !should_emit_unknown_sni_warn_for_testing(now + Duration::from_secs(1)), + !should_emit_unknown_sni_warn_for_testing_in_shared(shared.as_ref(), now + Duration::from_secs(1)), "events inside cooldown window must be demoted from WARN to INFO" ); assert!( - should_emit_unknown_sni_warn_for_testing( + should_emit_unknown_sni_warn_for_testing_in_shared(shared.as_ref(), now + Duration::from_secs(UNKNOWN_SNI_WARN_COOLDOWN_SECS) ), "once cooldown expires, next unknown SNI event must be WARN-eligible again" @@ -1671,10 +1653,8 
@@ fn unknown_sni_warn_cooldown_first_event_is_warn_and_repeated_events_are_info_un #[test] fn stress_auth_probe_full_map_churn_keeps_bound_and_tracks_newcomers() { - let _guard = auth_probe_test_lock() - .lock() - .unwrap_or_else(|poisoned| poisoned.into_inner()); - clear_auth_probe_state_for_testing(); + let shared = ProxySharedState::new(); + clear_auth_probe_state_for_testing_in_shared(shared.as_ref()); let state = DashMap::new(); let base_now = Instant::now(); @@ -1704,7 +1684,7 @@ fn stress_auth_probe_full_map_churn_keeps_bound_and_tracks_newcomers() { (step & 0xff) as u8, )); let now = base_now + Duration::from_millis(10_000 + step as u64); - auth_probe_record_failure_with_state(&state, newcomer, now); + auth_probe_record_failure_with_state_in(shared.as_ref(), &state, newcomer, now); assert!( state.get(&newcomer).is_some(), @@ -1720,10 +1700,8 @@ fn stress_auth_probe_full_map_churn_keeps_bound_and_tracks_newcomers() { #[test] fn auth_probe_over_cap_churn_still_tracks_newcomer_after_round_limit() { - let _guard = auth_probe_test_lock() - .lock() - .unwrap_or_else(|poisoned| poisoned.into_inner()); - clear_auth_probe_state_for_testing(); + let shared = ProxySharedState::new(); + clear_auth_probe_state_for_testing_in_shared(shared.as_ref()); let state = DashMap::new(); let now = Instant::now(); @@ -1747,7 +1725,7 @@ fn auth_probe_over_cap_churn_still_tracks_newcomer_after_round_limit() { } let newcomer = IpAddr::V4(Ipv4Addr::new(203, 0, 114, 77)); - auth_probe_record_failure_with_state(&state, newcomer, now + Duration::from_secs(1)); + auth_probe_record_failure_with_state_in(shared.as_ref(), &state, newcomer, now + Duration::from_secs(1)); assert!( state.get(&newcomer).is_some(), @@ -1761,10 +1739,8 @@ fn auth_probe_over_cap_churn_still_tracks_newcomer_after_round_limit() { #[test] fn auth_probe_capacity_prefers_evicting_low_fail_streak_entries_first() { - let _guard = auth_probe_test_lock() - .lock() - .unwrap_or_else(|poisoned| poisoned.into_inner()); - 
clear_auth_probe_state_for_testing(); + let shared = ProxySharedState::new(); + clear_auth_probe_state_for_testing_in_shared(shared.as_ref()); let state = DashMap::new(); let now = Instant::now(); @@ -1808,7 +1784,7 @@ fn auth_probe_capacity_prefers_evicting_low_fail_streak_entries_first() { ); let newcomer = IpAddr::V4(Ipv4Addr::new(203, 0, 113, 201)); - auth_probe_record_failure_with_state(&state, newcomer, now); + auth_probe_record_failure_with_state_in(shared.as_ref(), &state, newcomer, now); assert!(state.get(&newcomer).is_some(), "new source must be tracked"); assert!( @@ -1823,10 +1799,8 @@ fn auth_probe_capacity_prefers_evicting_low_fail_streak_entries_first() { #[test] fn auth_probe_capacity_tie_breaker_evicts_oldest_with_equal_fail_streak() { - let _guard = auth_probe_test_lock() - .lock() - .unwrap_or_else(|poisoned| poisoned.into_inner()); - clear_auth_probe_state_for_testing(); + let shared = ProxySharedState::new(); + clear_auth_probe_state_for_testing_in_shared(shared.as_ref()); let state = DashMap::new(); let now = Instant::now(); @@ -1868,7 +1842,7 @@ fn auth_probe_capacity_tie_breaker_evicts_oldest_with_equal_fail_streak() { ); let newcomer = IpAddr::V4(Ipv4Addr::new(203, 0, 113, 202)); - auth_probe_record_failure_with_state(&state, newcomer, now); + auth_probe_record_failure_with_state_in(shared.as_ref(), &state, newcomer, now); assert!(state.get(&newcomer).is_some(), "new source must be tracked"); assert!( @@ -1883,10 +1857,8 @@ fn auth_probe_capacity_tie_breaker_evicts_oldest_with_equal_fail_streak() { #[test] fn stress_auth_probe_capacity_churn_preserves_high_fail_sentinels() { - let _guard = auth_probe_test_lock() - .lock() - .unwrap_or_else(|poisoned| poisoned.into_inner()); - clear_auth_probe_state_for_testing(); + let shared = ProxySharedState::new(); + clear_auth_probe_state_for_testing_in_shared(shared.as_ref()); let state = DashMap::new(); let base_now = Instant::now(); @@ -1936,7 +1908,7 @@ fn 
stress_auth_probe_capacity_churn_preserves_high_fail_sentinels() { (step & 0xff) as u8, )); let now = base_now + Duration::from_millis(10_000 + step as u64); - auth_probe_record_failure_with_state(&state, newcomer, now); + auth_probe_record_failure_with_state_in(shared.as_ref(), &state, newcomer, now); assert_eq!( state.len(), @@ -1952,14 +1924,15 @@ fn stress_auth_probe_capacity_churn_preserves_high_fail_sentinels() { #[test] fn auth_probe_ipv6_is_bucketed_by_prefix_64() { + let shared = ProxySharedState::new(); let state = DashMap::new(); let now = Instant::now(); let ip_a = IpAddr::V6("2001:db8:abcd:1234:1:2:3:4".parse().unwrap()); let ip_b = IpAddr::V6("2001:db8:abcd:1234:ffff:eeee:dddd:cccc".parse().unwrap()); - auth_probe_record_failure_with_state(&state, normalize_auth_probe_ip(ip_a), now); - auth_probe_record_failure_with_state(&state, normalize_auth_probe_ip(ip_b), now); + auth_probe_record_failure_with_state_in(shared.as_ref(), &state, normalize_auth_probe_ip(ip_a), now); + auth_probe_record_failure_with_state_in(shared.as_ref(), &state, normalize_auth_probe_ip(ip_b), now); let normalized = normalize_auth_probe_ip(ip_a); assert_eq!( @@ -1976,14 +1949,15 @@ fn auth_probe_ipv6_is_bucketed_by_prefix_64() { #[test] fn auth_probe_ipv6_different_prefixes_use_distinct_buckets() { + let shared = ProxySharedState::new(); let state = DashMap::new(); let now = Instant::now(); let ip_a = IpAddr::V6("2001:db8:1111:2222:1:2:3:4".parse().unwrap()); let ip_b = IpAddr::V6("2001:db8:1111:3333:1:2:3:4".parse().unwrap()); - auth_probe_record_failure_with_state(&state, normalize_auth_probe_ip(ip_a), now); - auth_probe_record_failure_with_state(&state, normalize_auth_probe_ip(ip_b), now); + auth_probe_record_failure_with_state_in(shared.as_ref(), &state, normalize_auth_probe_ip(ip_a), now); + auth_probe_record_failure_with_state_in(shared.as_ref(), &state, normalize_auth_probe_ip(ip_b), now); assert_eq!( state.len(), @@ -2006,25 +1980,23 @@ fn 
auth_probe_ipv6_different_prefixes_use_distinct_buckets() { #[test] fn auth_probe_success_clears_whole_ipv6_prefix_bucket() { - let _guard = auth_probe_test_lock() - .lock() - .unwrap_or_else(|poisoned| poisoned.into_inner()); - clear_auth_probe_state_for_testing(); + let shared = ProxySharedState::new(); + clear_auth_probe_state_for_testing_in_shared(shared.as_ref()); let now = Instant::now(); let ip_fail = IpAddr::V6("2001:db8:aaaa:bbbb:1:2:3:4".parse().unwrap()); let ip_success = IpAddr::V6("2001:db8:aaaa:bbbb:ffff:eeee:dddd:cccc".parse().unwrap()); - auth_probe_record_failure(ip_fail, now); + auth_probe_record_failure_in(shared.as_ref(), ip_fail, now); assert_eq!( - auth_probe_fail_streak_for_testing(ip_fail), + auth_probe_fail_streak_for_testing_in_shared(shared.as_ref(), ip_fail), Some(1), "precondition: normalized prefix bucket must exist" ); - auth_probe_record_success(ip_success); + auth_probe_record_success_in(shared.as_ref(), ip_success); assert_eq!( - auth_probe_fail_streak_for_testing(ip_fail), + auth_probe_fail_streak_for_testing_in_shared(shared.as_ref(), ip_fail), None, "success from the same /64 must clear the shared bucket" ); @@ -2032,13 +2004,14 @@ fn auth_probe_success_clears_whole_ipv6_prefix_bucket() { #[test] fn auth_probe_eviction_offset_varies_with_input() { + let shared = ProxySharedState::new(); let now = Instant::now(); let ip1 = IpAddr::V4(Ipv4Addr::new(198, 51, 100, 10)); let ip2 = IpAddr::V4(Ipv4Addr::new(198, 51, 100, 11)); - let a = auth_probe_eviction_offset(ip1, now); - let b = auth_probe_eviction_offset(ip1, now); - let c = auth_probe_eviction_offset(ip2, now); + let a = auth_probe_eviction_offset_in(shared.as_ref(), ip1, now); + let b = auth_probe_eviction_offset_in(shared.as_ref(), ip1, now); + let c = auth_probe_eviction_offset_in(shared.as_ref(), ip2, now); assert_eq!(a, b, "same input must yield deterministic offset"); assert_ne!(a, c, "different peer IPs should not collapse to one offset"); @@ -2046,12 +2019,13 @@ fn 
auth_probe_eviction_offset_varies_with_input() { #[test] fn auth_probe_eviction_offset_changes_with_time_component() { + let shared = ProxySharedState::new(); let ip = IpAddr::V4(Ipv4Addr::new(203, 0, 113, 77)); let now = Instant::now(); let later = now + Duration::from_millis(1); - let a = auth_probe_eviction_offset(ip, now); - let b = auth_probe_eviction_offset(ip, later); + let a = auth_probe_eviction_offset_in(shared.as_ref(), ip, now); + let b = auth_probe_eviction_offset_in(shared.as_ref(), ip, later); assert_ne!( a, b, @@ -2061,10 +2035,8 @@ fn auth_probe_eviction_offset_changes_with_time_component() { #[test] fn auth_probe_round_limited_overcap_eviction_marks_saturation_and_keeps_newcomer_trackable() { - let _guard = auth_probe_test_lock() - .lock() - .unwrap_or_else(|poisoned| poisoned.into_inner()); - clear_auth_probe_state_for_testing(); + let shared = ProxySharedState::new(); + clear_auth_probe_state_for_testing_in_shared(shared.as_ref()); let state = DashMap::new(); let now = Instant::now(); @@ -2098,7 +2070,7 @@ fn auth_probe_round_limited_overcap_eviction_marks_saturation_and_keeps_newcomer } let newcomer = IpAddr::V4(Ipv4Addr::new(203, 0, 113, 40)); - auth_probe_record_failure_with_state(&state, newcomer, now + Duration::from_millis(1)); + auth_probe_record_failure_with_state_in(shared.as_ref(), &state, newcomer, now + Duration::from_millis(1)); assert!( state.get(&newcomer).is_some(), @@ -2109,17 +2081,15 @@ fn auth_probe_round_limited_overcap_eviction_marks_saturation_and_keeps_newcomer "high fail-streak sentinel must survive round-limited eviction" ); assert!( - auth_probe_saturation_is_throttled_at_for_testing(now + Duration::from_millis(1)), + auth_probe_saturation_is_throttled_at_for_testing_in_shared(shared.as_ref(), now + Duration::from_millis(1)), "round-limited over-cap path must activate saturation throttle marker" ); } #[tokio::test] async fn gap_t01_short_tls_probe_burst_is_throttled() { - let _guard = auth_probe_test_lock() - .lock() - 
.unwrap_or_else(|poisoned| poisoned.into_inner()); - clear_auth_probe_state_for_testing(); + let shared = ProxySharedState::new(); + clear_auth_probe_state_for_testing_in_shared(shared.as_ref()); let config = test_config_with_secret_hex("11111111111111111111111111111111"); let replay_checker = ReplayChecker::new(128, Duration::from_secs(60)); @@ -2129,7 +2099,7 @@ async fn gap_t01_short_tls_probe_burst_is_throttled() { let too_short = vec![0x16, 0x03, 0x01]; for _ in 0..AUTH_PROBE_BACKOFF_START_FAILS { - let result = handle_tls_handshake( + let result = handle_tls_handshake_with_shared( &too_short, tokio::io::empty(), tokio::io::sink(), @@ -2138,13 +2108,14 @@ async fn gap_t01_short_tls_probe_burst_is_throttled() { &replay_checker, &rng, None, + shared.as_ref(), ) .await; assert!(matches!(result, HandshakeResult::BadClient { .. })); } assert!( - auth_probe_fail_streak_for_testing(peer.ip()) + auth_probe_fail_streak_for_testing_in_shared(shared.as_ref(), peer.ip()) .is_some_and(|streak| streak >= AUTH_PROBE_BACKOFF_START_FAILS), "short TLS probe bursts must increase auth-probe fail streak" ); @@ -2152,10 +2123,8 @@ async fn gap_t01_short_tls_probe_burst_is_throttled() { #[test] fn stress_auth_probe_overcap_churn_does_not_starve_high_threat_sentinel_bucket() { - let _guard = auth_probe_test_lock() - .lock() - .unwrap_or_else(|poisoned| poisoned.into_inner()); - clear_auth_probe_state_for_testing(); + let shared = ProxySharedState::new(); + clear_auth_probe_state_for_testing_in_shared(shared.as_ref()); let state = DashMap::new(); let base_now = Instant::now(); @@ -2194,7 +2163,7 @@ fn stress_auth_probe_overcap_churn_does_not_starve_high_threat_sentinel_bucket() ((step >> 8) & 0xff) as u8, (step & 0xff) as u8, )); - auth_probe_record_failure_with_state( + auth_probe_record_failure_with_state_in(shared.as_ref(), &state, newcomer, base_now + Duration::from_millis(step as u64 + 1), @@ -2213,10 +2182,8 @@ fn 
stress_auth_probe_overcap_churn_does_not_starve_high_threat_sentinel_bucket() #[test] fn light_fuzz_auth_probe_overcap_eviction_prefers_less_threatening_entries() { - let _guard = auth_probe_test_lock() - .lock() - .unwrap_or_else(|poisoned| poisoned.into_inner()); - clear_auth_probe_state_for_testing(); + let shared = ProxySharedState::new(); + clear_auth_probe_state_for_testing_in_shared(shared.as_ref()); let now = Instant::now(); let mut s: u64 = 0xBADC_0FFE_EE11_2233; @@ -2259,7 +2226,7 @@ fn light_fuzz_auth_probe_overcap_eviction_prefers_less_threatening_entries() { ((round >> 8) & 0xff) as u8, (round & 0xff) as u8, )); - auth_probe_record_failure_with_state( + auth_probe_record_failure_with_state_in(shared.as_ref(), &state, newcomer, now + Duration::from_millis(round as u64 + 1), @@ -2277,6 +2244,7 @@ fn light_fuzz_auth_probe_overcap_eviction_prefers_less_threatening_entries() { } #[test] fn light_fuzz_auth_probe_eviction_offset_is_deterministic_per_input_pair() { + let shared = ProxySharedState::new(); let mut rng = StdRng::seed_from_u64(0xA11CE5EED); let base = Instant::now(); @@ -2290,8 +2258,8 @@ fn light_fuzz_auth_probe_eviction_offset_is_deterministic_per_input_pair() { let offset_ns = rng.random_range(0_u64..2_000_000); let when = base + Duration::from_nanos(offset_ns); - let first = auth_probe_eviction_offset(ip, when); - let second = auth_probe_eviction_offset(ip, when); + let first = auth_probe_eviction_offset_in(shared.as_ref(), ip, when); + let second = auth_probe_eviction_offset_in(shared.as_ref(), ip, when); assert_eq!( first, second, "eviction offset must be stable for identical (ip, now) pairs" @@ -2301,6 +2269,7 @@ fn light_fuzz_auth_probe_eviction_offset_is_deterministic_per_input_pair() { #[test] fn adversarial_eviction_offset_spread_avoids_single_bucket_collapse() { + let shared = ProxySharedState::new(); let modulus = AUTH_PROBE_TRACK_MAX_ENTRIES; let mut bucket_hits = vec![0usize; modulus]; let now = Instant::now(); @@ -2312,7 +2281,7 @@ 
fn adversarial_eviction_offset_spread_avoids_single_bucket_collapse() { (idx & 0xff) as u8, ((idx.wrapping_mul(37)) & 0xff) as u8, )); - let bucket = auth_probe_eviction_offset(ip, now) % modulus; + let bucket = auth_probe_eviction_offset_in(shared.as_ref(), ip, now) % modulus; bucket_hits[bucket] += 1; } @@ -2337,6 +2306,7 @@ fn adversarial_eviction_offset_spread_avoids_single_bucket_collapse() { #[test] fn stress_auth_probe_eviction_offset_high_volume_uniqueness_sanity() { + let shared = ProxySharedState::new(); let now = Instant::now(); let mut seen = std::collections::HashSet::new(); @@ -2347,7 +2317,7 @@ fn stress_auth_probe_eviction_offset_high_volume_uniqueness_sanity() { ((idx >> 8) & 0xff) as u8, (idx & 0xff) as u8, )); - seen.insert(auth_probe_eviction_offset(ip, now)); + seen.insert(auth_probe_eviction_offset_in(shared.as_ref(), ip, now)); } assert!( @@ -2358,10 +2328,8 @@ fn stress_auth_probe_eviction_offset_high_volume_uniqueness_sanity() { #[tokio::test(flavor = "multi_thread", worker_threads = 4)] async fn auth_probe_concurrent_failures_do_not_lose_fail_streak_updates() { - let _guard = auth_probe_test_lock() - .lock() - .unwrap_or_else(|poisoned| poisoned.into_inner()); - clear_auth_probe_state_for_testing(); + let shared = ProxySharedState::new(); + clear_auth_probe_state_for_testing_in_shared(shared.as_ref()); let peer_ip: IpAddr = "198.51.100.90".parse().unwrap(); let tasks = 128usize; @@ -2370,9 +2338,10 @@ async fn auth_probe_concurrent_failures_do_not_lose_fail_streak_updates() { for _ in 0..tasks { let barrier = barrier.clone(); + let shared = shared.clone(); handles.push(tokio::spawn(async move { barrier.wait().await; - auth_probe_record_failure(peer_ip, Instant::now()); + auth_probe_record_failure_in(shared.as_ref(), peer_ip, Instant::now()); })); } @@ -2382,7 +2351,7 @@ async fn auth_probe_concurrent_failures_do_not_lose_fail_streak_updates() { .expect("concurrent failure recording task must not panic"); } - let streak = 
auth_probe_fail_streak_for_testing(peer_ip) + let streak = auth_probe_fail_streak_for_testing_in_shared(shared.as_ref(), peer_ip) .expect("tracked peer must exist after concurrent failure burst"); assert_eq!( streak as usize, tasks, @@ -2392,10 +2361,8 @@ async fn auth_probe_concurrent_failures_do_not_lose_fail_streak_updates() { #[tokio::test(flavor = "multi_thread", worker_threads = 4)] async fn invalid_probe_noise_from_other_ips_does_not_break_valid_tls_handshake() { - let _guard = auth_probe_test_lock() - .lock() - .unwrap_or_else(|poisoned| poisoned.into_inner()); - clear_auth_probe_state_for_testing(); + let shared = ProxySharedState::new(); + clear_auth_probe_state_for_testing_in_shared(shared.as_ref()); let secret = [0x31u8; 16]; let config = Arc::new(test_config_with_secret_hex( @@ -2464,7 +2431,7 @@ async fn invalid_probe_noise_from_other_ips_does_not_break_valid_tls_handshake() "invalid probe noise from other IPs must not block a valid victim handshake" ); assert_eq!( - auth_probe_fail_streak_for_testing(victim_peer.ip()), + auth_probe_fail_streak_for_testing_in_shared(shared.as_ref(), victim_peer.ip()), None, "successful victim handshake must not retain pre-auth failure streak" ); @@ -2472,13 +2439,11 @@ async fn invalid_probe_noise_from_other_ips_does_not_break_valid_tls_handshake() #[test] fn auth_probe_saturation_state_expires_after_retention_window() { - let _guard = auth_probe_test_lock() - .lock() - .unwrap_or_else(|poisoned| poisoned.into_inner()); - clear_auth_probe_state_for_testing(); + let shared = ProxySharedState::new(); + clear_auth_probe_state_for_testing_in_shared(shared.as_ref()); let now = Instant::now(); - let saturation = auth_probe_saturation_state(); + let saturation = auth_probe_saturation_state_for_testing_in_shared(shared.as_ref()); { let mut guard = saturation .lock() @@ -2491,7 +2456,7 @@ fn auth_probe_saturation_state_expires_after_retention_window() { } assert!( - !auth_probe_saturation_is_throttled_for_testing(), + 
!auth_probe_saturation_is_throttled_for_testing_in_shared(shared.as_ref()), "expired saturation state must stop throttling and self-clear" ); @@ -2503,10 +2468,8 @@ fn auth_probe_saturation_state_expires_after_retention_window() { #[tokio::test] async fn global_saturation_marker_does_not_block_valid_tls_handshake() { - let _guard = auth_probe_test_lock() - .lock() - .unwrap_or_else(|poisoned| poisoned.into_inner()); - clear_auth_probe_state_for_testing(); + let shared = ProxySharedState::new(); + clear_auth_probe_state_for_testing_in_shared(shared.as_ref()); let secret = [0x41u8; 16]; let config = test_config_with_secret_hex("41414141414141414141414141414141"); @@ -2515,7 +2478,7 @@ async fn global_saturation_marker_does_not_block_valid_tls_handshake() { let peer: SocketAddr = "198.51.100.101:45101".parse().unwrap(); let now = Instant::now(); - let saturation = auth_probe_saturation_state(); + let saturation = auth_probe_saturation_state_for_testing_in_shared(shared.as_ref()); { let mut guard = saturation .lock() @@ -2528,7 +2491,7 @@ async fn global_saturation_marker_does_not_block_valid_tls_handshake() { } let valid = make_valid_tls_handshake(&secret, 0); - let result = handle_tls_handshake( + let result = handle_tls_handshake_with_shared( &valid, tokio::io::empty(), tokio::io::sink(), @@ -2537,6 +2500,7 @@ async fn global_saturation_marker_does_not_block_valid_tls_handshake() { &replay_checker, &rng, None, + shared.as_ref(), ) .await; @@ -2545,7 +2509,7 @@ async fn global_saturation_marker_does_not_block_valid_tls_handshake() { "global saturation marker must not block valid authenticated TLS handshakes" ); assert_eq!( - auth_probe_fail_streak_for_testing(peer.ip()), + auth_probe_fail_streak_for_testing_in_shared(shared.as_ref(), peer.ip()), None, "successful handshake under saturation marker must not retain per-ip probe failures" ); @@ -2553,10 +2517,8 @@ async fn global_saturation_marker_does_not_block_valid_tls_handshake() { #[tokio::test] async fn 
expired_global_saturation_allows_valid_tls_handshake() { - let _guard = auth_probe_test_lock() - .lock() - .unwrap_or_else(|poisoned| poisoned.into_inner()); - clear_auth_probe_state_for_testing(); + let shared = ProxySharedState::new(); + clear_auth_probe_state_for_testing_in_shared(shared.as_ref()); let secret = [0x55u8; 16]; let config = test_config_with_secret_hex("55555555555555555555555555555555"); @@ -2565,7 +2527,7 @@ async fn expired_global_saturation_allows_valid_tls_handshake() { let peer: SocketAddr = "198.51.100.102:45102".parse().unwrap(); let now = Instant::now(); - let saturation = auth_probe_saturation_state(); + let saturation = auth_probe_saturation_state_for_testing_in_shared(shared.as_ref()); { let mut guard = saturation .lock() @@ -2578,7 +2540,7 @@ async fn expired_global_saturation_allows_valid_tls_handshake() { } let valid = make_valid_tls_handshake(&secret, 0); - let result = handle_tls_handshake( + let result = handle_tls_handshake_with_shared( &valid, tokio::io::empty(), tokio::io::sink(), @@ -2587,6 +2549,7 @@ async fn expired_global_saturation_allows_valid_tls_handshake() { &replay_checker, &rng, None, + shared.as_ref(), ) .await; @@ -2598,10 +2561,8 @@ async fn expired_global_saturation_allows_valid_tls_handshake() { #[tokio::test] async fn valid_tls_is_blocked_by_per_ip_preauth_throttle_without_saturation() { - let _guard = auth_probe_test_lock() - .lock() - .unwrap_or_else(|poisoned| poisoned.into_inner()); - clear_auth_probe_state_for_testing(); + let shared = ProxySharedState::new(); + clear_auth_probe_state_for_testing_in_shared(shared.as_ref()); let secret = [0x61u8; 16]; let config = test_config_with_secret_hex("61616161616161616161616161616161"); @@ -2609,7 +2570,7 @@ async fn valid_tls_is_blocked_by_per_ip_preauth_throttle_without_saturation() { let rng = SecureRandom::new(); let peer: SocketAddr = "198.51.100.103:45103".parse().unwrap(); - auth_probe_state_map().insert( + 
auth_probe_state_for_testing_in_shared(shared.as_ref()).insert( normalize_auth_probe_ip(peer.ip()), AuthProbeState { fail_streak: AUTH_PROBE_BACKOFF_START_FAILS, @@ -2619,7 +2580,7 @@ async fn valid_tls_is_blocked_by_per_ip_preauth_throttle_without_saturation() { ); let valid = make_valid_tls_handshake(&secret, 0); - let result = handle_tls_handshake( + let result = handle_tls_handshake_with_shared( &valid, tokio::io::empty(), tokio::io::sink(), @@ -2628,6 +2589,7 @@ async fn valid_tls_is_blocked_by_per_ip_preauth_throttle_without_saturation() { &replay_checker, &rng, None, + shared.as_ref(), ) .await; @@ -2636,10 +2598,8 @@ async fn valid_tls_is_blocked_by_per_ip_preauth_throttle_without_saturation() { #[tokio::test] async fn saturation_allows_valid_tls_even_when_peer_ip_is_currently_throttled() { - let _guard = auth_probe_test_lock() - .lock() - .unwrap_or_else(|poisoned| poisoned.into_inner()); - clear_auth_probe_state_for_testing(); + let shared = ProxySharedState::new(); + clear_auth_probe_state_for_testing_in_shared(shared.as_ref()); let secret = [0x62u8; 16]; let config = test_config_with_secret_hex("62626262626262626262626262626262"); @@ -2648,7 +2608,7 @@ async fn saturation_allows_valid_tls_even_when_peer_ip_is_currently_throttled() let peer: SocketAddr = "198.51.100.104:45104".parse().unwrap(); let now = Instant::now(); - auth_probe_state_map().insert( + auth_probe_state_for_testing_in_shared(shared.as_ref()).insert( normalize_auth_probe_ip(peer.ip()), AuthProbeState { fail_streak: AUTH_PROBE_BACKOFF_START_FAILS, @@ -2657,7 +2617,7 @@ async fn saturation_allows_valid_tls_even_when_peer_ip_is_currently_throttled() }, ); { - let mut guard = auth_probe_saturation_state() + let mut guard = auth_probe_saturation_state_for_testing_in_shared(shared.as_ref()) .lock() .unwrap_or_else(|poisoned| poisoned.into_inner()); *guard = Some(AuthProbeSaturationState { @@ -2668,7 +2628,7 @@ async fn saturation_allows_valid_tls_even_when_peer_ip_is_currently_throttled() } 
let valid = make_valid_tls_handshake(&secret, 0); - let result = handle_tls_handshake( + let result = handle_tls_handshake_with_shared( &valid, tokio::io::empty(), tokio::io::sink(), @@ -2677,12 +2637,13 @@ async fn saturation_allows_valid_tls_even_when_peer_ip_is_currently_throttled() &replay_checker, &rng, None, + shared.as_ref(), ) .await; assert!(matches!(result, HandshakeResult::Success(_))); assert_eq!( - auth_probe_fail_streak_for_testing(peer.ip()), + auth_probe_fail_streak_for_testing_in_shared(shared.as_ref(), peer.ip()), None, "successful auth under saturation must clear the peer's throttled state" ); @@ -2690,10 +2651,8 @@ async fn saturation_allows_valid_tls_even_when_peer_ip_is_currently_throttled() #[tokio::test] async fn saturation_still_rejects_invalid_tls_probe_and_records_failure() { - let _guard = auth_probe_test_lock() - .lock() - .unwrap_or_else(|poisoned| poisoned.into_inner()); - clear_auth_probe_state_for_testing(); + let shared = ProxySharedState::new(); + clear_auth_probe_state_for_testing_in_shared(shared.as_ref()); let config = test_config_with_secret_hex("63636363636363636363636363636363"); let replay_checker = ReplayChecker::new(128, Duration::from_secs(60)); @@ -2701,7 +2660,7 @@ async fn saturation_still_rejects_invalid_tls_probe_and_records_failure() { let peer: SocketAddr = "198.51.100.105:45105".parse().unwrap(); let now = Instant::now(); { - let mut guard = auth_probe_saturation_state() + let mut guard = auth_probe_saturation_state_for_testing_in_shared(shared.as_ref()) .lock() .unwrap_or_else(|poisoned| poisoned.into_inner()); *guard = Some(AuthProbeSaturationState { @@ -2714,7 +2673,7 @@ async fn saturation_still_rejects_invalid_tls_probe_and_records_failure() { let mut invalid = vec![0x42u8; tls::TLS_DIGEST_POS + tls::TLS_DIGEST_LEN + 1 + 32]; invalid[tls::TLS_DIGEST_POS + tls::TLS_DIGEST_LEN] = 32; - let result = handle_tls_handshake( + let result = handle_tls_handshake_with_shared( &invalid, tokio::io::empty(), 
tokio::io::sink(), @@ -2723,12 +2682,13 @@ async fn saturation_still_rejects_invalid_tls_probe_and_records_failure() { &replay_checker, &rng, None, + shared.as_ref(), ) .await; assert!(matches!(result, HandshakeResult::BadClient { .. })); assert_eq!( - auth_probe_fail_streak_for_testing(peer.ip()), + auth_probe_fail_streak_for_testing_in_shared(shared.as_ref(), peer.ip()), Some(1), "invalid TLS during saturation must still increment per-ip failure tracking" ); @@ -2736,17 +2696,15 @@ async fn saturation_still_rejects_invalid_tls_probe_and_records_failure() { #[tokio::test] async fn saturation_grace_exhaustion_preauth_throttles_repeated_invalid_tls_probe() { - let _guard = auth_probe_test_lock() - .lock() - .unwrap_or_else(|poisoned| poisoned.into_inner()); - clear_auth_probe_state_for_testing(); + let shared = ProxySharedState::new(); + clear_auth_probe_state_for_testing_in_shared(shared.as_ref()); let config = test_config_with_secret_hex("63636363636363636363636363636363"); let replay_checker = ReplayChecker::new(128, Duration::from_secs(60)); let rng = SecureRandom::new(); let peer: SocketAddr = "198.51.100.205:45205".parse().unwrap(); let now = Instant::now(); - auth_probe_state_map().insert( + auth_probe_state_for_testing_in_shared(shared.as_ref()).insert( normalize_auth_probe_ip(peer.ip()), AuthProbeState { fail_streak: AUTH_PROBE_BACKOFF_START_FAILS + AUTH_PROBE_SATURATION_GRACE_FAILS, @@ -2755,7 +2713,7 @@ async fn saturation_grace_exhaustion_preauth_throttles_repeated_invalid_tls_prob }, ); { - let mut guard = auth_probe_saturation_state() + let mut guard = auth_probe_saturation_state_for_testing_in_shared(shared.as_ref()) .lock() .unwrap_or_else(|poisoned| poisoned.into_inner()); *guard = Some(AuthProbeSaturationState { @@ -2782,7 +2740,7 @@ async fn saturation_grace_exhaustion_preauth_throttles_repeated_invalid_tls_prob assert!(matches!(result, HandshakeResult::BadClient { .. 
})); assert_eq!( - auth_probe_fail_streak_for_testing(peer.ip()), + auth_probe_fail_streak_for_testing_in_shared(shared.as_ref(), peer.ip()), Some(AUTH_PROBE_BACKOFF_START_FAILS + AUTH_PROBE_SATURATION_GRACE_FAILS), "pre-auth throttle under exhausted saturation grace must reject without re-processing invalid TLS" ); @@ -2790,10 +2748,8 @@ async fn saturation_grace_exhaustion_preauth_throttles_repeated_invalid_tls_prob #[tokio::test] async fn saturation_allows_valid_mtproto_even_when_peer_ip_is_currently_throttled() { - let _guard = auth_probe_test_lock() - .lock() - .unwrap_or_else(|poisoned| poisoned.into_inner()); - clear_auth_probe_state_for_testing(); + let shared = ProxySharedState::new(); + clear_auth_probe_state_for_testing_in_shared(shared.as_ref()); let secret_hex = "64646464646464646464646464646464"; let mut config = test_config_with_secret_hex(secret_hex); @@ -2802,7 +2758,7 @@ async fn saturation_allows_valid_mtproto_even_when_peer_ip_is_currently_throttle let peer: SocketAddr = "198.51.100.106:45106".parse().unwrap(); let now = Instant::now(); - auth_probe_state_map().insert( + auth_probe_state_for_testing_in_shared(shared.as_ref()).insert( normalize_auth_probe_ip(peer.ip()), AuthProbeState { fail_streak: AUTH_PROBE_BACKOFF_START_FAILS, @@ -2811,7 +2767,7 @@ async fn saturation_allows_valid_mtproto_even_when_peer_ip_is_currently_throttle }, ); { - let mut guard = auth_probe_saturation_state() + let mut guard = auth_probe_saturation_state_for_testing_in_shared(shared.as_ref()) .lock() .unwrap_or_else(|poisoned| poisoned.into_inner()); *guard = Some(AuthProbeSaturationState { @@ -2822,7 +2778,7 @@ async fn saturation_allows_valid_mtproto_even_when_peer_ip_is_currently_throttle } let valid = make_valid_mtproto_handshake(secret_hex, ProtoTag::Secure, 2); - let result = handle_mtproto_handshake( + let result = handle_mtproto_handshake_with_shared( &valid, tokio::io::empty(), tokio::io::sink(), @@ -2831,12 +2787,13 @@ async fn 
saturation_allows_valid_mtproto_even_when_peer_ip_is_currently_throttle &replay_checker, false, None, + shared.as_ref(), ) .await; assert!(matches!(result, HandshakeResult::Success(_))); assert_eq!( - auth_probe_fail_streak_for_testing(peer.ip()), + auth_probe_fail_streak_for_testing_in_shared(shared.as_ref(), peer.ip()), None, "successful mtproto auth under saturation must clear the peer's throttled state" ); @@ -2844,17 +2801,15 @@ async fn saturation_allows_valid_mtproto_even_when_peer_ip_is_currently_throttle #[tokio::test] async fn saturation_still_rejects_invalid_mtproto_probe_and_records_failure() { - let _guard = auth_probe_test_lock() - .lock() - .unwrap_or_else(|poisoned| poisoned.into_inner()); - clear_auth_probe_state_for_testing(); + let shared = ProxySharedState::new(); + clear_auth_probe_state_for_testing_in_shared(shared.as_ref()); let config = test_config_with_secret_hex("65656565656565656565656565656565"); let replay_checker = ReplayChecker::new(128, Duration::from_secs(60)); let peer: SocketAddr = "198.51.100.107:45107".parse().unwrap(); let now = Instant::now(); { - let mut guard = auth_probe_saturation_state() + let mut guard = auth_probe_saturation_state_for_testing_in_shared(shared.as_ref()) .lock() .unwrap_or_else(|poisoned| poisoned.into_inner()); *guard = Some(AuthProbeSaturationState { @@ -2866,7 +2821,7 @@ async fn saturation_still_rejects_invalid_mtproto_probe_and_records_failure() { let invalid = [0u8; HANDSHAKE_LEN]; - let result = handle_mtproto_handshake( + let result = handle_mtproto_handshake_with_shared( &invalid, tokio::io::empty(), tokio::io::sink(), @@ -2875,12 +2830,13 @@ async fn saturation_still_rejects_invalid_mtproto_probe_and_records_failure() { &replay_checker, false, None, + shared.as_ref(), ) .await; assert!(matches!(result, HandshakeResult::BadClient { .. 
})); assert_eq!( - auth_probe_fail_streak_for_testing(peer.ip()), + auth_probe_fail_streak_for_testing_in_shared(shared.as_ref(), peer.ip()), Some(1), "invalid mtproto during saturation must still increment per-ip failure tracking" ); @@ -2888,16 +2844,14 @@ async fn saturation_still_rejects_invalid_mtproto_probe_and_records_failure() { #[tokio::test] async fn saturation_grace_exhaustion_preauth_throttles_repeated_invalid_mtproto_probe() { - let _guard = auth_probe_test_lock() - .lock() - .unwrap_or_else(|poisoned| poisoned.into_inner()); - clear_auth_probe_state_for_testing(); + let shared = ProxySharedState::new(); + clear_auth_probe_state_for_testing_in_shared(shared.as_ref()); let config = test_config_with_secret_hex("65656565656565656565656565656565"); let replay_checker = ReplayChecker::new(128, Duration::from_secs(60)); let peer: SocketAddr = "198.51.100.206:45206".parse().unwrap(); let now = Instant::now(); - auth_probe_state_map().insert( + auth_probe_state_for_testing_in_shared(shared.as_ref()).insert( normalize_auth_probe_ip(peer.ip()), AuthProbeState { fail_streak: AUTH_PROBE_BACKOFF_START_FAILS + AUTH_PROBE_SATURATION_GRACE_FAILS, @@ -2906,7 +2860,7 @@ async fn saturation_grace_exhaustion_preauth_throttles_repeated_invalid_mtproto_ }, ); { - let mut guard = auth_probe_saturation_state() + let mut guard = auth_probe_saturation_state_for_testing_in_shared(shared.as_ref()) .lock() .unwrap_or_else(|poisoned| poisoned.into_inner()); *guard = Some(AuthProbeSaturationState { @@ -2931,7 +2885,7 @@ async fn saturation_grace_exhaustion_preauth_throttles_repeated_invalid_mtproto_ assert!(matches!(result, HandshakeResult::BadClient { .. 
})); assert_eq!( - auth_probe_fail_streak_for_testing(peer.ip()), + auth_probe_fail_streak_for_testing_in_shared(shared.as_ref(), peer.ip()), Some(AUTH_PROBE_BACKOFF_START_FAILS + AUTH_PROBE_SATURATION_GRACE_FAILS), "pre-auth throttle under exhausted saturation grace must reject without re-processing invalid MTProto" ); @@ -2939,17 +2893,15 @@ async fn saturation_grace_exhaustion_preauth_throttles_repeated_invalid_mtproto_ #[tokio::test] async fn saturation_grace_progression_tls_reaches_cap_then_stops_incrementing() { - let _guard = auth_probe_test_lock() - .lock() - .unwrap_or_else(|poisoned| poisoned.into_inner()); - clear_auth_probe_state_for_testing(); + let shared = ProxySharedState::new(); + clear_auth_probe_state_for_testing_in_shared(shared.as_ref()); let config = test_config_with_secret_hex("70707070707070707070707070707070"); let replay_checker = ReplayChecker::new(128, Duration::from_secs(60)); let rng = SecureRandom::new(); let peer: SocketAddr = "198.51.100.207:45207".parse().unwrap(); let now = Instant::now(); - auth_probe_state_map().insert( + auth_probe_state_for_testing_in_shared(shared.as_ref()).insert( normalize_auth_probe_ip(peer.ip()), AuthProbeState { fail_streak: AUTH_PROBE_BACKOFF_START_FAILS, @@ -2958,7 +2910,7 @@ async fn saturation_grace_progression_tls_reaches_cap_then_stops_incrementing() }, ); { - let mut guard = auth_probe_saturation_state() + let mut guard = auth_probe_saturation_state_for_testing_in_shared(shared.as_ref()) .lock() .unwrap_or_else(|poisoned| poisoned.into_inner()); *guard = Some(AuthProbeSaturationState { @@ -2975,7 +2927,7 @@ async fn saturation_grace_progression_tls_reaches_cap_then_stops_incrementing() AUTH_PROBE_BACKOFF_START_FAILS + 1, AUTH_PROBE_BACKOFF_START_FAILS + AUTH_PROBE_SATURATION_GRACE_FAILS, ] { - let result = handle_tls_handshake( + let result = handle_tls_handshake_with_shared( &invalid, tokio::io::empty(), tokio::io::sink(), @@ -2984,17 +2936,18 @@ async fn 
saturation_grace_progression_tls_reaches_cap_then_stops_incrementing() &replay_checker, &rng, None, + shared.as_ref(), ) .await; assert!(matches!(result, HandshakeResult::BadClient { .. })); assert_eq!( - auth_probe_fail_streak_for_testing(peer.ip()), + auth_probe_fail_streak_for_testing_in_shared(shared.as_ref(), peer.ip()), Some(expected) ); } { - let mut entry = auth_probe_state_map() + let mut entry = auth_probe_state_for_testing_in_shared(shared.as_ref()) .get_mut(&normalize_auth_probe_ip(peer.ip())) .expect("peer state must exist before exhaustion recheck"); entry.fail_streak = AUTH_PROBE_BACKOFF_START_FAILS + AUTH_PROBE_SATURATION_GRACE_FAILS; @@ -3002,7 +2955,7 @@ async fn saturation_grace_progression_tls_reaches_cap_then_stops_incrementing() entry.last_seen = Instant::now(); } - let result = handle_tls_handshake( + let result = handle_tls_handshake_with_shared( &invalid, tokio::io::empty(), tokio::io::sink(), @@ -3011,11 +2964,12 @@ async fn saturation_grace_progression_tls_reaches_cap_then_stops_incrementing() &replay_checker, &rng, None, + shared.as_ref(), ) .await; assert!(matches!(result, HandshakeResult::BadClient { .. 
})); assert_eq!( - auth_probe_fail_streak_for_testing(peer.ip()), + auth_probe_fail_streak_for_testing_in_shared(shared.as_ref(), peer.ip()), Some(AUTH_PROBE_BACKOFF_START_FAILS + AUTH_PROBE_SATURATION_GRACE_FAILS), "once grace is exhausted, repeated invalid TLS must be pre-auth throttled without further fail-streak growth" ); @@ -3023,16 +2977,14 @@ async fn saturation_grace_progression_tls_reaches_cap_then_stops_incrementing() #[tokio::test] async fn saturation_grace_progression_mtproto_reaches_cap_then_stops_incrementing() { - let _guard = auth_probe_test_lock() - .lock() - .unwrap_or_else(|poisoned| poisoned.into_inner()); - clear_auth_probe_state_for_testing(); + let shared = ProxySharedState::new(); + clear_auth_probe_state_for_testing_in_shared(shared.as_ref()); let config = test_config_with_secret_hex("71717171717171717171717171717171"); let replay_checker = ReplayChecker::new(128, Duration::from_secs(60)); let peer: SocketAddr = "198.51.100.208:45208".parse().unwrap(); let now = Instant::now(); - auth_probe_state_map().insert( + auth_probe_state_for_testing_in_shared(shared.as_ref()).insert( normalize_auth_probe_ip(peer.ip()), AuthProbeState { fail_streak: AUTH_PROBE_BACKOFF_START_FAILS, @@ -3041,7 +2993,7 @@ async fn saturation_grace_progression_mtproto_reaches_cap_then_stops_incrementin }, ); { - let mut guard = auth_probe_saturation_state() + let mut guard = auth_probe_saturation_state_for_testing_in_shared(shared.as_ref()) .lock() .unwrap_or_else(|poisoned| poisoned.into_inner()); *guard = Some(AuthProbeSaturationState { @@ -3057,7 +3009,7 @@ async fn saturation_grace_progression_mtproto_reaches_cap_then_stops_incrementin AUTH_PROBE_BACKOFF_START_FAILS + 1, AUTH_PROBE_BACKOFF_START_FAILS + AUTH_PROBE_SATURATION_GRACE_FAILS, ] { - let result = handle_mtproto_handshake( + let result = handle_mtproto_handshake_with_shared( &invalid, tokio::io::empty(), tokio::io::sink(), @@ -3066,17 +3018,18 @@ async fn 
saturation_grace_progression_mtproto_reaches_cap_then_stops_incrementin &replay_checker, false, None, + shared.as_ref(), ) .await; assert!(matches!(result, HandshakeResult::BadClient { .. })); assert_eq!( - auth_probe_fail_streak_for_testing(peer.ip()), + auth_probe_fail_streak_for_testing_in_shared(shared.as_ref(), peer.ip()), Some(expected) ); } { - let mut entry = auth_probe_state_map() + let mut entry = auth_probe_state_for_testing_in_shared(shared.as_ref()) .get_mut(&normalize_auth_probe_ip(peer.ip())) .expect("peer state must exist before exhaustion recheck"); entry.fail_streak = AUTH_PROBE_BACKOFF_START_FAILS + AUTH_PROBE_SATURATION_GRACE_FAILS; @@ -3084,7 +3037,7 @@ async fn saturation_grace_progression_mtproto_reaches_cap_then_stops_incrementin entry.last_seen = Instant::now(); } - let result = handle_mtproto_handshake( + let result = handle_mtproto_handshake_with_shared( &invalid, tokio::io::empty(), tokio::io::sink(), @@ -3093,11 +3046,12 @@ async fn saturation_grace_progression_mtproto_reaches_cap_then_stops_incrementin &replay_checker, false, None, + shared.as_ref(), ) .await; assert!(matches!(result, HandshakeResult::BadClient { .. 
})); assert_eq!( - auth_probe_fail_streak_for_testing(peer.ip()), + auth_probe_fail_streak_for_testing_in_shared(shared.as_ref(), peer.ip()), Some(AUTH_PROBE_BACKOFF_START_FAILS + AUTH_PROBE_SATURATION_GRACE_FAILS), "once grace is exhausted, repeated invalid MTProto must be pre-auth throttled without further fail-streak growth" ); @@ -3105,10 +3059,8 @@ async fn saturation_grace_progression_mtproto_reaches_cap_then_stops_incrementin #[tokio::test] async fn saturation_grace_boundary_still_admits_valid_tls_before_exhaustion() { - let _guard = auth_probe_test_lock() - .lock() - .unwrap_or_else(|poisoned| poisoned.into_inner()); - clear_auth_probe_state_for_testing(); + let shared = ProxySharedState::new(); + clear_auth_probe_state_for_testing_in_shared(shared.as_ref()); let secret = [0x72u8; 16]; let config = test_config_with_secret_hex("72727272727272727272727272727272"); @@ -3116,7 +3068,7 @@ async fn saturation_grace_boundary_still_admits_valid_tls_before_exhaustion() { let rng = SecureRandom::new(); let peer: SocketAddr = "198.51.100.209:45209".parse().unwrap(); let now = Instant::now(); - auth_probe_state_map().insert( + auth_probe_state_for_testing_in_shared(shared.as_ref()).insert( normalize_auth_probe_ip(peer.ip()), AuthProbeState { fail_streak: AUTH_PROBE_BACKOFF_START_FAILS + AUTH_PROBE_SATURATION_GRACE_FAILS - 1, @@ -3125,7 +3077,7 @@ async fn saturation_grace_boundary_still_admits_valid_tls_before_exhaustion() { }, ); { - let mut guard = auth_probe_saturation_state() + let mut guard = auth_probe_saturation_state_for_testing_in_shared(shared.as_ref()) .lock() .unwrap_or_else(|poisoned| poisoned.into_inner()); *guard = Some(AuthProbeSaturationState { @@ -3136,7 +3088,7 @@ async fn saturation_grace_boundary_still_admits_valid_tls_before_exhaustion() { } let valid = make_valid_tls_handshake(&secret, 0); - let result = handle_tls_handshake( + let result = handle_tls_handshake_with_shared( &valid, tokio::io::empty(), tokio::io::sink(), @@ -3145,6 +3097,7 @@ 
async fn saturation_grace_boundary_still_admits_valid_tls_before_exhaustion() { &replay_checker, &rng, None, + shared.as_ref(), ) .await; @@ -3152,15 +3105,13 @@ async fn saturation_grace_boundary_still_admits_valid_tls_before_exhaustion() { matches!(result, HandshakeResult::Success(_)), "valid TLS should still pass while peer remains within saturation grace budget" ); - assert_eq!(auth_probe_fail_streak_for_testing(peer.ip()), None); + assert_eq!(auth_probe_fail_streak_for_testing_in_shared(shared.as_ref(), peer.ip()), None); } #[tokio::test] async fn saturation_grace_exhaustion_blocks_valid_tls_until_backoff_expires() { - let _guard = auth_probe_test_lock() - .lock() - .unwrap_or_else(|poisoned| poisoned.into_inner()); - clear_auth_probe_state_for_testing(); + let shared = ProxySharedState::new(); + clear_auth_probe_state_for_testing_in_shared(shared.as_ref()); let secret = [0x73u8; 16]; let config = test_config_with_secret_hex("73737373737373737373737373737373"); @@ -3168,7 +3119,7 @@ async fn saturation_grace_exhaustion_blocks_valid_tls_until_backoff_expires() { let rng = SecureRandom::new(); let peer: SocketAddr = "198.51.100.210:45210".parse().unwrap(); let now = Instant::now(); - auth_probe_state_map().insert( + auth_probe_state_for_testing_in_shared(shared.as_ref()).insert( normalize_auth_probe_ip(peer.ip()), AuthProbeState { fail_streak: AUTH_PROBE_BACKOFF_START_FAILS + AUTH_PROBE_SATURATION_GRACE_FAILS, @@ -3177,7 +3128,7 @@ async fn saturation_grace_exhaustion_blocks_valid_tls_until_backoff_expires() { }, ); { - let mut guard = auth_probe_saturation_state() + let mut guard = auth_probe_saturation_state_for_testing_in_shared(shared.as_ref()) .lock() .unwrap_or_else(|poisoned| poisoned.into_inner()); *guard = Some(AuthProbeSaturationState { @@ -3188,7 +3139,7 @@ async fn saturation_grace_exhaustion_blocks_valid_tls_until_backoff_expires() { } let valid = make_valid_tls_handshake(&secret, 0); - let blocked = handle_tls_handshake( + let blocked = 
handle_tls_handshake_with_shared( &valid, tokio::io::empty(), tokio::io::sink(), @@ -3197,13 +3148,14 @@ async fn saturation_grace_exhaustion_blocks_valid_tls_until_backoff_expires() { &replay_checker, &rng, None, + shared.as_ref(), ) .await; assert!(matches!(blocked, HandshakeResult::BadClient { .. })); tokio::time::sleep(Duration::from_millis(230)).await; - let allowed = handle_tls_handshake( + let allowed = handle_tls_handshake_with_shared( &valid, tokio::io::empty(), tokio::io::sink(), @@ -3212,28 +3164,27 @@ async fn saturation_grace_exhaustion_blocks_valid_tls_until_backoff_expires() { &replay_checker, &rng, None, + shared.as_ref(), ) .await; assert!( matches!(allowed, HandshakeResult::Success(_)), "valid TLS should recover after peer-specific pre-auth backoff has elapsed" ); - assert_eq!(auth_probe_fail_streak_for_testing(peer.ip()), None); + assert_eq!(auth_probe_fail_streak_for_testing_in_shared(shared.as_ref(), peer.ip()), None); } #[tokio::test] async fn saturation_grace_exhaustion_is_shared_across_tls_and_mtproto_for_same_peer() { - let _guard = auth_probe_test_lock() - .lock() - .unwrap_or_else(|poisoned| poisoned.into_inner()); - clear_auth_probe_state_for_testing(); + let shared = ProxySharedState::new(); + clear_auth_probe_state_for_testing_in_shared(shared.as_ref()); let config = test_config_with_secret_hex("74747474747474747474747474747474"); let replay_checker = ReplayChecker::new(128, Duration::from_secs(60)); let rng = SecureRandom::new(); let peer: SocketAddr = "198.51.100.211:45211".parse().unwrap(); let now = Instant::now(); - auth_probe_state_map().insert( + auth_probe_state_for_testing_in_shared(shared.as_ref()).insert( normalize_auth_probe_ip(peer.ip()), AuthProbeState { fail_streak: AUTH_PROBE_BACKOFF_START_FAILS + AUTH_PROBE_SATURATION_GRACE_FAILS, @@ -3242,7 +3193,7 @@ async fn saturation_grace_exhaustion_is_shared_across_tls_and_mtproto_for_same_p }, ); { - let mut guard = auth_probe_saturation_state() + let mut guard = 
auth_probe_saturation_state_for_testing_in_shared(shared.as_ref()) .lock() .unwrap_or_else(|poisoned| poisoned.into_inner()); *guard = Some(AuthProbeSaturationState { @@ -3283,7 +3234,7 @@ async fn saturation_grace_exhaustion_is_shared_across_tls_and_mtproto_for_same_p assert!(matches!(mtproto_result, HandshakeResult::BadClient { .. })); assert_eq!( - auth_probe_fail_streak_for_testing(peer.ip()), + auth_probe_fail_streak_for_testing_in_shared(shared.as_ref(), peer.ip()), Some(AUTH_PROBE_BACKOFF_START_FAILS + AUTH_PROBE_SATURATION_GRACE_FAILS), "saturation grace exhaustion must gate both TLS and MTProto pre-auth paths for one peer" ); @@ -3291,10 +3242,8 @@ async fn saturation_grace_exhaustion_is_shared_across_tls_and_mtproto_for_same_p #[tokio::test(flavor = "multi_thread", worker_threads = 4)] async fn adversarial_same_peer_invalid_tls_storm_does_not_bypass_saturation_grace_cap() { - let _guard = auth_probe_test_lock() - .lock() - .unwrap_or_else(|poisoned| poisoned.into_inner()); - clear_auth_probe_state_for_testing(); + let shared = ProxySharedState::new(); + clear_auth_probe_state_for_testing_in_shared(shared.as_ref()); let config = Arc::new(test_config_with_secret_hex( "75757575757575757575757575757575", @@ -3303,7 +3252,7 @@ async fn adversarial_same_peer_invalid_tls_storm_does_not_bypass_saturation_grac let rng = Arc::new(SecureRandom::new()); let peer: SocketAddr = "198.51.100.212:45212".parse().unwrap(); let now = Instant::now(); - auth_probe_state_map().insert( + auth_probe_state_for_testing_in_shared(shared.as_ref()).insert( normalize_auth_probe_ip(peer.ip()), AuthProbeState { fail_streak: AUTH_PROBE_BACKOFF_START_FAILS + AUTH_PROBE_SATURATION_GRACE_FAILS, @@ -3312,7 +3261,7 @@ async fn adversarial_same_peer_invalid_tls_storm_does_not_bypass_saturation_grac }, ); { - let mut guard = auth_probe_saturation_state() + let mut guard = auth_probe_saturation_state_for_testing_in_shared(shared.as_ref()) .lock() .unwrap_or_else(|poisoned| poisoned.into_inner()); 
*guard = Some(AuthProbeSaturationState { @@ -3353,7 +3302,7 @@ async fn adversarial_same_peer_invalid_tls_storm_does_not_bypass_saturation_grac } assert_eq!( - auth_probe_fail_streak_for_testing(peer.ip()), + auth_probe_fail_streak_for_testing_in_shared(shared.as_ref(), peer.ip()), Some(AUTH_PROBE_BACKOFF_START_FAILS + AUTH_PROBE_SATURATION_GRACE_FAILS), "same-peer invalid storm under exhausted grace must stay pre-auth throttled without fail-streak growth" ); @@ -3361,17 +3310,15 @@ async fn adversarial_same_peer_invalid_tls_storm_does_not_bypass_saturation_grac #[tokio::test] async fn light_fuzz_saturation_grace_tls_invalid_inputs_never_authenticate_or_panic() { - let _guard = auth_probe_test_lock() - .lock() - .unwrap_or_else(|poisoned| poisoned.into_inner()); - clear_auth_probe_state_for_testing(); + let shared = ProxySharedState::new(); + clear_auth_probe_state_for_testing_in_shared(shared.as_ref()); let config = test_config_with_secret_hex("76767676767676767676767676767676"); let replay_checker = ReplayChecker::new(2048, Duration::from_secs(60)); let rng = SecureRandom::new(); let peer: SocketAddr = "198.51.100.213:45213".parse().unwrap(); let now = Instant::now(); - auth_probe_state_map().insert( + auth_probe_state_for_testing_in_shared(shared.as_ref()).insert( normalize_auth_probe_ip(peer.ip()), AuthProbeState { fail_streak: AUTH_PROBE_BACKOFF_START_FAILS + AUTH_PROBE_SATURATION_GRACE_FAILS, @@ -3380,7 +3327,7 @@ async fn light_fuzz_saturation_grace_tls_invalid_inputs_never_authenticate_or_pa }, ); { - let mut guard = auth_probe_saturation_state() + let mut guard = auth_probe_saturation_state_for_testing_in_shared(shared.as_ref()) .lock() .unwrap_or_else(|poisoned| poisoned.into_inner()); *guard = Some(AuthProbeSaturationState { @@ -3410,7 +3357,7 @@ async fn light_fuzz_saturation_grace_tls_invalid_inputs_never_authenticate_or_pa assert!(matches!(result, HandshakeResult::BadClient { .. 
})); } - let streak = auth_probe_fail_streak_for_testing(peer.ip()) + let streak = auth_probe_fail_streak_for_testing_in_shared(shared.as_ref(), peer.ip()) .expect("peer should remain tracked after repeated invalid fuzz probes"); assert!( streak >= AUTH_PROBE_BACKOFF_START_FAILS + AUTH_PROBE_SATURATION_GRACE_FAILS, @@ -3420,10 +3367,8 @@ async fn light_fuzz_saturation_grace_tls_invalid_inputs_never_authenticate_or_pa #[tokio::test(flavor = "multi_thread", worker_threads = 4)] async fn adversarial_saturation_burst_only_admits_valid_tls_and_mtproto_handshakes() { - let _guard = auth_probe_test_lock() - .lock() - .unwrap_or_else(|poisoned| poisoned.into_inner()); - clear_auth_probe_state_for_testing(); + let shared = ProxySharedState::new(); + clear_auth_probe_state_for_testing_in_shared(shared.as_ref()); let secret_hex = "66666666666666666666666666666666"; let secret = [0x66u8; 16]; @@ -3435,7 +3380,7 @@ async fn adversarial_saturation_burst_only_admits_valid_tls_and_mtproto_handshak let now = Instant::now(); { - let mut guard = auth_probe_saturation_state() + let mut guard = auth_probe_saturation_state_for_testing_in_shared(shared.as_ref()) .lock() .unwrap_or_else(|poisoned| poisoned.into_inner()); *guard = Some(AuthProbeSaturationState { @@ -3548,10 +3493,8 @@ async fn adversarial_saturation_burst_only_admits_valid_tls_and_mtproto_handshak #[tokio::test] async fn expired_saturation_keeps_per_ip_throttle_enforced_for_valid_tls() { - let _guard = auth_probe_test_lock() - .lock() - .unwrap_or_else(|poisoned| poisoned.into_inner()); - clear_auth_probe_state_for_testing(); + let shared = ProxySharedState::new(); + clear_auth_probe_state_for_testing_in_shared(shared.as_ref()); let secret = [0x67u8; 16]; let config = test_config_with_secret_hex("67676767676767676767676767676767"); @@ -3560,7 +3503,7 @@ async fn expired_saturation_keeps_per_ip_throttle_enforced_for_valid_tls() { let peer: SocketAddr = "198.51.100.110:45110".parse().unwrap(); let now = Instant::now(); - 
auth_probe_state_map().insert( + auth_probe_state_for_testing_in_shared(shared.as_ref()).insert( normalize_auth_probe_ip(peer.ip()), AuthProbeState { fail_streak: AUTH_PROBE_BACKOFF_START_FAILS, @@ -3569,7 +3512,7 @@ async fn expired_saturation_keeps_per_ip_throttle_enforced_for_valid_tls() { }, ); { - let mut guard = auth_probe_saturation_state() + let mut guard = auth_probe_saturation_state_for_testing_in_shared(shared.as_ref()) .lock() .unwrap_or_else(|poisoned| poisoned.into_inner()); *guard = Some(AuthProbeSaturationState { @@ -3580,7 +3523,7 @@ async fn expired_saturation_keeps_per_ip_throttle_enforced_for_valid_tls() { } let valid = make_valid_tls_handshake(&secret, 0); - let result = handle_tls_handshake( + let result = handle_tls_handshake_with_shared( &valid, tokio::io::empty(), tokio::io::sink(), @@ -3589,6 +3532,7 @@ async fn expired_saturation_keeps_per_ip_throttle_enforced_for_valid_tls() { &replay_checker, &rng, None, + shared.as_ref(), ) .await; diff --git a/src/proxy/tests/handshake_timing_manual_bench_tests.rs b/src/proxy/tests/handshake_timing_manual_bench_tests.rs index 13d112c..458cb4f 100644 --- a/src/proxy/tests/handshake_timing_manual_bench_tests.rs +++ b/src/proxy/tests/handshake_timing_manual_bench_tests.rs @@ -4,12 +4,6 @@ use crate::protocol::constants::{ProtoTag, TLS_RECORD_HANDSHAKE, TLS_VERSION}; use std::net::SocketAddr; use std::time::{Duration, Instant}; -fn auth_probe_test_guard() -> std::sync::MutexGuard<'static, ()> { - auth_probe_test_lock() - .lock() - .unwrap_or_else(|poisoned| poisoned.into_inner()) -} - fn make_valid_mtproto_handshake( secret_hex: &str, proto_tag: ProtoTag, @@ -149,8 +143,8 @@ fn median_ns(samples: &mut [u128]) -> u128 { #[tokio::test] #[ignore = "manual benchmark: timing-sensitive and host-dependent"] async fn mtproto_user_scan_timing_manual_benchmark() { - let _guard = auth_probe_test_guard(); - clear_auth_probe_state_for_testing(); + let shared = ProxySharedState::new(); + 
clear_auth_probe_state_for_testing_in_shared(shared.as_ref()); const DECOY_USERS: usize = 8_000; const ITERATIONS: usize = 250; @@ -243,7 +237,7 @@ async fn mtproto_user_scan_timing_manual_benchmark() { #[tokio::test] #[ignore = "manual benchmark: timing-sensitive and host-dependent"] async fn tls_sni_preferred_vs_no_sni_fallback_manual_benchmark() { - let _guard = auth_probe_test_guard(); + let shared = ProxySharedState::new(); const DECOY_USERS: usize = 8_000; const ITERATIONS: usize = 250; @@ -281,7 +275,7 @@ async fn tls_sni_preferred_vs_no_sni_fallback_manual_benchmark() { let no_sni = make_valid_tls_handshake(&target_secret, (i as u32).wrapping_add(10_000)); let started_sni = Instant::now(); - let sni_secrets = decode_user_secrets(&config, Some(preferred_user)); + let sni_secrets = decode_user_secrets_in(shared.as_ref(), &config, Some(preferred_user)); let sni_result = tls::validate_tls_handshake_with_replay_window( &with_sni, &sni_secrets, @@ -292,7 +286,7 @@ async fn tls_sni_preferred_vs_no_sni_fallback_manual_benchmark() { assert!(sni_result.is_some()); let started_no_sni = Instant::now(); - let no_sni_secrets = decode_user_secrets(&config, None); + let no_sni_secrets = decode_user_secrets_in(shared.as_ref(), &config, None); let no_sni_result = tls::validate_tls_handshake_with_replay_window( &no_sni, &no_sni_secrets, diff --git a/src/proxy/tests/middle_relay_baseline_invariant_tests.rs b/src/proxy/tests/middle_relay_baseline_invariant_tests.rs index 69ccd75..0a7e358 100644 --- a/src/proxy/tests/middle_relay_baseline_invariant_tests.rs +++ b/src/proxy/tests/middle_relay_baseline_invariant_tests.rs @@ -3,36 +3,39 @@ use std::time::{Duration, Instant}; #[test] fn middle_relay_baseline_public_api_idle_roundtrip_contract() { - let _guard = relay_idle_pressure_test_scope(); - clear_relay_idle_pressure_state_for_testing(); + let shared = ProxySharedState::new(); + clear_relay_idle_pressure_state_for_testing_in_shared(shared.as_ref()); - 
assert!(mark_relay_idle_candidate(7001)); - assert_eq!(oldest_relay_idle_candidate(), Some(7001)); + assert!(mark_relay_idle_candidate_for_testing(shared.as_ref(), 7001)); + assert_eq!(oldest_relay_idle_candidate_for_testing(shared.as_ref()), Some(7001)); - clear_relay_idle_candidate(7001); - assert_ne!(oldest_relay_idle_candidate(), Some(7001)); + clear_relay_idle_candidate_for_testing(shared.as_ref(), 7001); + assert_ne!(oldest_relay_idle_candidate_for_testing(shared.as_ref()), Some(7001)); - assert!(mark_relay_idle_candidate(7001)); - assert_eq!(oldest_relay_idle_candidate(), Some(7001)); + assert!(mark_relay_idle_candidate_for_testing(shared.as_ref(), 7001)); + assert_eq!(oldest_relay_idle_candidate_for_testing(shared.as_ref()), Some(7001)); - clear_relay_idle_pressure_state_for_testing(); + clear_relay_idle_pressure_state_for_testing_in_shared(shared.as_ref()); } #[test] fn middle_relay_baseline_public_api_desync_window_contract() { - let _guard = desync_dedup_test_lock() - .lock() - .unwrap_or_else(|poisoned| poisoned.into_inner()); - clear_desync_dedup_for_testing(); + let shared = ProxySharedState::new(); + clear_desync_dedup_for_testing_in_shared(shared.as_ref()); let key = 0xDEAD_BEEF_0000_0001u64; let t0 = Instant::now(); - assert!(should_emit_full_desync(key, false, t0)); - assert!(!should_emit_full_desync(key, false, t0 + Duration::from_secs(1))); + assert!(should_emit_full_desync_for_testing(shared.as_ref(), key, false, t0)); + assert!(!should_emit_full_desync_for_testing( + shared.as_ref(), + key, + false, + t0 + Duration::from_secs(1) + )); let t1 = t0 + DESYNC_DEDUP_WINDOW + Duration::from_millis(10); - assert!(should_emit_full_desync(key, false, t1)); + assert!(should_emit_full_desync_for_testing(shared.as_ref(), key, false, t1)); - clear_desync_dedup_for_testing(); + clear_desync_dedup_for_testing_in_shared(shared.as_ref()); } diff --git a/src/proxy/tests/middle_relay_desync_all_full_dedup_security_tests.rs 
b/src/proxy/tests/middle_relay_desync_all_full_dedup_security_tests.rs index dab0dff..46521e6 100644 --- a/src/proxy/tests/middle_relay_desync_all_full_dedup_security_tests.rs +++ b/src/proxy/tests/middle_relay_desync_all_full_dedup_security_tests.rs @@ -5,22 +5,20 @@ use std::thread; #[test] fn desync_all_full_bypass_does_not_initialize_or_grow_dedup_cache() { - let _guard = desync_dedup_test_lock() - .lock() - .expect("desync dedup test lock must be available"); - clear_desync_dedup_for_testing(); + let shared = ProxySharedState::new(); + clear_desync_dedup_for_testing_in_shared(shared.as_ref()); - let initial_len = DESYNC_DEDUP.get().map(|dedup| dedup.len()).unwrap_or(0); + let initial_len = desync_dedup_len_for_testing(shared.as_ref()); let now = Instant::now(); for i in 0..20_000u64 { assert!( - should_emit_full_desync(0xD35E_D000_0000_0000u64 ^ i, true, now), + should_emit_full_desync_for_testing(shared.as_ref(), 0xD35E_D000_0000_0000u64 ^ i, true, now), "desync_all_full path must always emit" ); } - let after_len = DESYNC_DEDUP.get().map(|dedup| dedup.len()).unwrap_or(0); + let after_len = desync_dedup_len_for_testing(shared.as_ref()); assert_eq!( after_len, initial_len, "desync_all_full bypass must not allocate or accumulate dedup entries" @@ -29,39 +27,34 @@ fn desync_all_full_bypass_does_not_initialize_or_grow_dedup_cache() { #[test] fn desync_all_full_bypass_keeps_existing_dedup_entries_unchanged() { - let _guard = desync_dedup_test_lock() - .lock() - .expect("desync dedup test lock must be available"); - clear_desync_dedup_for_testing(); + let shared = ProxySharedState::new(); + clear_desync_dedup_for_testing_in_shared(shared.as_ref()); - let dedup = DESYNC_DEDUP.get_or_init(DashMap::new); let seed_time = Instant::now() - Duration::from_secs(7); - dedup.insert(0xAAAABBBBCCCCDDDD, seed_time); - dedup.insert(0x1111222233334444, seed_time); + desync_dedup_insert_for_testing(shared.as_ref(), 0xAAAABBBBCCCCDDDD, seed_time); + 
desync_dedup_insert_for_testing(shared.as_ref(), 0x1111222233334444, seed_time); let now = Instant::now(); for i in 0..2048u64 { assert!( - should_emit_full_desync(0xF011_F000_0000_0000u64 ^ i, true, now), + should_emit_full_desync_for_testing(shared.as_ref(), 0xF011_F000_0000_0000u64 ^ i, true, now), "desync_all_full must bypass suppression and dedup refresh" ); } assert_eq!( - dedup.len(), + desync_dedup_len_for_testing(shared.as_ref()), 2, "bypass path must not mutate dedup cardinality" ); assert_eq!( - *dedup - .get(&0xAAAABBBBCCCCDDDD) + desync_dedup_get_for_testing(shared.as_ref(), 0xAAAABBBBCCCCDDDD) .expect("seed key must remain"), seed_time, "bypass path must not refresh existing dedup timestamps" ); assert_eq!( - *dedup - .get(&0x1111222233334444) + desync_dedup_get_for_testing(shared.as_ref(), 0x1111222233334444) .expect("seed key must remain"), seed_time, "bypass path must not touch unrelated dedup entries" @@ -70,14 +63,12 @@ fn desync_all_full_bypass_keeps_existing_dedup_entries_unchanged() { #[test] fn edge_all_full_burst_does_not_poison_later_false_path_tracking() { - let _guard = desync_dedup_test_lock() - .lock() - .expect("desync dedup test lock must be available"); - clear_desync_dedup_for_testing(); + let shared = ProxySharedState::new(); + clear_desync_dedup_for_testing_in_shared(shared.as_ref()); let now = Instant::now(); for i in 0..8192u64 { - assert!(should_emit_full_desync( + assert!(should_emit_full_desync_for_testing(shared.as_ref(), 0xABCD_0000_0000_0000 ^ i, true, now @@ -86,26 +77,20 @@ fn edge_all_full_burst_does_not_poison_later_false_path_tracking() { let tracked_key = 0xDEAD_BEEF_0000_0001u64; assert!( - should_emit_full_desync(tracked_key, false, now), + should_emit_full_desync_for_testing(shared.as_ref(), tracked_key, false, now), "first false-path event after all_full burst must still be tracked and emitted" ); - let dedup = DESYNC_DEDUP - .get() - .expect("false path should initialize dedup"); - 
assert!(dedup.get(&tracked_key).is_some()); + assert!(desync_dedup_get_for_testing(shared.as_ref(), tracked_key).is_some()); } #[test] fn adversarial_mixed_sequence_true_steps_never_change_cache_len() { - let _guard = desync_dedup_test_lock() - .lock() - .expect("desync dedup test lock must be available"); - clear_desync_dedup_for_testing(); + let shared = ProxySharedState::new(); + clear_desync_dedup_for_testing_in_shared(shared.as_ref()); - let dedup = DESYNC_DEDUP.get_or_init(DashMap::new); for i in 0..256u64 { - dedup.insert(0x1000_0000_0000_0000 ^ i, Instant::now()); + desync_dedup_insert_for_testing(shared.as_ref(), 0x1000_0000_0000_0000 ^ i, Instant::now()); } let mut seed = 0xC0DE_CAFE_BAAD_F00Du64; @@ -116,9 +101,9 @@ fn adversarial_mixed_sequence_true_steps_never_change_cache_len() { let flag_all_full = (seed & 0x1) == 1; let key = 0x7000_0000_0000_0000u64 ^ i ^ seed; - let before = dedup.len(); - let _ = should_emit_full_desync(key, flag_all_full, Instant::now()); - let after = dedup.len(); + let before = desync_dedup_len_for_testing(shared.as_ref()); + let _ = should_emit_full_desync_for_testing(shared.as_ref(), key, flag_all_full, Instant::now()); + let after = desync_dedup_len_for_testing(shared.as_ref()); if flag_all_full { assert_eq!(after, before, "all_full step must not mutate dedup length"); @@ -128,50 +113,46 @@ fn adversarial_mixed_sequence_true_steps_never_change_cache_len() { #[test] fn light_fuzz_all_full_mode_always_emits_and_stays_bounded() { - let _guard = desync_dedup_test_lock() - .lock() - .expect("desync dedup test lock must be available"); - clear_desync_dedup_for_testing(); + let shared = ProxySharedState::new(); + clear_desync_dedup_for_testing_in_shared(shared.as_ref()); let mut seed = 0x1234_5678_9ABC_DEF0u64; - let before = DESYNC_DEDUP.get().map(|d| d.len()).unwrap_or(0); + let before = desync_dedup_len_for_testing(shared.as_ref()); for _ in 0..20_000 { seed ^= seed << 7; seed ^= seed >> 9; seed ^= seed << 8; let key = seed ^ 
0x55AA_55AA_55AA_55AAu64; - assert!(should_emit_full_desync(key, true, Instant::now())); + assert!(should_emit_full_desync_for_testing(shared.as_ref(), key, true, Instant::now())); } - let after = DESYNC_DEDUP.get().map(|d| d.len()).unwrap_or(0); + let after = desync_dedup_len_for_testing(shared.as_ref()); assert_eq!(after, before); assert!(after <= DESYNC_DEDUP_MAX_ENTRIES); } #[test] fn stress_parallel_all_full_storm_does_not_grow_or_mutate_cache() { - let _guard = desync_dedup_test_lock() - .lock() - .expect("desync dedup test lock must be available"); - clear_desync_dedup_for_testing(); + let shared = ProxySharedState::new(); + clear_desync_dedup_for_testing_in_shared(shared.as_ref()); - let dedup = DESYNC_DEDUP.get_or_init(DashMap::new); let seed_time = Instant::now() - Duration::from_secs(2); for i in 0..1024u64 { - dedup.insert(0x8888_0000_0000_0000 ^ i, seed_time); + desync_dedup_insert_for_testing(shared.as_ref(), 0x8888_0000_0000_0000 ^ i, seed_time); } - let before_len = dedup.len(); + let before_len = desync_dedup_len_for_testing(shared.as_ref()); let emits = Arc::new(AtomicUsize::new(0)); let mut workers = Vec::new(); for worker in 0..16u64 { let emits = Arc::clone(&emits); + let shared = shared.clone(); workers.push(thread::spawn(move || { let now = Instant::now(); for i in 0..4096u64 { let key = 0xFACE_0000_0000_0000u64 ^ (worker << 20) ^ i; - if should_emit_full_desync(key, true, now) { + if should_emit_full_desync_for_testing(shared.as_ref(), key, true, now) { emits.fetch_add(1, Ordering::Relaxed); } } @@ -184,7 +165,7 @@ fn stress_parallel_all_full_storm_does_not_grow_or_mutate_cache() { assert_eq!(emits.load(Ordering::Relaxed), 16 * 4096); assert_eq!( - dedup.len(), + desync_dedup_len_for_testing(shared.as_ref()), before_len, "parallel all_full storm must not mutate cache len" ); diff --git a/src/proxy/tests/middle_relay_idle_policy_security_tests.rs b/src/proxy/tests/middle_relay_idle_policy_security_tests.rs index fd3243d..a246640 100644 --- 
a/src/proxy/tests/middle_relay_idle_policy_security_tests.rs +++ b/src/proxy/tests/middle_relay_idle_policy_security_tests.rs @@ -360,73 +360,73 @@ async fn stress_many_idle_sessions_fail_closed_without_hang() { #[test] fn pressure_evicts_oldest_idle_candidate_with_deterministic_ordering() { - let _guard = relay_idle_pressure_test_scope(); - clear_relay_idle_pressure_state_for_testing(); + let shared = ProxySharedState::new(); + clear_relay_idle_pressure_state_for_testing_in_shared(shared.as_ref()); let stats = Stats::new(); - assert!(mark_relay_idle_candidate(10)); - assert!(mark_relay_idle_candidate(11)); - assert_eq!(oldest_relay_idle_candidate(), Some(10)); + assert!(mark_relay_idle_candidate_for_testing(shared.as_ref(), 10)); + assert!(mark_relay_idle_candidate_for_testing(shared.as_ref(), 11)); + assert_eq!(oldest_relay_idle_candidate_for_testing(shared.as_ref()), Some(10)); - note_relay_pressure_event(); + note_relay_pressure_event_for_testing(shared.as_ref()); let mut seen_for_newer = 0u64; assert!( - !maybe_evict_idle_candidate_on_pressure(11, &mut seen_for_newer, &stats), + !maybe_evict_idle_candidate_on_pressure_for_testing(shared.as_ref(), 11, &mut seen_for_newer, &stats), "newer idle candidate must not be evicted while older candidate exists" ); - assert_eq!(oldest_relay_idle_candidate(), Some(10)); + assert_eq!(oldest_relay_idle_candidate_for_testing(shared.as_ref()), Some(10)); let mut seen_for_oldest = 0u64; assert!( - maybe_evict_idle_candidate_on_pressure(10, &mut seen_for_oldest, &stats), + maybe_evict_idle_candidate_on_pressure_for_testing(shared.as_ref(), 10, &mut seen_for_oldest, &stats), "oldest idle candidate must be evicted first under pressure" ); - assert_eq!(oldest_relay_idle_candidate(), Some(11)); + assert_eq!(oldest_relay_idle_candidate_for_testing(shared.as_ref()), Some(11)); assert_eq!(stats.get_relay_pressure_evict_total(), 1); - clear_relay_idle_pressure_state_for_testing(); + 
clear_relay_idle_pressure_state_for_testing_in_shared(shared.as_ref()); } #[test] fn pressure_does_not_evict_without_new_pressure_signal() { - let _guard = relay_idle_pressure_test_scope(); - clear_relay_idle_pressure_state_for_testing(); + let shared = ProxySharedState::new(); + clear_relay_idle_pressure_state_for_testing_in_shared(shared.as_ref()); let stats = Stats::new(); - assert!(mark_relay_idle_candidate(21)); - let mut seen = relay_pressure_event_seq(); + assert!(mark_relay_idle_candidate_for_testing(shared.as_ref(), 21)); + let mut seen = relay_pressure_event_seq_for_testing(shared.as_ref()); assert!( - !maybe_evict_idle_candidate_on_pressure(21, &mut seen, &stats), + !maybe_evict_idle_candidate_on_pressure_for_testing(shared.as_ref(), 21, &mut seen, &stats), "without new pressure signal, candidate must stay" ); assert_eq!(stats.get_relay_pressure_evict_total(), 0); - assert_eq!(oldest_relay_idle_candidate(), Some(21)); + assert_eq!(oldest_relay_idle_candidate_for_testing(shared.as_ref()), Some(21)); - clear_relay_idle_pressure_state_for_testing(); + clear_relay_idle_pressure_state_for_testing_in_shared(shared.as_ref()); } #[test] fn stress_pressure_eviction_preserves_fifo_across_many_candidates() { - let _guard = relay_idle_pressure_test_scope(); - clear_relay_idle_pressure_state_for_testing(); + let shared = ProxySharedState::new(); + clear_relay_idle_pressure_state_for_testing_in_shared(shared.as_ref()); let stats = Stats::new(); let mut seen_per_conn = std::collections::HashMap::new(); for conn_id in 1000u64..1064u64 { - assert!(mark_relay_idle_candidate(conn_id)); + assert!(mark_relay_idle_candidate_for_testing(shared.as_ref(), conn_id)); seen_per_conn.insert(conn_id, 0u64); } for expected in 1000u64..1064u64 { - note_relay_pressure_event(); + note_relay_pressure_event_for_testing(shared.as_ref()); let mut seen = *seen_per_conn .get(&expected) .expect("per-conn pressure cursor must exist"); assert!( - maybe_evict_idle_candidate_on_pressure(expected, 
&mut seen, &stats), + maybe_evict_idle_candidate_on_pressure_for_testing(shared.as_ref(), expected, &mut seen, &stats), "expected conn_id {expected} must be evicted next by deterministic FIFO ordering" ); seen_per_conn.insert(expected, seen); @@ -436,33 +436,33 @@ fn stress_pressure_eviction_preserves_fifo_across_many_candidates() { } else { Some(expected + 1) }; - assert_eq!(oldest_relay_idle_candidate(), next); + assert_eq!(oldest_relay_idle_candidate_for_testing(shared.as_ref()), next); } assert_eq!(stats.get_relay_pressure_evict_total(), 64); - clear_relay_idle_pressure_state_for_testing(); + clear_relay_idle_pressure_state_for_testing_in_shared(shared.as_ref()); } #[test] fn blackhat_single_pressure_event_must_not_evict_more_than_one_candidate() { - let _guard = relay_idle_pressure_test_scope(); - clear_relay_idle_pressure_state_for_testing(); + let shared = ProxySharedState::new(); + clear_relay_idle_pressure_state_for_testing_in_shared(shared.as_ref()); let stats = Stats::new(); - assert!(mark_relay_idle_candidate(301)); - assert!(mark_relay_idle_candidate(302)); - assert!(mark_relay_idle_candidate(303)); + assert!(mark_relay_idle_candidate_for_testing(shared.as_ref(), 301)); + assert!(mark_relay_idle_candidate_for_testing(shared.as_ref(), 302)); + assert!(mark_relay_idle_candidate_for_testing(shared.as_ref(), 303)); let mut seen_301 = 0u64; let mut seen_302 = 0u64; let mut seen_303 = 0u64; // Single pressure event should authorize at most one eviction globally. 
- note_relay_pressure_event(); + note_relay_pressure_event_for_testing(shared.as_ref()); - let evicted_301 = maybe_evict_idle_candidate_on_pressure(301, &mut seen_301, &stats); - let evicted_302 = maybe_evict_idle_candidate_on_pressure(302, &mut seen_302, &stats); - let evicted_303 = maybe_evict_idle_candidate_on_pressure(303, &mut seen_303, &stats); + let evicted_301 = maybe_evict_idle_candidate_on_pressure_for_testing(shared.as_ref(), 301, &mut seen_301, &stats); + let evicted_302 = maybe_evict_idle_candidate_on_pressure_for_testing(shared.as_ref(), 302, &mut seen_302, &stats); + let evicted_303 = maybe_evict_idle_candidate_on_pressure_for_testing(shared.as_ref(), 303, &mut seen_303, &stats); let evicted_total = [evicted_301, evicted_302, evicted_303] .iter() @@ -474,30 +474,30 @@ fn blackhat_single_pressure_event_must_not_evict_more_than_one_candidate() { "single pressure event must not cascade-evict multiple idle candidates" ); - clear_relay_idle_pressure_state_for_testing(); + clear_relay_idle_pressure_state_for_testing_in_shared(shared.as_ref()); } #[test] fn blackhat_pressure_counter_must_track_global_budget_not_per_session_cursor() { - let _guard = relay_idle_pressure_test_scope(); - clear_relay_idle_pressure_state_for_testing(); + let shared = ProxySharedState::new(); + clear_relay_idle_pressure_state_for_testing_in_shared(shared.as_ref()); let stats = Stats::new(); - assert!(mark_relay_idle_candidate(401)); - assert!(mark_relay_idle_candidate(402)); + assert!(mark_relay_idle_candidate_for_testing(shared.as_ref(), 401)); + assert!(mark_relay_idle_candidate_for_testing(shared.as_ref(), 402)); let mut seen_oldest = 0u64; let mut seen_next = 0u64; - note_relay_pressure_event(); + note_relay_pressure_event_for_testing(shared.as_ref()); assert!( - maybe_evict_idle_candidate_on_pressure(401, &mut seen_oldest, &stats), + maybe_evict_idle_candidate_on_pressure_for_testing(shared.as_ref(), 401, &mut seen_oldest, &stats), "oldest candidate must consume pressure 
budget first" ); assert!( - !maybe_evict_idle_candidate_on_pressure(402, &mut seen_next, &stats), + !maybe_evict_idle_candidate_on_pressure_for_testing(shared.as_ref(), 402, &mut seen_next, &stats), "next candidate must not consume the same pressure budget" ); @@ -507,47 +507,47 @@ fn blackhat_pressure_counter_must_track_global_budget_not_per_session_cursor() { "single pressure budget must produce exactly one eviction" ); - clear_relay_idle_pressure_state_for_testing(); + clear_relay_idle_pressure_state_for_testing_in_shared(shared.as_ref()); } #[test] fn blackhat_stale_pressure_before_idle_mark_must_not_trigger_eviction() { - let _guard = relay_idle_pressure_test_scope(); - clear_relay_idle_pressure_state_for_testing(); + let shared = ProxySharedState::new(); + clear_relay_idle_pressure_state_for_testing_in_shared(shared.as_ref()); let stats = Stats::new(); // Pressure happened before any idle candidate existed. - note_relay_pressure_event(); - assert!(mark_relay_idle_candidate(501)); + note_relay_pressure_event_for_testing(shared.as_ref()); + assert!(mark_relay_idle_candidate_for_testing(shared.as_ref(), 501)); let mut seen = 0u64; assert!( - !maybe_evict_idle_candidate_on_pressure(501, &mut seen, &stats), + !maybe_evict_idle_candidate_on_pressure_for_testing(shared.as_ref(), 501, &mut seen, &stats), "stale pressure (before soft-idle mark) must not evict newly marked candidate" ); - clear_relay_idle_pressure_state_for_testing(); + clear_relay_idle_pressure_state_for_testing_in_shared(shared.as_ref()); } #[test] fn blackhat_stale_pressure_must_not_evict_any_of_newly_marked_batch() { - let _guard = relay_idle_pressure_test_scope(); - clear_relay_idle_pressure_state_for_testing(); + let shared = ProxySharedState::new(); + clear_relay_idle_pressure_state_for_testing_in_shared(shared.as_ref()); let stats = Stats::new(); - note_relay_pressure_event(); - assert!(mark_relay_idle_candidate(511)); - assert!(mark_relay_idle_candidate(512)); - 
assert!(mark_relay_idle_candidate(513)); + note_relay_pressure_event_for_testing(shared.as_ref()); + assert!(mark_relay_idle_candidate_for_testing(shared.as_ref(), 511)); + assert!(mark_relay_idle_candidate_for_testing(shared.as_ref(), 512)); + assert!(mark_relay_idle_candidate_for_testing(shared.as_ref(), 513)); let mut seen_511 = 0u64; let mut seen_512 = 0u64; let mut seen_513 = 0u64; let evicted = [ - maybe_evict_idle_candidate_on_pressure(511, &mut seen_511, &stats), - maybe_evict_idle_candidate_on_pressure(512, &mut seen_512, &stats), - maybe_evict_idle_candidate_on_pressure(513, &mut seen_513, &stats), + maybe_evict_idle_candidate_on_pressure_for_testing(shared.as_ref(), 511, &mut seen_511, &stats), + maybe_evict_idle_candidate_on_pressure_for_testing(shared.as_ref(), 512, &mut seen_512, &stats), + maybe_evict_idle_candidate_on_pressure_for_testing(shared.as_ref(), 513, &mut seen_513, &stats), ] .iter() .filter(|value| **value) @@ -558,111 +558,103 @@ fn blackhat_stale_pressure_must_not_evict_any_of_newly_marked_batch() { "stale pressure event must not evict any candidate from a newly marked batch" ); - clear_relay_idle_pressure_state_for_testing(); + clear_relay_idle_pressure_state_for_testing_in_shared(shared.as_ref()); } #[test] fn blackhat_stale_pressure_seen_without_candidates_must_be_globally_invalidated() { - let _guard = relay_idle_pressure_test_scope(); - clear_relay_idle_pressure_state_for_testing(); + let shared = ProxySharedState::new(); + clear_relay_idle_pressure_state_for_testing_in_shared(shared.as_ref()); let stats = Stats::new(); - note_relay_pressure_event(); + note_relay_pressure_event_for_testing(shared.as_ref()); // Session A observed pressure while there were no candidates. 
let mut seen_a = 0u64; assert!( - !maybe_evict_idle_candidate_on_pressure(999_001, &mut seen_a, &stats), + !maybe_evict_idle_candidate_on_pressure_for_testing(shared.as_ref(), 999_001, &mut seen_a, &stats), "no candidate existed, so no eviction is possible" ); // Candidate appears later; Session B must not be able to consume stale pressure. - assert!(mark_relay_idle_candidate(521)); + assert!(mark_relay_idle_candidate_for_testing(shared.as_ref(), 521)); let mut seen_b = 0u64; assert!( - !maybe_evict_idle_candidate_on_pressure(521, &mut seen_b, &stats), + !maybe_evict_idle_candidate_on_pressure_for_testing(shared.as_ref(), 521, &mut seen_b, &stats), "once pressure is observed with empty candidate set, it must not be replayed later" ); - clear_relay_idle_pressure_state_for_testing(); + clear_relay_idle_pressure_state_for_testing_in_shared(shared.as_ref()); } #[test] fn blackhat_stale_pressure_must_not_survive_candidate_churn() { - let _guard = relay_idle_pressure_test_scope(); - clear_relay_idle_pressure_state_for_testing(); + let shared = ProxySharedState::new(); + clear_relay_idle_pressure_state_for_testing_in_shared(shared.as_ref()); let stats = Stats::new(); - note_relay_pressure_event(); - assert!(mark_relay_idle_candidate(531)); - clear_relay_idle_candidate(531); - assert!(mark_relay_idle_candidate(532)); + note_relay_pressure_event_for_testing(shared.as_ref()); + assert!(mark_relay_idle_candidate_for_testing(shared.as_ref(), 531)); + clear_relay_idle_candidate_for_testing(shared.as_ref(), 531); + assert!(mark_relay_idle_candidate_for_testing(shared.as_ref(), 532)); let mut seen = 0u64; assert!( - !maybe_evict_idle_candidate_on_pressure(532, &mut seen, &stats), + !maybe_evict_idle_candidate_on_pressure_for_testing(shared.as_ref(), 532, &mut seen, &stats), "stale pressure must not survive clear+remark churn cycles" ); - clear_relay_idle_pressure_state_for_testing(); + clear_relay_idle_pressure_state_for_testing_in_shared(shared.as_ref()); } #[test] fn 
blackhat_pressure_seq_saturation_must_not_disable_future_pressure_accounting() { - let _guard = relay_idle_pressure_test_scope(); - clear_relay_idle_pressure_state_for_testing(); + let shared = ProxySharedState::new(); + clear_relay_idle_pressure_state_for_testing_in_shared(shared.as_ref()); { - let mut guard = relay_idle_candidate_registry() - .lock() - .expect("registry lock must be available"); - guard.pressure_event_seq = u64::MAX; - guard.pressure_consumed_seq = u64::MAX - 1; + set_relay_pressure_state_for_testing(shared.as_ref(), u64::MAX, u64::MAX - 1); } // A new pressure event should still be representable; saturating at MAX creates a permanent lockout. - note_relay_pressure_event(); - let after = relay_pressure_event_seq(); + note_relay_pressure_event_for_testing(shared.as_ref()); + let after = relay_pressure_event_seq_for_testing(shared.as_ref()); assert_ne!( after, u64::MAX, "pressure sequence saturation must not permanently freeze event progression" ); - clear_relay_idle_pressure_state_for_testing(); + clear_relay_idle_pressure_state_for_testing_in_shared(shared.as_ref()); } #[test] fn blackhat_pressure_seq_saturation_must_not_break_multiple_distinct_events() { - let _guard = relay_idle_pressure_test_scope(); - clear_relay_idle_pressure_state_for_testing(); + let shared = ProxySharedState::new(); + clear_relay_idle_pressure_state_for_testing_in_shared(shared.as_ref()); { - let mut guard = relay_idle_candidate_registry() - .lock() - .expect("registry lock must be available"); - guard.pressure_event_seq = u64::MAX; - guard.pressure_consumed_seq = u64::MAX; + set_relay_pressure_state_for_testing(shared.as_ref(), u64::MAX, u64::MAX); } - note_relay_pressure_event(); - let first = relay_pressure_event_seq(); - note_relay_pressure_event(); - let second = relay_pressure_event_seq(); + note_relay_pressure_event_for_testing(shared.as_ref()); + let first = relay_pressure_event_seq_for_testing(shared.as_ref()); + 
note_relay_pressure_event_for_testing(shared.as_ref()); + let second = relay_pressure_event_seq_for_testing(shared.as_ref()); assert!( second > first, "distinct pressure events must remain distinguishable even at sequence boundary" ); - clear_relay_idle_pressure_state_for_testing(); + clear_relay_idle_pressure_state_for_testing_in_shared(shared.as_ref()); } #[tokio::test(flavor = "multi_thread", worker_threads = 4)] async fn integration_race_single_pressure_event_allows_at_most_one_eviction_under_parallel_claims() { - let _guard = relay_idle_pressure_test_scope(); - clear_relay_idle_pressure_state_for_testing(); + let shared = ProxySharedState::new(); + clear_relay_idle_pressure_state_for_testing_in_shared(shared.as_ref()); let stats = Arc::new(Stats::new()); let sessions = 16usize; @@ -671,20 +663,21 @@ async fn integration_race_single_pressure_event_allows_at_most_one_eviction_unde let mut seen_per_session = vec![0u64; sessions]; for conn_id in &conn_ids { - assert!(mark_relay_idle_candidate(*conn_id)); + assert!(mark_relay_idle_candidate_for_testing(shared.as_ref(), *conn_id)); } for round in 0..rounds { - note_relay_pressure_event(); + note_relay_pressure_event_for_testing(shared.as_ref()); let mut joins = Vec::with_capacity(sessions); for (idx, conn_id) in conn_ids.iter().enumerate() { let mut seen = seen_per_session[idx]; let conn_id = *conn_id; let stats = stats.clone(); + let shared = shared.clone(); joins.push(tokio::spawn(async move { let evicted = - maybe_evict_idle_candidate_on_pressure(conn_id, &mut seen, stats.as_ref()); + maybe_evict_idle_candidate_on_pressure_for_testing(shared.as_ref(), conn_id, &mut seen, stats.as_ref()); (idx, conn_id, seen, evicted) })); } @@ -706,7 +699,7 @@ async fn integration_race_single_pressure_event_allows_at_most_one_eviction_unde ); if let Some(conn) = evicted_conn { assert!( - mark_relay_idle_candidate(conn), + mark_relay_idle_candidate_for_testing(shared.as_ref(), conn), "round {round}: evicted conn must be 
re-markable as idle candidate" ); } @@ -721,13 +714,13 @@ async fn integration_race_single_pressure_event_allows_at_most_one_eviction_unde "parallel race must still observe at least one successful eviction" ); - clear_relay_idle_pressure_state_for_testing(); + clear_relay_idle_pressure_state_for_testing_in_shared(shared.as_ref()); } #[tokio::test(flavor = "multi_thread", worker_threads = 4)] async fn integration_race_burst_pressure_with_churn_preserves_empty_set_invalidation_and_budget() { - let _guard = relay_idle_pressure_test_scope(); - clear_relay_idle_pressure_state_for_testing(); + let shared = ProxySharedState::new(); + clear_relay_idle_pressure_state_for_testing_in_shared(shared.as_ref()); let stats = Arc::new(Stats::new()); let sessions = 12usize; @@ -736,7 +729,7 @@ async fn integration_race_burst_pressure_with_churn_preserves_empty_set_invalida let mut seen_per_session = vec![0u64; sessions]; for conn_id in &conn_ids { - assert!(mark_relay_idle_candidate(*conn_id)); + assert!(mark_relay_idle_candidate_for_testing(shared.as_ref(), *conn_id)); } let mut expected_total_evictions = 0u64; @@ -745,20 +738,21 @@ async fn integration_race_burst_pressure_with_churn_preserves_empty_set_invalida let empty_phase = round % 5 == 0; if empty_phase { for conn_id in &conn_ids { - clear_relay_idle_candidate(*conn_id); + clear_relay_idle_candidate_for_testing(shared.as_ref(), *conn_id); } } - note_relay_pressure_event(); + note_relay_pressure_event_for_testing(shared.as_ref()); let mut joins = Vec::with_capacity(sessions); for (idx, conn_id) in conn_ids.iter().enumerate() { let mut seen = seen_per_session[idx]; let conn_id = *conn_id; let stats = stats.clone(); + let shared = shared.clone(); joins.push(tokio::spawn(async move { let evicted = - maybe_evict_idle_candidate_on_pressure(conn_id, &mut seen, stats.as_ref()); + maybe_evict_idle_candidate_on_pressure_for_testing(shared.as_ref(), conn_id, &mut seen, stats.as_ref()); (idx, conn_id, seen, evicted) })); } @@ -780,7 
+774,7 @@ async fn integration_race_burst_pressure_with_churn_preserves_empty_set_invalida "round {round}: empty candidate phase must not allow stale-pressure eviction" ); for conn_id in &conn_ids { - assert!(mark_relay_idle_candidate(*conn_id)); + assert!(mark_relay_idle_candidate_for_testing(shared.as_ref(), *conn_id)); } } else { assert!( @@ -789,7 +783,7 @@ async fn integration_race_burst_pressure_with_churn_preserves_empty_set_invalida ); if let Some(conn_id) = evicted_conn { expected_total_evictions = expected_total_evictions.saturating_add(1); - assert!(mark_relay_idle_candidate(conn_id)); + assert!(mark_relay_idle_candidate_for_testing(shared.as_ref(), conn_id)); } } } @@ -800,5 +794,5 @@ async fn integration_race_burst_pressure_with_churn_preserves_empty_set_invalida "global pressure eviction counter must match observed per-round successful consumes" ); - clear_relay_idle_pressure_state_for_testing(); + clear_relay_idle_pressure_state_for_testing_in_shared(shared.as_ref()); } diff --git a/src/proxy/tests/middle_relay_idle_registry_poison_security_tests.rs b/src/proxy/tests/middle_relay_idle_registry_poison_security_tests.rs index b43825c..ce908da 100644 --- a/src/proxy/tests/middle_relay_idle_registry_poison_security_tests.rs +++ b/src/proxy/tests/middle_relay_idle_registry_poison_security_tests.rs @@ -3,12 +3,13 @@ use std::panic::{AssertUnwindSafe, catch_unwind}; #[test] fn blackhat_registry_poison_recovers_with_fail_closed_reset_and_pressure_accounting() { - let _guard = relay_idle_pressure_test_scope(); - clear_relay_idle_pressure_state_for_testing(); + let shared = ProxySharedState::new(); + clear_relay_idle_pressure_state_for_testing_in_shared(shared.as_ref()); let _ = catch_unwind(AssertUnwindSafe(|| { - let registry = relay_idle_candidate_registry(); - let mut guard = registry + let mut guard = shared + .middle_relay + .relay_idle_registry .lock() .expect("registry lock must be acquired before poison"); guard.by_conn_id.insert( @@ -23,40 +24,41 @@ 
fn blackhat_registry_poison_recovers_with_fail_closed_reset_and_pressure_account })); // Helper lock must recover from poison, reset stale state, and continue. - assert!(mark_relay_idle_candidate(42)); - assert_eq!(oldest_relay_idle_candidate(), Some(42)); + assert!(mark_relay_idle_candidate_for_testing(shared.as_ref(), 42)); + assert_eq!(oldest_relay_idle_candidate_for_testing(shared.as_ref()), Some(42)); - let before = relay_pressure_event_seq(); - note_relay_pressure_event(); - let after = relay_pressure_event_seq(); + let before = relay_pressure_event_seq_for_testing(shared.as_ref()); + note_relay_pressure_event_for_testing(shared.as_ref()); + let after = relay_pressure_event_seq_for_testing(shared.as_ref()); assert!( after > before, "pressure accounting must still advance after poison" ); - clear_relay_idle_pressure_state_for_testing(); + clear_relay_idle_pressure_state_for_testing_in_shared(shared.as_ref()); } #[test] fn clear_state_helper_must_reset_poisoned_registry_for_deterministic_fifo_tests() { - let _guard = relay_idle_pressure_test_scope(); - clear_relay_idle_pressure_state_for_testing(); + let shared = ProxySharedState::new(); + clear_relay_idle_pressure_state_for_testing_in_shared(shared.as_ref()); let _ = catch_unwind(AssertUnwindSafe(|| { - let registry = relay_idle_candidate_registry(); - let _guard = registry + let _guard = shared + .middle_relay + .relay_idle_registry .lock() .expect("registry lock must be acquired before poison"); panic!("intentional poison while lock held"); })); - clear_relay_idle_pressure_state_for_testing(); + clear_relay_idle_pressure_state_for_testing_in_shared(shared.as_ref()); - assert_eq!(oldest_relay_idle_candidate(), None); - assert_eq!(relay_pressure_event_seq(), 0); + assert_eq!(oldest_relay_idle_candidate_for_testing(shared.as_ref()), None); + assert_eq!(relay_pressure_event_seq_for_testing(shared.as_ref()), 0); - assert!(mark_relay_idle_candidate(7)); - assert_eq!(oldest_relay_idle_candidate(), Some(7)); + 
assert!(mark_relay_idle_candidate_for_testing(shared.as_ref(), 7)); + assert_eq!(oldest_relay_idle_candidate_for_testing(shared.as_ref()), Some(7)); - clear_relay_idle_pressure_state_for_testing(); + clear_relay_idle_pressure_state_for_testing_in_shared(shared.as_ref()); } diff --git a/src/proxy/tests/middle_relay_stub_completion_security_tests.rs b/src/proxy/tests/middle_relay_stub_completion_security_tests.rs index 978ee29..54eb784 100644 --- a/src/proxy/tests/middle_relay_stub_completion_security_tests.rs +++ b/src/proxy/tests/middle_relay_stub_completion_security_tests.rs @@ -1,7 +1,6 @@ use super::*; use crate::stats::Stats; use crate::stream::BufferPool; -use std::collections::HashSet; use std::sync::Arc; use tokio::time::{Duration as TokioDuration, timeout}; @@ -16,32 +15,30 @@ fn make_pooled_payload(data: &[u8]) -> PooledBuffer { #[test] #[ignore = "Tracking for M-04: Verify should_emit_full_desync returns true on first occurrence and false on duplicate within window"] fn should_emit_full_desync_filters_duplicates() { - let _guard = desync_dedup_test_lock() - .lock() - .expect("desync dedup test lock must be available"); - clear_desync_dedup_for_testing(); + let shared = ProxySharedState::new(); + clear_desync_dedup_for_testing_in_shared(shared.as_ref()); let key = 0x4D04_0000_0000_0001_u64; let base = Instant::now(); assert!( - should_emit_full_desync(key, false, base), + should_emit_full_desync_for_testing(shared.as_ref(), key, false, base), "first occurrence must emit full forensic record" ); assert!( - !should_emit_full_desync(key, false, base), + !should_emit_full_desync_for_testing(shared.as_ref(), key, false, base), "duplicate at same timestamp must be suppressed" ); let within_window = base + DESYNC_DEDUP_WINDOW - TokioDuration::from_millis(1); assert!( - !should_emit_full_desync(key, false, within_window), + !should_emit_full_desync_for_testing(shared.as_ref(), key, false, within_window), "duplicate strictly inside dedup window must stay 
suppressed" ); let on_window_edge = base + DESYNC_DEDUP_WINDOW; assert!( - should_emit_full_desync(key, false, on_window_edge), + should_emit_full_desync_for_testing(shared.as_ref(), key, false, on_window_edge), "duplicate at window boundary must re-emit and refresh" ); } @@ -49,39 +46,34 @@ fn should_emit_full_desync_filters_duplicates() { #[test] #[ignore = "Tracking for M-04: Verify desync dedup eviction behaves correctly under map-full condition"] fn desync_dedup_eviction_under_map_full_condition() { - let _guard = desync_dedup_test_lock() - .lock() - .expect("desync dedup test lock must be available"); - clear_desync_dedup_for_testing(); + let shared = ProxySharedState::new(); + clear_desync_dedup_for_testing_in_shared(shared.as_ref()); let base = Instant::now(); for key in 0..DESYNC_DEDUP_MAX_ENTRIES as u64 { assert!( - should_emit_full_desync(key, false, base), + should_emit_full_desync_for_testing(shared.as_ref(), key, false, base), "unique key should be inserted while warming dedup cache" ); } - let dedup = DESYNC_DEDUP - .get() - .expect("dedup map must exist after warm-up insertions"); assert_eq!( - dedup.len(), + desync_dedup_len_for_testing(shared.as_ref()), DESYNC_DEDUP_MAX_ENTRIES, "cache warm-up must reach exact hard cap" ); - let before_keys: HashSet = dedup.iter().map(|entry| *entry.key()).collect(); + let before_keys = desync_dedup_keys_for_testing(shared.as_ref()); let newcomer_key = 0x4D04_FFFF_FFFF_0001_u64; assert!( - should_emit_full_desync(newcomer_key, false, base), + should_emit_full_desync_for_testing(shared.as_ref(), newcomer_key, false, base), "first newcomer at map-full must emit under bounded full-cache gate" ); - let after_keys: HashSet = dedup.iter().map(|entry| *entry.key()).collect(); + let after_keys = desync_dedup_keys_for_testing(shared.as_ref()); assert_eq!( - dedup.len(), + desync_dedup_len_for_testing(shared.as_ref()), DESYNC_DEDUP_MAX_ENTRIES, "map-full insertion must preserve hard capacity bound" ); @@ -102,7 +94,7 @@ fn 
desync_dedup_eviction_under_map_full_condition() { ); assert!( - !should_emit_full_desync(newcomer_key, false, base), + !should_emit_full_desync_for_testing(shared.as_ref(), newcomer_key, false, base), "immediate duplicate newcomer must remain suppressed" ); } diff --git a/src/proxy/tests/proxy_shared_state_isolation_tests.rs b/src/proxy/tests/proxy_shared_state_isolation_tests.rs new file mode 100644 index 0000000..3e26000 --- /dev/null +++ b/src/proxy/tests/proxy_shared_state_isolation_tests.rs @@ -0,0 +1,608 @@ +use crate::proxy::handshake::{ + auth_probe_fail_streak_for_testing_in_shared, auth_probe_is_throttled_for_testing_in_shared, + auth_probe_record_failure_for_testing, clear_auth_probe_state_for_testing_in_shared, + clear_unknown_sni_warn_state_for_testing_in_shared, clear_warned_secrets_for_testing_in_shared, + should_emit_unknown_sni_warn_for_testing_in_shared, warned_secrets_for_testing_in_shared, +}; +use crate::proxy::client::handle_client_stream_with_shared; +use crate::proxy::middle_relay::{ + clear_desync_dedup_for_testing_in_shared, clear_relay_idle_candidate_for_testing, + clear_relay_idle_pressure_state_for_testing_in_shared, mark_relay_idle_candidate_for_testing, + maybe_evict_idle_candidate_on_pressure_for_testing, note_relay_pressure_event_for_testing, + oldest_relay_idle_candidate_for_testing, relay_idle_mark_seq_for_testing, + relay_pressure_event_seq_for_testing, should_emit_full_desync_for_testing, +}; +use crate::proxy::route_mode::{RelayRouteMode, RouteRuntimeController}; +use crate::proxy::shared_state::ProxySharedState; +use crate::{ + config::{ProxyConfig, UpstreamConfig, UpstreamType}, + crypto::SecureRandom, + ip_tracker::UserIpTracker, + stats::{ReplayChecker, Stats, beobachten::BeobachtenStore}, + stream::BufferPool, + transport::UpstreamManager, +}; +use std::net::{IpAddr, Ipv4Addr}; +use std::sync::Arc; +use std::time::{Duration, Instant}; +use tokio::io::{AsyncWriteExt, duplex}; +use tokio::sync::Barrier; + +struct 
ClientHarness { + config: Arc, + stats: Arc, + upstream_manager: Arc, + replay_checker: Arc, + buffer_pool: Arc, + rng: Arc, + route_runtime: Arc, + ip_tracker: Arc, + beobachten: Arc, +} + +fn new_client_harness() -> ClientHarness { + let mut cfg = ProxyConfig::default(); + cfg.censorship.mask = false; + cfg.general.modes.classic = true; + cfg.general.modes.secure = true; + let config = Arc::new(cfg); + let stats = Arc::new(Stats::new()); + + let upstream_manager = Arc::new(UpstreamManager::new( + vec![UpstreamConfig { + upstream_type: UpstreamType::Direct { + interface: None, + bind_addresses: None, + }, + weight: 1, + enabled: true, + scopes: String::new(), + selected_scope: String::new(), + }], + 1, + 1, + 1, + 10, + 1, + false, + stats.clone(), + )); + + ClientHarness { + config, + stats, + upstream_manager, + replay_checker: Arc::new(ReplayChecker::new(128, Duration::from_secs(60))), + buffer_pool: Arc::new(BufferPool::new()), + rng: Arc::new(SecureRandom::new()), + route_runtime: Arc::new(RouteRuntimeController::new(RelayRouteMode::Direct)), + ip_tracker: Arc::new(UserIpTracker::new()), + beobachten: Arc::new(BeobachtenStore::new()), + } +} + +async fn drive_invalid_mtproto_handshake(shared: Arc, peer: std::net::SocketAddr) { + let harness = new_client_harness(); + let (server_side, mut client_side) = duplex(4096); + let invalid = [0u8; 64]; + + let task = tokio::spawn(handle_client_stream_with_shared( + server_side, + peer, + harness.config, + harness.stats, + harness.upstream_manager, + harness.replay_checker, + harness.buffer_pool, + harness.rng, + None, + harness.route_runtime, + None, + harness.ip_tracker, + harness.beobachten, + shared, + false, + )); + + client_side + .write_all(&invalid) + .await + .expect("failed to write invalid handshake"); + client_side.shutdown().await.expect("failed to shutdown client"); + let _ = tokio::time::timeout(Duration::from_secs(3), task) + .await + .expect("client task timed out") + .expect("client task join failed"); 
+} + +#[test] +fn proxy_shared_state_two_instances_do_not_share_auth_probe_state() { + let a = ProxySharedState::new(); + let b = ProxySharedState::new(); + clear_auth_probe_state_for_testing_in_shared(a.as_ref()); + + let ip = IpAddr::V4(Ipv4Addr::new(198, 51, 100, 10)); + auth_probe_record_failure_for_testing(a.as_ref(), ip, Instant::now()); + + assert_eq!( + auth_probe_fail_streak_for_testing_in_shared(a.as_ref(), ip), + Some(1) + ); + assert_eq!(auth_probe_fail_streak_for_testing_in_shared(b.as_ref(), ip), None); +} + +#[test] +fn proxy_shared_state_two_instances_do_not_share_desync_dedup() { + let a = ProxySharedState::new(); + let b = ProxySharedState::new(); + clear_desync_dedup_for_testing_in_shared(a.as_ref()); + + let now = Instant::now(); + let key = 0xA5A5_u64; + assert!(should_emit_full_desync_for_testing(a.as_ref(), key, false, now)); + assert!(should_emit_full_desync_for_testing(b.as_ref(), key, false, now)); +} + +#[test] +fn proxy_shared_state_two_instances_do_not_share_idle_registry() { + let a = ProxySharedState::new(); + let b = ProxySharedState::new(); + clear_relay_idle_pressure_state_for_testing_in_shared(a.as_ref()); + + assert!(mark_relay_idle_candidate_for_testing(a.as_ref(), 111)); + assert_eq!(oldest_relay_idle_candidate_for_testing(a.as_ref()), Some(111)); + assert_eq!(oldest_relay_idle_candidate_for_testing(b.as_ref()), None); +} + +#[test] +fn proxy_shared_state_reset_in_one_instance_does_not_affect_another() { + let a = ProxySharedState::new(); + let b = ProxySharedState::new(); + clear_auth_probe_state_for_testing_in_shared(a.as_ref()); + + let ip_a = IpAddr::V4(Ipv4Addr::new(203, 0, 113, 1)); + let ip_b = IpAddr::V4(Ipv4Addr::new(203, 0, 113, 2)); + let now = Instant::now(); + + auth_probe_record_failure_for_testing(a.as_ref(), ip_a, now); + auth_probe_record_failure_for_testing(b.as_ref(), ip_b, now); + clear_auth_probe_state_for_testing_in_shared(a.as_ref()); + + assert_eq!(auth_probe_fail_streak_for_testing_in_shared(a.as_ref(), 
ip_a), None); + assert_eq!( + auth_probe_fail_streak_for_testing_in_shared(b.as_ref(), ip_b), + Some(1) + ); +} + +#[test] +fn proxy_shared_state_parallel_auth_probe_updates_stay_per_instance() { + let a = ProxySharedState::new(); + let b = ProxySharedState::new(); + clear_auth_probe_state_for_testing_in_shared(a.as_ref()); + + let ip = IpAddr::V4(Ipv4Addr::new(192, 0, 2, 77)); + let now = Instant::now(); + + for _ in 0..5 { + auth_probe_record_failure_for_testing(a.as_ref(), ip, now); + } + for _ in 0..3 { + auth_probe_record_failure_for_testing(b.as_ref(), ip, now + Duration::from_millis(1)); + } + + assert_eq!(auth_probe_fail_streak_for_testing_in_shared(a.as_ref(), ip), Some(5)); + assert_eq!(auth_probe_fail_streak_for_testing_in_shared(b.as_ref(), ip), Some(3)); +} + +#[tokio::test] +async fn proxy_shared_state_client_pipeline_records_probe_failures_in_instance_state() { + let shared = ProxySharedState::new(); + clear_auth_probe_state_for_testing_in_shared(shared.as_ref()); + let peer_ip = IpAddr::V4(Ipv4Addr::new(198, 51, 100, 200)); + let peer = std::net::SocketAddr::new(peer_ip, 54001); + + drive_invalid_mtproto_handshake(shared.clone(), peer).await; + + assert_eq!( + auth_probe_fail_streak_for_testing_in_shared(shared.as_ref(), peer_ip), + Some(1), + "invalid handshake in client pipeline must update injected shared auth-probe state" + ); +} + +#[tokio::test] +async fn proxy_shared_state_client_pipeline_keeps_auth_probe_isolated_between_instances() { + let shared_a = ProxySharedState::new(); + let shared_b = ProxySharedState::new(); + clear_auth_probe_state_for_testing_in_shared(shared_a.as_ref()); + clear_auth_probe_state_for_testing_in_shared(shared_b.as_ref()); + + let peer_a_ip = IpAddr::V4(Ipv4Addr::new(203, 0, 113, 210)); + let peer_b_ip = IpAddr::V4(Ipv4Addr::new(203, 0, 113, 211)); + + drive_invalid_mtproto_handshake( + shared_a.clone(), + std::net::SocketAddr::new(peer_a_ip, 54110), + ) + .await; + drive_invalid_mtproto_handshake( + 
shared_b.clone(), + std::net::SocketAddr::new(peer_b_ip, 54111), + ) + .await; + + assert_eq!( + auth_probe_fail_streak_for_testing_in_shared(shared_a.as_ref(), peer_a_ip), + Some(1) + ); + assert_eq!( + auth_probe_fail_streak_for_testing_in_shared(shared_b.as_ref(), peer_b_ip), + Some(1) + ); + assert_eq!( + auth_probe_fail_streak_for_testing_in_shared(shared_a.as_ref(), peer_b_ip), + None + ); + assert_eq!( + auth_probe_fail_streak_for_testing_in_shared(shared_b.as_ref(), peer_a_ip), + None + ); +} + +#[tokio::test(flavor = "multi_thread", worker_threads = 4)] +async fn proxy_shared_state_client_pipeline_high_contention_same_ip_stays_lossless_per_instance() { + let shared_a = ProxySharedState::new(); + let shared_b = ProxySharedState::new(); + clear_auth_probe_state_for_testing_in_shared(shared_a.as_ref()); + clear_auth_probe_state_for_testing_in_shared(shared_b.as_ref()); + + let ip = IpAddr::V4(Ipv4Addr::new(198, 51, 100, 250)); + let workers = 48u16; + let barrier = Arc::new(Barrier::new((workers as usize) * 2)); + let mut tasks = Vec::new(); + + for i in 0..workers { + let shared_a = shared_a.clone(); + let barrier_a = barrier.clone(); + let peer_a = std::net::SocketAddr::new(ip, 56000 + i); + tasks.push(tokio::spawn(async move { + barrier_a.wait().await; + drive_invalid_mtproto_handshake(shared_a, peer_a).await; + })); + + let shared_b = shared_b.clone(); + let barrier_b = barrier.clone(); + let peer_b = std::net::SocketAddr::new(ip, 56100 + i); + tasks.push(tokio::spawn(async move { + barrier_b.wait().await; + drive_invalid_mtproto_handshake(shared_b, peer_b).await; + })); + } + + for task in tasks { + task.await.expect("pipeline task join failed"); + } + + let streak_a = auth_probe_fail_streak_for_testing_in_shared(shared_a.as_ref(), ip) + .expect("instance A must track probe failures"); + let streak_b = auth_probe_fail_streak_for_testing_in_shared(shared_b.as_ref(), ip) + .expect("instance B must track probe failures"); + + assert!(streak_a > 0); + 
assert!(streak_b > 0); + + clear_auth_probe_state_for_testing_in_shared(shared_a.as_ref()); + assert_eq!( + auth_probe_fail_streak_for_testing_in_shared(shared_a.as_ref(), ip), + None, + "clearing one instance must reset only that instance" + ); + assert!( + auth_probe_fail_streak_for_testing_in_shared(shared_b.as_ref(), ip).is_some(), + "clearing one instance must not clear the other instance" + ); +} + +#[test] +fn proxy_shared_state_auth_saturation_does_not_bleed_across_instances() { + let a = ProxySharedState::new(); + let b = ProxySharedState::new(); + clear_auth_probe_state_for_testing_in_shared(a.as_ref()); + clear_auth_probe_state_for_testing_in_shared(b.as_ref()); + + let ip = IpAddr::V4(Ipv4Addr::new(203, 0, 113, 77)); + let future_now = Instant::now() + Duration::from_secs(1); + for _ in 0..8 { + auth_probe_record_failure_for_testing(a.as_ref(), ip, future_now); + } + + assert!(auth_probe_is_throttled_for_testing_in_shared(a.as_ref(), ip)); + assert!(!auth_probe_is_throttled_for_testing_in_shared(b.as_ref(), ip)); +} + +#[test] +fn proxy_shared_state_poison_clear_in_one_instance_does_not_affect_other_instance() { + let a = ProxySharedState::new(); + let b = ProxySharedState::new(); + clear_auth_probe_state_for_testing_in_shared(a.as_ref()); + clear_auth_probe_state_for_testing_in_shared(b.as_ref()); + + let ip_a = IpAddr::V4(Ipv4Addr::new(203, 0, 113, 31)); + let ip_b = IpAddr::V4(Ipv4Addr::new(203, 0, 113, 32)); + let now = Instant::now(); + + auth_probe_record_failure_for_testing(a.as_ref(), ip_a, now); + auth_probe_record_failure_for_testing(b.as_ref(), ip_b, now); + + let a_for_poison = a.clone(); + let _ = std::thread::spawn(move || { + let _hold = a_for_poison + .handshake + .auth_probe_saturation + .lock() + .unwrap_or_else(|poisoned| poisoned.into_inner()); + panic!("intentional poison for per-instance isolation regression coverage"); + }) + .join(); + + clear_auth_probe_state_for_testing_in_shared(a.as_ref()); + + 
assert_eq!(auth_probe_fail_streak_for_testing_in_shared(a.as_ref(), ip_a), None); + assert_eq!( + auth_probe_fail_streak_for_testing_in_shared(b.as_ref(), ip_b), + Some(1), + "poison recovery and clear in one instance must not touch other instance state" + ); +} + +#[test] +fn proxy_shared_state_unknown_sni_cooldown_does_not_bleed_across_instances() { + let a = ProxySharedState::new(); + let b = ProxySharedState::new(); + clear_unknown_sni_warn_state_for_testing_in_shared(a.as_ref()); + clear_unknown_sni_warn_state_for_testing_in_shared(b.as_ref()); + + let now = Instant::now(); + assert!(should_emit_unknown_sni_warn_for_testing_in_shared( + a.as_ref(), + now + )); + assert!(should_emit_unknown_sni_warn_for_testing_in_shared( + b.as_ref(), + now + )); +} + +#[test] +fn proxy_shared_state_warned_secret_cache_does_not_bleed_across_instances() { + let a = ProxySharedState::new(); + let b = ProxySharedState::new(); + clear_warned_secrets_for_testing_in_shared(a.as_ref()); + clear_warned_secrets_for_testing_in_shared(b.as_ref()); + + let key = ("isolation-user".to_string(), "invalid_hex".to_string()); + { + let warned = warned_secrets_for_testing_in_shared(a.as_ref()); + let mut guard = warned + .lock() + .unwrap_or_else(|poisoned| poisoned.into_inner()); + guard.insert(key.clone()); + } + + let contains_in_a = { + let warned = warned_secrets_for_testing_in_shared(a.as_ref()); + let guard = warned + .lock() + .unwrap_or_else(|poisoned| poisoned.into_inner()); + guard.contains(&key) + }; + let contains_in_b = { + let warned = warned_secrets_for_testing_in_shared(b.as_ref()); + let guard = warned + .lock() + .unwrap_or_else(|poisoned| poisoned.into_inner()); + guard.contains(&key) + }; + + assert!(contains_in_a); + assert!(!contains_in_b); +} + +#[test] +fn proxy_shared_state_idle_mark_seq_is_per_instance() { + let a = ProxySharedState::new(); + let b = ProxySharedState::new(); + clear_relay_idle_pressure_state_for_testing_in_shared(a.as_ref()); + 
clear_relay_idle_pressure_state_for_testing_in_shared(b.as_ref()); + + assert_eq!(relay_idle_mark_seq_for_testing(a.as_ref()), 0); + assert_eq!(relay_idle_mark_seq_for_testing(b.as_ref()), 0); + + assert!(mark_relay_idle_candidate_for_testing(a.as_ref(), 9001)); + assert_eq!(relay_idle_mark_seq_for_testing(a.as_ref()), 1); + assert_eq!(relay_idle_mark_seq_for_testing(b.as_ref()), 0); + + assert!(mark_relay_idle_candidate_for_testing(b.as_ref(), 9002)); + assert_eq!(relay_idle_mark_seq_for_testing(a.as_ref()), 1); + assert_eq!(relay_idle_mark_seq_for_testing(b.as_ref()), 1); +} + +#[test] +fn proxy_shared_state_unknown_sni_clear_in_one_instance_does_not_reset_other() { + let a = ProxySharedState::new(); + let b = ProxySharedState::new(); + clear_unknown_sni_warn_state_for_testing_in_shared(a.as_ref()); + clear_unknown_sni_warn_state_for_testing_in_shared(b.as_ref()); + + let now = Instant::now(); + assert!(should_emit_unknown_sni_warn_for_testing_in_shared( + a.as_ref(), + now + )); + assert!(should_emit_unknown_sni_warn_for_testing_in_shared( + b.as_ref(), + now + )); + + clear_unknown_sni_warn_state_for_testing_in_shared(a.as_ref()); + assert!(should_emit_unknown_sni_warn_for_testing_in_shared( + a.as_ref(), + now + Duration::from_millis(1) + )); + assert!(!should_emit_unknown_sni_warn_for_testing_in_shared( + b.as_ref(), + now + Duration::from_millis(1) + )); +} + +#[test] +fn proxy_shared_state_warned_secret_clear_in_one_instance_does_not_clear_other() { + let a = ProxySharedState::new(); + let b = ProxySharedState::new(); + clear_warned_secrets_for_testing_in_shared(a.as_ref()); + clear_warned_secrets_for_testing_in_shared(b.as_ref()); + + let key = ("clear-isolation-user".to_string(), "invalid_length".to_string()); + { + let warned_a = warned_secrets_for_testing_in_shared(a.as_ref()); + let mut guard_a = warned_a + .lock() + .unwrap_or_else(|poisoned| poisoned.into_inner()); + guard_a.insert(key.clone()); + + let warned_b = 
warned_secrets_for_testing_in_shared(b.as_ref()); + let mut guard_b = warned_b + .lock() + .unwrap_or_else(|poisoned| poisoned.into_inner()); + guard_b.insert(key.clone()); + } + + clear_warned_secrets_for_testing_in_shared(a.as_ref()); + + let has_a = { + let warned = warned_secrets_for_testing_in_shared(a.as_ref()); + let guard = warned + .lock() + .unwrap_or_else(|poisoned| poisoned.into_inner()); + guard.contains(&key) + }; + let has_b = { + let warned = warned_secrets_for_testing_in_shared(b.as_ref()); + let guard = warned + .lock() + .unwrap_or_else(|poisoned| poisoned.into_inner()); + guard.contains(&key) + }; + + assert!(!has_a); + assert!(has_b); +} + +#[test] +fn proxy_shared_state_desync_duplicate_suppression_is_instance_scoped() { + let a = ProxySharedState::new(); + let b = ProxySharedState::new(); + clear_desync_dedup_for_testing_in_shared(a.as_ref()); + clear_desync_dedup_for_testing_in_shared(b.as_ref()); + + let now = Instant::now(); + let key = 0xBEEF_0000_0000_0001u64; + assert!(should_emit_full_desync_for_testing(a.as_ref(), key, false, now)); + assert!(!should_emit_full_desync_for_testing( + a.as_ref(), + key, + false, + now + Duration::from_millis(1) + )); + assert!(should_emit_full_desync_for_testing(b.as_ref(), key, false, now)); +} + +#[test] +fn proxy_shared_state_desync_clear_in_one_instance_does_not_clear_other() { + let a = ProxySharedState::new(); + let b = ProxySharedState::new(); + clear_desync_dedup_for_testing_in_shared(a.as_ref()); + clear_desync_dedup_for_testing_in_shared(b.as_ref()); + + let now = Instant::now(); + let key = 0xCAFE_0000_0000_0001u64; + assert!(should_emit_full_desync_for_testing(a.as_ref(), key, false, now)); + assert!(should_emit_full_desync_for_testing(b.as_ref(), key, false, now)); + + clear_desync_dedup_for_testing_in_shared(a.as_ref()); + + assert!(should_emit_full_desync_for_testing( + a.as_ref(), + key, + false, + now + Duration::from_millis(2) + )); + assert!(!should_emit_full_desync_for_testing( + 
b.as_ref(), + key, + false, + now + Duration::from_millis(2) + )); +} + +#[test] +fn proxy_shared_state_idle_candidate_clear_in_one_instance_does_not_affect_other() { + let a = ProxySharedState::new(); + let b = ProxySharedState::new(); + clear_relay_idle_pressure_state_for_testing_in_shared(a.as_ref()); + clear_relay_idle_pressure_state_for_testing_in_shared(b.as_ref()); + + assert!(mark_relay_idle_candidate_for_testing(a.as_ref(), 1001)); + assert!(mark_relay_idle_candidate_for_testing(b.as_ref(), 2002)); + clear_relay_idle_candidate_for_testing(a.as_ref(), 1001); + + assert_eq!(oldest_relay_idle_candidate_for_testing(a.as_ref()), None); + assert_eq!(oldest_relay_idle_candidate_for_testing(b.as_ref()), Some(2002)); +} + +#[test] +fn proxy_shared_state_pressure_seq_increments_are_instance_scoped() { + let a = ProxySharedState::new(); + let b = ProxySharedState::new(); + clear_relay_idle_pressure_state_for_testing_in_shared(a.as_ref()); + clear_relay_idle_pressure_state_for_testing_in_shared(b.as_ref()); + + assert_eq!(relay_pressure_event_seq_for_testing(a.as_ref()), 0); + assert_eq!(relay_pressure_event_seq_for_testing(b.as_ref()), 0); + + note_relay_pressure_event_for_testing(a.as_ref()); + note_relay_pressure_event_for_testing(a.as_ref()); + + assert_eq!(relay_pressure_event_seq_for_testing(a.as_ref()), 2); + assert_eq!(relay_pressure_event_seq_for_testing(b.as_ref()), 0); +} + +#[test] +fn proxy_shared_state_pressure_consumption_does_not_cross_instances() { + let a = ProxySharedState::new(); + let b = ProxySharedState::new(); + clear_relay_idle_pressure_state_for_testing_in_shared(a.as_ref()); + clear_relay_idle_pressure_state_for_testing_in_shared(b.as_ref()); + + assert!(mark_relay_idle_candidate_for_testing(a.as_ref(), 7001)); + assert!(mark_relay_idle_candidate_for_testing(b.as_ref(), 7001)); + note_relay_pressure_event_for_testing(a.as_ref()); + + let stats = Stats::new(); + let mut seen_a = 0u64; + let mut seen_b = 0u64; + + 
assert!(maybe_evict_idle_candidate_on_pressure_for_testing( + a.as_ref(), + 7001, + &mut seen_a, + &stats + )); + assert!(!maybe_evict_idle_candidate_on_pressure_for_testing( + b.as_ref(), + 7001, + &mut seen_b, + &stats + )); +} diff --git a/src/proxy/tests/proxy_shared_state_parallel_execution_tests.rs b/src/proxy/tests/proxy_shared_state_parallel_execution_tests.rs new file mode 100644 index 0000000..45da59a --- /dev/null +++ b/src/proxy/tests/proxy_shared_state_parallel_execution_tests.rs @@ -0,0 +1,255 @@ +use crate::proxy::handshake::{ + auth_probe_fail_streak_for_testing_in_shared, auth_probe_record_failure_for_testing, + clear_auth_probe_state_for_testing_in_shared, clear_unknown_sni_warn_state_for_testing_in_shared, + should_emit_unknown_sni_warn_for_testing_in_shared, +}; +use crate::proxy::middle_relay::{ + clear_desync_dedup_for_testing_in_shared, clear_relay_idle_pressure_state_for_testing_in_shared, + mark_relay_idle_candidate_for_testing, oldest_relay_idle_candidate_for_testing, + should_emit_full_desync_for_testing, +}; +use crate::proxy::shared_state::ProxySharedState; +use rand::SeedableRng; +use rand::RngExt; +use rand::rngs::StdRng; +use std::net::{IpAddr, Ipv4Addr}; +use std::sync::Arc; +use std::time::Instant; +use tokio::sync::Barrier; + +#[tokio::test(flavor = "multi_thread", worker_threads = 4)] +async fn proxy_shared_state_50_concurrent_instances_no_counter_bleed() { + let mut handles = Vec::new(); + for i in 0..50_u8 { + handles.push(tokio::spawn(async move { + let shared = ProxySharedState::new(); + clear_auth_probe_state_for_testing_in_shared(shared.as_ref()); + let ip = IpAddr::V4(Ipv4Addr::new(198, 51, 100, 200)); + auth_probe_record_failure_for_testing(shared.as_ref(), ip, Instant::now()); + auth_probe_fail_streak_for_testing_in_shared(shared.as_ref(), ip) + })); + } + + for handle in handles { + let streak = handle.await.expect("task join failed"); + assert_eq!(streak, Some(1)); + } +} + +#[tokio::test(flavor = "multi_thread", 
worker_threads = 4)] +async fn proxy_shared_state_desync_rotation_concurrent_20_instances() { + let now = Instant::now(); + let key = 0xD35E_D35E_u64; + let mut handles = Vec::new(); + for _ in 0..20_u64 { + handles.push(tokio::spawn(async move { + let shared = ProxySharedState::new(); + clear_desync_dedup_for_testing_in_shared(shared.as_ref()); + should_emit_full_desync_for_testing(shared.as_ref(), key, false, now) + })); + } + + for handle in handles { + let emitted = handle.await.expect("task join failed"); + assert!(emitted); + } +} + +#[tokio::test(flavor = "multi_thread", worker_threads = 4)] +async fn proxy_shared_state_idle_registry_concurrent_10_instances() { + let mut handles = Vec::new(); + let conn_id = 42_u64; + for _ in 1..=10_u64 { + handles.push(tokio::spawn(async move { + let shared = ProxySharedState::new(); + clear_relay_idle_pressure_state_for_testing_in_shared(shared.as_ref()); + let marked = mark_relay_idle_candidate_for_testing(shared.as_ref(), conn_id); + let oldest = oldest_relay_idle_candidate_for_testing(shared.as_ref()); + (marked, oldest) + })); + } + + for (i, handle) in handles.into_iter().enumerate() { + let (marked, oldest) = handle.await.expect("task join failed"); + assert!(marked, "instance {} failed to mark", i); + assert_eq!(oldest, Some(conn_id)); + } +} + +#[tokio::test(flavor = "multi_thread", worker_threads = 4)] +async fn proxy_shared_state_dual_instance_same_ip_high_contention_no_counter_bleed() { + let a = ProxySharedState::new(); + let b = ProxySharedState::new(); + clear_auth_probe_state_for_testing_in_shared(a.as_ref()); + clear_auth_probe_state_for_testing_in_shared(b.as_ref()); + + let ip = IpAddr::V4(Ipv4Addr::new(203, 0, 113, 200)); + let mut handles = Vec::new(); + + for _ in 0..64 { + let a = a.clone(); + let b = b.clone(); + handles.push(tokio::spawn(async move { + auth_probe_record_failure_for_testing(a.as_ref(), ip, Instant::now()); + auth_probe_record_failure_for_testing(b.as_ref(), ip, Instant::now()); + 
})); + } + + for handle in handles { + handle.await.expect("task join failed"); + } + + assert_eq!(auth_probe_fail_streak_for_testing_in_shared(a.as_ref(), ip), Some(64)); + assert_eq!(auth_probe_fail_streak_for_testing_in_shared(b.as_ref(), ip), Some(64)); +} + +#[tokio::test(flavor = "multi_thread", worker_threads = 4)] +async fn proxy_shared_state_unknown_sni_parallel_instances_no_cross_cooldown() { + let mut handles = Vec::new(); + let now = Instant::now(); + + for _ in 0..32 { + handles.push(tokio::spawn(async move { + let shared = ProxySharedState::new(); + clear_unknown_sni_warn_state_for_testing_in_shared(shared.as_ref()); + let first = should_emit_unknown_sni_warn_for_testing_in_shared(shared.as_ref(), now); + let second = should_emit_unknown_sni_warn_for_testing_in_shared( + shared.as_ref(), + now + std::time::Duration::from_millis(1), + ); + (first, second) + })); + } + + for handle in handles { + let (first, second) = handle.await.expect("task join failed"); + assert!(first); + assert!(!second); + } +} + +#[tokio::test(flavor = "multi_thread", worker_threads = 4)] +async fn proxy_shared_state_auth_probe_high_contention_increments_are_lossless() { + let shared = ProxySharedState::new(); + clear_auth_probe_state_for_testing_in_shared(shared.as_ref()); + + let ip = IpAddr::V4(Ipv4Addr::new(198, 51, 100, 33)); + let workers = 128usize; + let rounds = 20usize; + + for _ in 0..rounds { + let start = Arc::new(Barrier::new(workers)); + let mut handles = Vec::with_capacity(workers); + + for _ in 0..workers { + let shared = shared.clone(); + let start = start.clone(); + handles.push(tokio::spawn(async move { + start.wait().await; + auth_probe_record_failure_for_testing(shared.as_ref(), ip, Instant::now()); + })); + } + + for handle in handles { + handle.await.expect("task join failed"); + } + } + + let expected = (workers * rounds) as u32; + assert_eq!( + auth_probe_fail_streak_for_testing_in_shared(shared.as_ref(), ip), + Some(expected), + "auth probe fail 
streak must account for every concurrent update" + ); +} + +#[tokio::test(flavor = "multi_thread", worker_threads = 4)] +async fn proxy_shared_state_seed_matrix_concurrency_isolation_no_counter_bleed() { + let seeds: [u64; 8] = [ + 0x0000_0000_0000_0001, + 0x1111_1111_1111_1111, + 0xA5A5_A5A5_A5A5_A5A5, + 0xDEAD_BEEF_CAFE_BABE, + 0x0123_4567_89AB_CDEF, + 0xFEDC_BA98_7654_3210, + 0x0F0F_F0F0_55AA_AA55, + 0x1357_9BDF_2468_ACE0, + ]; + + for seed in seeds { + let mut rng = StdRng::seed_from_u64(seed); + let shared_a = ProxySharedState::new(); + let shared_b = ProxySharedState::new(); + clear_auth_probe_state_for_testing_in_shared(shared_a.as_ref()); + clear_auth_probe_state_for_testing_in_shared(shared_b.as_ref()); + + let ip = IpAddr::V4(Ipv4Addr::new( + 198, + 51, + 100, + rng.random_range(1_u8..=250_u8), + )); + let workers = rng.random_range(16_usize..=48_usize); + let rounds = rng.random_range(4_usize..=10_usize); + + let mut expected_a: u32 = 0; + let mut expected_b: u32 = 0; + + for _ in 0..rounds { + let start = Arc::new(Barrier::new(workers * 2)); + let mut handles = Vec::with_capacity(workers * 2); + + for _ in 0..workers { + let a_ops = rng.random_range(1_u32..=3_u32); + let b_ops = rng.random_range(1_u32..=3_u32); + expected_a = expected_a.saturating_add(a_ops); + expected_b = expected_b.saturating_add(b_ops); + + let shared_a = shared_a.clone(); + let start_a = start.clone(); + handles.push(tokio::spawn(async move { + start_a.wait().await; + for _ in 0..a_ops { + auth_probe_record_failure_for_testing(shared_a.as_ref(), ip, Instant::now()); + } + })); + + let shared_b = shared_b.clone(); + let start_b = start.clone(); + handles.push(tokio::spawn(async move { + start_b.wait().await; + for _ in 0..b_ops { + auth_probe_record_failure_for_testing(shared_b.as_ref(), ip, Instant::now()); + } + })); + } + + for handle in handles { + handle.await.expect("task join failed"); + } + } + + assert_eq!( + auth_probe_fail_streak_for_testing_in_shared(shared_a.as_ref(), 
ip), + Some(expected_a), + "seed {seed:#x}: instance A streak mismatch" + ); + assert_eq!( + auth_probe_fail_streak_for_testing_in_shared(shared_b.as_ref(), ip), + Some(expected_b), + "seed {seed:#x}: instance B streak mismatch" + ); + + clear_auth_probe_state_for_testing_in_shared(shared_a.as_ref()); + assert_eq!( + auth_probe_fail_streak_for_testing_in_shared(shared_a.as_ref(), ip), + None, + "seed {seed:#x}: clearing A must reset only A" + ); + assert_eq!( + auth_probe_fail_streak_for_testing_in_shared(shared_b.as_ref(), ip), + Some(expected_b), + "seed {seed:#x}: clearing A must not mutate B" + ); + } +}