Phase 2 implemented with additional guards

This commit is contained in:
David Osipov
2026-04-03 02:08:59 +04:00
parent a9f695623d
commit 6ea867ce36
27 changed files with 2513 additions and 1131 deletions

View File

@@ -7,12 +7,6 @@ use std::time::{Duration, Instant};
// --- Helpers ---
fn auth_probe_test_guard() -> std::sync::MutexGuard<'static, ()> {
auth_probe_test_lock()
.lock()
.unwrap_or_else(|poisoned| poisoned.into_inner())
}
fn test_config_with_secret_hex(secret_hex: &str) -> ProxyConfig {
let mut cfg = ProxyConfig::default();
cfg.access.users.clear();
@@ -147,8 +141,8 @@ fn make_valid_tls_client_hello_with_alpn(
#[tokio::test]
async fn tls_minimum_viable_length_boundary() {
let _guard = auth_probe_test_guard();
clear_auth_probe_state_for_testing();
let shared = ProxySharedState::new();
clear_auth_probe_state_for_testing_in_shared(shared.as_ref());
let secret = [0x11u8; 16];
let config = test_config_with_secret_hex("11111111111111111111111111111111");
@@ -200,8 +194,8 @@ async fn tls_minimum_viable_length_boundary() {
#[tokio::test]
async fn mtproto_extreme_dc_index_serialization() {
let _guard = auth_probe_test_guard();
clear_auth_probe_state_for_testing();
let shared = ProxySharedState::new();
clear_auth_probe_state_for_testing_in_shared(shared.as_ref());
let secret_hex = "22222222222222222222222222222222";
let config = test_config_with_secret_hex(secret_hex);
@@ -241,8 +235,8 @@ async fn mtproto_extreme_dc_index_serialization() {
#[tokio::test]
async fn alpn_strict_case_and_padding_rejection() {
let _guard = auth_probe_test_guard();
clear_auth_probe_state_for_testing();
let shared = ProxySharedState::new();
clear_auth_probe_state_for_testing_in_shared(shared.as_ref());
let secret = [0x33u8; 16];
let mut config = test_config_with_secret_hex("33333333333333333333333333333333");
@@ -297,8 +291,8 @@ fn ipv4_mapped_ipv6_bucketing_anomaly() {
#[tokio::test]
async fn mtproto_invalid_ciphertext_does_not_poison_replay_cache() {
let _guard = auth_probe_test_guard();
clear_auth_probe_state_for_testing();
let shared = ProxySharedState::new();
clear_auth_probe_state_for_testing_in_shared(shared.as_ref());
let secret_hex = "55555555555555555555555555555555";
let config = test_config_with_secret_hex(secret_hex);
@@ -341,8 +335,8 @@ async fn mtproto_invalid_ciphertext_does_not_poison_replay_cache() {
#[tokio::test]
async fn tls_invalid_session_does_not_poison_replay_cache() {
let _guard = auth_probe_test_guard();
clear_auth_probe_state_for_testing();
let shared = ProxySharedState::new();
clear_auth_probe_state_for_testing_in_shared(shared.as_ref());
let secret = [0x66u8; 16];
let config = test_config_with_secret_hex("66666666666666666666666666666666");
@@ -387,8 +381,8 @@ async fn tls_invalid_session_does_not_poison_replay_cache() {
#[tokio::test]
async fn server_hello_delay_timing_neutrality_on_hmac_failure() {
let _guard = auth_probe_test_guard();
clear_auth_probe_state_for_testing();
let shared = ProxySharedState::new();
clear_auth_probe_state_for_testing_in_shared(shared.as_ref());
let secret = [0x77u8; 16];
let mut config = test_config_with_secret_hex("77777777777777777777777777777777");
@@ -425,8 +419,8 @@ async fn server_hello_delay_timing_neutrality_on_hmac_failure() {
#[tokio::test]
async fn server_hello_delay_inversion_resilience() {
let _guard = auth_probe_test_guard();
clear_auth_probe_state_for_testing();
let shared = ProxySharedState::new();
clear_auth_probe_state_for_testing_in_shared(shared.as_ref());
let secret = [0x88u8; 16];
let mut config = test_config_with_secret_hex("88888888888888888888888888888888");
@@ -462,10 +456,9 @@ async fn server_hello_delay_inversion_resilience() {
#[tokio::test]
async fn mixed_valid_and_invalid_user_secrets_configuration() {
let _guard = auth_probe_test_guard();
clear_auth_probe_state_for_testing();
let _warn_guard = warned_secrets_test_lock().lock().unwrap();
clear_warned_secrets_for_testing();
let shared = ProxySharedState::new();
clear_auth_probe_state_for_testing_in_shared(shared.as_ref());
clear_warned_secrets_for_testing_in_shared(shared.as_ref());
let mut config = ProxyConfig::default();
config.access.ignore_time_skew = true;
@@ -513,8 +506,8 @@ async fn mixed_valid_and_invalid_user_secrets_configuration() {
#[tokio::test]
async fn tls_emulation_fallback_when_cache_missing() {
let _guard = auth_probe_test_guard();
clear_auth_probe_state_for_testing();
let shared = ProxySharedState::new();
clear_auth_probe_state_for_testing_in_shared(shared.as_ref());
let secret = [0xAAu8; 16];
let mut config = test_config_with_secret_hex("aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa");
@@ -547,8 +540,8 @@ async fn tls_emulation_fallback_when_cache_missing() {
#[tokio::test]
async fn classic_mode_over_tls_transport_protocol_confusion() {
let _guard = auth_probe_test_guard();
clear_auth_probe_state_for_testing();
let shared = ProxySharedState::new();
clear_auth_probe_state_for_testing_in_shared(shared.as_ref());
let secret_hex = "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb";
let mut config = test_config_with_secret_hex(secret_hex);
@@ -608,8 +601,8 @@ fn generate_tg_nonce_never_emits_reserved_bytes() {
#[tokio::test(flavor = "multi_thread", worker_threads = 4)]
async fn dashmap_concurrent_saturation_stress() {
let _guard = auth_probe_test_guard();
clear_auth_probe_state_for_testing();
let shared = ProxySharedState::new();
clear_auth_probe_state_for_testing_in_shared(shared.as_ref());
let ip_a: IpAddr = "192.0.2.13".parse().unwrap();
let ip_b: IpAddr = "198.51.100.13".parse().unwrap();
@@ -617,9 +610,10 @@ async fn dashmap_concurrent_saturation_stress() {
for i in 0..100 {
let target_ip = if i % 2 == 0 { ip_a } else { ip_b };
let shared = shared.clone();
tasks.push(tokio::spawn(async move {
for _ in 0..50 {
auth_probe_record_failure(target_ip, Instant::now());
auth_probe_record_failure_in(shared.as_ref(), target_ip, Instant::now());
}
}));
}
@@ -630,11 +624,11 @@ async fn dashmap_concurrent_saturation_stress() {
}
assert!(
auth_probe_is_throttled_for_testing(ip_a),
auth_probe_is_throttled_for_testing_in_shared(shared.as_ref(), ip_a),
"IP A must be throttled after concurrent stress"
);
assert!(
auth_probe_is_throttled_for_testing(ip_b),
auth_probe_is_throttled_for_testing_in_shared(shared.as_ref(), ip_b),
"IP B must be throttled after concurrent stress"
);
}
@@ -661,15 +655,15 @@ fn prototag_invalid_bytes_fail_closed() {
#[test]
fn auth_probe_eviction_hash_collision_stress() {
let _guard = auth_probe_test_guard();
clear_auth_probe_state_for_testing();
let shared = ProxySharedState::new();
clear_auth_probe_state_for_testing_in_shared(shared.as_ref());
let state = auth_probe_state_map();
let state = auth_probe_state_for_testing_in_shared(shared.as_ref());
let now = Instant::now();
for i in 0..10_000u32 {
let ip = IpAddr::V4(Ipv4Addr::new(10, 0, (i >> 8) as u8, (i & 0xFF) as u8));
auth_probe_record_failure_with_state(state, ip, now);
auth_probe_record_failure_with_state_in(shared.as_ref(), state, ip, now);
}
assert!(

View File

@@ -44,12 +44,6 @@ fn make_valid_mtproto_handshake(
handshake
}
fn auth_probe_test_guard() -> std::sync::MutexGuard<'static, ()> {
auth_probe_test_lock()
.lock()
.unwrap_or_else(|poisoned| poisoned.into_inner())
}
fn test_config_with_secret_hex(secret_hex: &str) -> ProxyConfig {
let mut cfg = ProxyConfig::default();
cfg.access.users.clear();
@@ -67,8 +61,8 @@ fn test_config_with_secret_hex(secret_hex: &str) -> ProxyConfig {
#[tokio::test]
async fn mtproto_handshake_bit_flip_anywhere_rejected() {
let _guard = auth_probe_test_guard();
clear_auth_probe_state_for_testing();
let shared = ProxySharedState::new();
clear_auth_probe_state_for_testing_in_shared(shared.as_ref());
let secret_hex = "11223344556677889900aabbccddeeff";
let base = make_valid_mtproto_handshake(secret_hex, ProtoTag::Secure, 2);
@@ -181,26 +175,26 @@ async fn mtproto_handshake_timing_neutrality_mocked() {
#[tokio::test]
async fn auth_probe_throttle_saturation_stress() {
let _guard = auth_probe_test_guard();
clear_auth_probe_state_for_testing();
let shared = ProxySharedState::new();
clear_auth_probe_state_for_testing_in_shared(shared.as_ref());
let now = Instant::now();
// Record enough failures for one IP to trigger backoff
let target_ip = IpAddr::V4(Ipv4Addr::new(1, 1, 1, 1));
for _ in 0..AUTH_PROBE_BACKOFF_START_FAILS {
auth_probe_record_failure(target_ip, now);
auth_probe_record_failure_in(shared.as_ref(), target_ip, now);
}
assert!(auth_probe_is_throttled(target_ip, now));
assert!(auth_probe_is_throttled_in(shared.as_ref(), target_ip, now));
// Stress test with many unique IPs
for i in 0..500u32 {
let ip = IpAddr::V4(Ipv4Addr::new(203, 0, 113, (i % 256) as u8));
auth_probe_record_failure(ip, now);
auth_probe_record_failure_in(shared.as_ref(), ip, now);
}
let tracked = AUTH_PROBE_STATE.get().map(|state| state.len()).unwrap_or(0);
let tracked = auth_probe_state_for_testing_in_shared(shared.as_ref()).len();
assert!(
tracked <= AUTH_PROBE_TRACK_MAX_ENTRIES,
"auth probe state grew past hard cap: {tracked} > {AUTH_PROBE_TRACK_MAX_ENTRIES}"
@@ -209,8 +203,8 @@ async fn auth_probe_throttle_saturation_stress() {
#[tokio::test]
async fn mtproto_handshake_abridged_prefix_rejected() {
let _guard = auth_probe_test_guard();
clear_auth_probe_state_for_testing();
let shared = ProxySharedState::new();
clear_auth_probe_state_for_testing_in_shared(shared.as_ref());
let mut handshake = [0x5Au8; HANDSHAKE_LEN];
handshake[0] = 0xef; // Abridged prefix
@@ -235,8 +229,8 @@ async fn mtproto_handshake_abridged_prefix_rejected() {
#[tokio::test]
async fn mtproto_handshake_preferred_user_mismatch_continues() {
let _guard = auth_probe_test_guard();
clear_auth_probe_state_for_testing();
let shared = ProxySharedState::new();
clear_auth_probe_state_for_testing_in_shared(shared.as_ref());
let secret1_hex = "11111111111111111111111111111111";
let secret2_hex = "22222222222222222222222222222222";
@@ -278,8 +272,8 @@ async fn mtproto_handshake_preferred_user_mismatch_continues() {
#[tokio::test]
async fn mtproto_handshake_concurrent_flood_stability() {
let _guard = auth_probe_test_guard();
clear_auth_probe_state_for_testing();
let shared = ProxySharedState::new();
clear_auth_probe_state_for_testing_in_shared(shared.as_ref());
let secret_hex = "00112233445566778899aabbccddeeff";
let base = make_valid_mtproto_handshake(secret_hex, ProtoTag::Secure, 1);
@@ -320,8 +314,8 @@ async fn mtproto_handshake_concurrent_flood_stability() {
#[tokio::test]
async fn mtproto_replay_is_rejected_across_distinct_peers() {
let _guard = auth_probe_test_guard();
clear_auth_probe_state_for_testing();
let shared = ProxySharedState::new();
clear_auth_probe_state_for_testing_in_shared(shared.as_ref());
let secret_hex = "0123456789abcdeffedcba9876543210";
let handshake = make_valid_mtproto_handshake(secret_hex, ProtoTag::Secure, 2);
@@ -360,8 +354,8 @@ async fn mtproto_replay_is_rejected_across_distinct_peers() {
#[tokio::test]
async fn mtproto_blackhat_mutation_corpus_never_panics_and_stays_fail_closed() {
let _guard = auth_probe_test_guard();
clear_auth_probe_state_for_testing();
let shared = ProxySharedState::new();
clear_auth_probe_state_for_testing_in_shared(shared.as_ref());
let secret_hex = "89abcdef012345670123456789abcdef";
let base = make_valid_mtproto_handshake(secret_hex, ProtoTag::Secure, 2);
@@ -405,27 +399,27 @@ async fn mtproto_blackhat_mutation_corpus_never_panics_and_stays_fail_closed() {
#[tokio::test]
async fn auth_probe_success_clears_throttled_peer_state() {
let _guard = auth_probe_test_guard();
clear_auth_probe_state_for_testing();
let shared = ProxySharedState::new();
clear_auth_probe_state_for_testing_in_shared(shared.as_ref());
let target_ip = IpAddr::V4(Ipv4Addr::new(203, 0, 113, 90));
let now = Instant::now();
for _ in 0..AUTH_PROBE_BACKOFF_START_FAILS {
auth_probe_record_failure(target_ip, now);
auth_probe_record_failure_in(shared.as_ref(), target_ip, now);
}
assert!(auth_probe_is_throttled(target_ip, now));
assert!(auth_probe_is_throttled_in(shared.as_ref(), target_ip, now));
auth_probe_record_success(target_ip);
auth_probe_record_success_in(shared.as_ref(), target_ip);
assert!(
!auth_probe_is_throttled(target_ip, now + Duration::from_millis(1)),
!auth_probe_is_throttled_in(shared.as_ref(), target_ip, now + Duration::from_millis(1)),
"successful auth must clear per-peer throttle state"
);
}
#[tokio::test]
async fn mtproto_invalid_storm_over_cap_keeps_probe_map_hard_bounded() {
let _guard = auth_probe_test_guard();
clear_auth_probe_state_for_testing();
let shared = ProxySharedState::new();
clear_auth_probe_state_for_testing_in_shared(shared.as_ref());
let secret_hex = "00112233445566778899aabbccddeeff";
let mut invalid = make_valid_mtproto_handshake(secret_hex, ProtoTag::Secure, 2);
@@ -458,7 +452,7 @@ async fn mtproto_invalid_storm_over_cap_keeps_probe_map_hard_bounded() {
assert!(matches!(res, HandshakeResult::BadClient { .. }));
}
let tracked = AUTH_PROBE_STATE.get().map(|state| state.len()).unwrap_or(0);
let tracked = auth_probe_state_for_testing_in_shared(shared.as_ref()).len();
assert!(
tracked <= AUTH_PROBE_TRACK_MAX_ENTRIES,
"probe map must remain bounded under invalid storm: {tracked}"
@@ -467,8 +461,8 @@ async fn mtproto_invalid_storm_over_cap_keeps_probe_map_hard_bounded() {
#[tokio::test]
async fn mtproto_property_style_multi_bit_mutations_fail_closed_or_auth_only() {
let _guard = auth_probe_test_guard();
clear_auth_probe_state_for_testing();
let shared = ProxySharedState::new();
clear_auth_probe_state_for_testing_in_shared(shared.as_ref());
let secret_hex = "f0e1d2c3b4a5968778695a4b3c2d1e0f";
let base = make_valid_mtproto_handshake(secret_hex, ProtoTag::Secure, 2);
@@ -520,8 +514,8 @@ async fn mtproto_property_style_multi_bit_mutations_fail_closed_or_auth_only() {
#[tokio::test]
#[ignore = "heavy soak; run manually"]
async fn mtproto_blackhat_20k_mutation_soak_never_panics() {
let _guard = auth_probe_test_guard();
clear_auth_probe_state_for_testing();
let shared = ProxySharedState::new();
clear_auth_probe_state_for_testing_in_shared(shared.as_ref());
let secret_hex = "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa";
let base = make_valid_mtproto_handshake(secret_hex, ProtoTag::Secure, 2);

View File

@@ -3,15 +3,9 @@ use std::collections::HashSet;
use std::net::{IpAddr, Ipv4Addr};
use std::time::{Duration, Instant};
fn auth_probe_test_guard() -> std::sync::MutexGuard<'static, ()> {
auth_probe_test_lock()
.lock()
.unwrap_or_else(|poisoned| poisoned.into_inner())
}
#[test]
fn adversarial_large_state_offsets_escape_first_scan_window() {
let _guard = auth_probe_test_guard();
let shared = ProxySharedState::new();
let base = Instant::now();
let state_len = 65_536usize;
let scan_limit = 1_024usize;
@@ -25,7 +19,7 @@ fn adversarial_large_state_offsets_escape_first_scan_window() {
((i.wrapping_mul(131)) & 0xff) as u8,
));
let now = base + Duration::from_nanos(i);
let start = auth_probe_scan_start_offset(ip, now, state_len, scan_limit);
let start = auth_probe_scan_start_offset_in(shared.as_ref(), ip, now, state_len, scan_limit);
if start >= scan_limit {
saw_offset_outside_first_window = true;
break;
@@ -40,7 +34,7 @@ fn adversarial_large_state_offsets_escape_first_scan_window() {
#[test]
fn stress_large_state_offsets_cover_many_scan_windows() {
let _guard = auth_probe_test_guard();
let shared = ProxySharedState::new();
let base = Instant::now();
let state_len = 65_536usize;
let scan_limit = 1_024usize;
@@ -54,7 +48,7 @@ fn stress_large_state_offsets_cover_many_scan_windows() {
((i.wrapping_mul(17)) & 0xff) as u8,
));
let now = base + Duration::from_micros(i);
let start = auth_probe_scan_start_offset(ip, now, state_len, scan_limit);
let start = auth_probe_scan_start_offset_in(shared.as_ref(), ip, now, state_len, scan_limit);
covered_windows.insert(start / scan_limit);
}
@@ -68,7 +62,7 @@ fn stress_large_state_offsets_cover_many_scan_windows() {
#[test]
fn light_fuzz_offset_always_stays_inside_state_len() {
let _guard = auth_probe_test_guard();
let shared = ProxySharedState::new();
let mut seed = 0xC0FF_EE12_3456_789Au64;
let base = Instant::now();
@@ -86,7 +80,7 @@ fn light_fuzz_offset_always_stays_inside_state_len() {
let state_len = ((seed >> 16) as usize % 200_000).saturating_add(1);
let scan_limit = ((seed >> 40) as usize % 2_048).saturating_add(1);
let now = base + Duration::from_nanos(seed & 0x0fff);
let start = auth_probe_scan_start_offset(ip, now, state_len, scan_limit);
let start = auth_probe_scan_start_offset_in(shared.as_ref(), ip, now, state_len, scan_limit);
assert!(
start < state_len,

View File

@@ -2,68 +2,62 @@ use super::*;
use std::net::{IpAddr, Ipv4Addr};
use std::time::{Duration, Instant};
fn auth_probe_test_guard() -> std::sync::MutexGuard<'static, ()> {
auth_probe_test_lock()
.lock()
.unwrap_or_else(|poisoned| poisoned.into_inner())
}
#[test]
fn positive_preauth_throttle_activates_after_failure_threshold() {
let _guard = auth_probe_test_guard();
clear_auth_probe_state_for_testing();
let shared = ProxySharedState::new();
clear_auth_probe_state_for_testing_in_shared(shared.as_ref());
let ip = IpAddr::V4(Ipv4Addr::new(198, 51, 100, 20));
let now = Instant::now();
for _ in 0..AUTH_PROBE_BACKOFF_START_FAILS {
auth_probe_record_failure(ip, now);
auth_probe_record_failure_in(shared.as_ref(), ip, now);
}
assert!(
auth_probe_is_throttled(ip, now),
auth_probe_is_throttled_in(shared.as_ref(), ip, now),
"peer must be throttled once fail streak reaches threshold"
);
}
#[test]
fn negative_unrelated_peer_remains_unthrottled() {
let _guard = auth_probe_test_guard();
clear_auth_probe_state_for_testing();
let shared = ProxySharedState::new();
clear_auth_probe_state_for_testing_in_shared(shared.as_ref());
let attacker = IpAddr::V4(Ipv4Addr::new(203, 0, 113, 12));
let benign = IpAddr::V4(Ipv4Addr::new(203, 0, 113, 13));
let now = Instant::now();
for _ in 0..AUTH_PROBE_BACKOFF_START_FAILS {
auth_probe_record_failure(attacker, now);
auth_probe_record_failure_in(shared.as_ref(), attacker, now);
}
assert!(auth_probe_is_throttled(attacker, now));
assert!(auth_probe_is_throttled_in(shared.as_ref(), attacker, now));
assert!(
!auth_probe_is_throttled(benign, now),
!auth_probe_is_throttled_in(shared.as_ref(), benign, now),
"throttle state must stay scoped to normalized peer key"
);
}
#[test]
fn edge_expired_entry_is_pruned_and_no_longer_throttled() {
let _guard = auth_probe_test_guard();
clear_auth_probe_state_for_testing();
let shared = ProxySharedState::new();
clear_auth_probe_state_for_testing_in_shared(shared.as_ref());
let ip = IpAddr::V4(Ipv4Addr::new(192, 0, 2, 41));
let base = Instant::now();
for _ in 0..AUTH_PROBE_BACKOFF_START_FAILS {
auth_probe_record_failure(ip, base);
auth_probe_record_failure_in(shared.as_ref(), ip, base);
}
let expired_at = base + Duration::from_secs(AUTH_PROBE_TRACK_RETENTION_SECS + 1);
assert!(
!auth_probe_is_throttled(ip, expired_at),
!auth_probe_is_throttled_in(shared.as_ref(), ip, expired_at),
"expired entries must not keep throttling peers"
);
let state = auth_probe_state_map();
let state = auth_probe_state_for_testing_in_shared(shared.as_ref());
assert!(
state.get(&normalize_auth_probe_ip(ip)).is_none(),
"expired lookup should prune stale state"
@@ -72,36 +66,36 @@ fn edge_expired_entry_is_pruned_and_no_longer_throttled() {
#[test]
fn adversarial_saturation_grace_requires_extra_failures_before_preauth_throttle() {
let _guard = auth_probe_test_guard();
clear_auth_probe_state_for_testing();
let shared = ProxySharedState::new();
clear_auth_probe_state_for_testing_in_shared(shared.as_ref());
let ip = IpAddr::V4(Ipv4Addr::new(198, 18, 0, 7));
let now = Instant::now();
for _ in 0..AUTH_PROBE_BACKOFF_START_FAILS {
auth_probe_record_failure(ip, now);
auth_probe_record_failure_in(shared.as_ref(), ip, now);
}
auth_probe_note_saturation(now);
auth_probe_note_saturation_in(shared.as_ref(), now);
assert!(
!auth_probe_should_apply_preauth_throttle(ip, now),
!auth_probe_should_apply_preauth_throttle_in(shared.as_ref(), ip, now),
"during global saturation, peer must receive configured grace window"
);
for _ in 0..AUTH_PROBE_SATURATION_GRACE_FAILS {
auth_probe_record_failure(ip, now + Duration::from_millis(1));
auth_probe_record_failure_in(shared.as_ref(), ip, now + Duration::from_millis(1));
}
assert!(
auth_probe_should_apply_preauth_throttle(ip, now + Duration::from_millis(1)),
auth_probe_should_apply_preauth_throttle_in(shared.as_ref(), ip, now + Duration::from_millis(1)),
"after grace failures are exhausted, preauth throttle must activate"
);
}
#[test]
fn integration_over_cap_insertion_keeps_probe_map_bounded() {
let _guard = auth_probe_test_guard();
clear_auth_probe_state_for_testing();
let shared = ProxySharedState::new();
clear_auth_probe_state_for_testing_in_shared(shared.as_ref());
let now = Instant::now();
for idx in 0..(AUTH_PROBE_TRACK_MAX_ENTRIES + 1024) {
@@ -111,10 +105,10 @@ fn integration_over_cap_insertion_keeps_probe_map_bounded() {
((idx / 256) % 256) as u8,
(idx % 256) as u8,
));
auth_probe_record_failure(ip, now);
auth_probe_record_failure_in(shared.as_ref(), ip, now);
}
let tracked = auth_probe_state_map().len();
let tracked = auth_probe_state_for_testing_in_shared(shared.as_ref()).len();
assert!(
tracked <= AUTH_PROBE_TRACK_MAX_ENTRIES,
"probe map must remain hard bounded under insertion storm"
@@ -123,8 +117,8 @@ fn integration_over_cap_insertion_keeps_probe_map_bounded() {
#[test]
fn light_fuzz_randomized_failures_preserve_cap_and_nonzero_streaks() {
let _guard = auth_probe_test_guard();
clear_auth_probe_state_for_testing();
let shared = ProxySharedState::new();
clear_auth_probe_state_for_testing_in_shared(shared.as_ref());
let mut seed = 0x4D53_5854_6F66_6175u64;
let now = Instant::now();
@@ -140,10 +134,10 @@ fn light_fuzz_randomized_failures_preserve_cap_and_nonzero_streaks() {
(seed >> 8) as u8,
seed as u8,
));
auth_probe_record_failure(ip, now + Duration::from_millis((seed & 0x3f) as u64));
auth_probe_record_failure_in(shared.as_ref(), ip, now + Duration::from_millis((seed & 0x3f) as u64));
}
let state = auth_probe_state_map();
let state = auth_probe_state_for_testing_in_shared(shared.as_ref());
assert!(state.len() <= AUTH_PROBE_TRACK_MAX_ENTRIES);
for entry in state.iter() {
assert!(entry.value().fail_streak > 0);
@@ -152,13 +146,14 @@ fn light_fuzz_randomized_failures_preserve_cap_and_nonzero_streaks() {
#[tokio::test(flavor = "multi_thread", worker_threads = 4)]
async fn stress_parallel_failure_flood_keeps_state_hard_capped() {
let _guard = auth_probe_test_guard();
clear_auth_probe_state_for_testing();
let shared = ProxySharedState::new();
clear_auth_probe_state_for_testing_in_shared(shared.as_ref());
let start = Instant::now();
let mut tasks = Vec::new();
for worker in 0..8u8 {
let shared = shared.clone();
tasks.push(tokio::spawn(async move {
for i in 0..4096u32 {
let ip = IpAddr::V4(Ipv4Addr::new(
@@ -167,7 +162,7 @@ async fn stress_parallel_failure_flood_keeps_state_hard_capped() {
((i >> 8) & 0xff) as u8,
(i & 0xff) as u8,
));
auth_probe_record_failure(ip, start + Duration::from_millis((i % 4) as u64));
auth_probe_record_failure_in(shared.as_ref(), ip, start + Duration::from_millis((i % 4) as u64));
}
}));
}
@@ -176,12 +171,12 @@ async fn stress_parallel_failure_flood_keeps_state_hard_capped() {
task.await.expect("stress worker must not panic");
}
let tracked = auth_probe_state_map().len();
let tracked = auth_probe_state_for_testing_in_shared(shared.as_ref()).len();
assert!(
tracked <= AUTH_PROBE_TRACK_MAX_ENTRIES,
"parallel failure flood must not exceed cap"
);
let probe = IpAddr::V4(Ipv4Addr::new(172, 3, 4, 5));
let _ = auth_probe_is_throttled(probe, start + Duration::from_millis(2));
let _ = auth_probe_is_throttled_in(shared.as_ref(), probe, start + Duration::from_millis(2));
}

View File

@@ -2,20 +2,14 @@ use super::*;
use std::net::{IpAddr, Ipv4Addr};
use std::time::{Duration, Instant};
fn auth_probe_test_guard() -> std::sync::MutexGuard<'static, ()> {
auth_probe_test_lock()
.lock()
.unwrap_or_else(|poisoned| poisoned.into_inner())
}
#[test]
fn edge_zero_state_len_yields_zero_start_offset() {
let _guard = auth_probe_test_guard();
let shared = ProxySharedState::new();
let ip = IpAddr::V4(Ipv4Addr::new(198, 51, 100, 44));
let now = Instant::now();
assert_eq!(
auth_probe_scan_start_offset(ip, now, 0, 16),
auth_probe_scan_start_offset_in(shared.as_ref(), ip, now, 0, 16),
0,
"empty map must not produce non-zero scan offset"
);
@@ -23,7 +17,7 @@ fn edge_zero_state_len_yields_zero_start_offset() {
#[test]
fn adversarial_large_state_must_allow_start_offset_outside_scan_budget_window() {
let _guard = auth_probe_test_guard();
let shared = ProxySharedState::new();
let base = Instant::now();
let scan_limit = 16usize;
let state_len = 65_536usize;
@@ -37,7 +31,7 @@ fn adversarial_large_state_must_allow_start_offset_outside_scan_budget_window()
(i & 0xff) as u8,
));
let now = base + Duration::from_micros(i as u64);
let start = auth_probe_scan_start_offset(ip, now, state_len, scan_limit);
let start = auth_probe_scan_start_offset_in(shared.as_ref(), ip, now, state_len, scan_limit);
assert!(
start < state_len,
"start offset must stay within state length; start={start}, len={state_len}"
@@ -56,12 +50,12 @@ fn adversarial_large_state_must_allow_start_offset_outside_scan_budget_window()
#[test]
fn positive_state_smaller_than_scan_limit_caps_to_state_len() {
let _guard = auth_probe_test_guard();
let shared = ProxySharedState::new();
let ip = IpAddr::V4(Ipv4Addr::new(192, 0, 2, 17));
let now = Instant::now();
for state_len in 1..32usize {
let start = auth_probe_scan_start_offset(ip, now, state_len, 64);
let start = auth_probe_scan_start_offset_in(shared.as_ref(), ip, now, state_len, 64);
assert!(
start < state_len,
"start offset must never exceed state length when scan limit is larger"
@@ -71,7 +65,7 @@ fn positive_state_smaller_than_scan_limit_caps_to_state_len() {
#[test]
fn light_fuzz_scan_offset_budget_never_exceeds_effective_window() {
let _guard = auth_probe_test_guard();
let shared = ProxySharedState::new();
let mut seed = 0x5A41_5356_4C32_3236u64;
let base = Instant::now();
@@ -89,7 +83,7 @@ fn light_fuzz_scan_offset_budget_never_exceeds_effective_window() {
let state_len = ((seed >> 8) as usize % 131_072).saturating_add(1);
let scan_limit = ((seed >> 32) as usize % 512).saturating_add(1);
let now = base + Duration::from_nanos(seed & 0xffff);
let start = auth_probe_scan_start_offset(ip, now, state_len, scan_limit);
let start = auth_probe_scan_start_offset_in(shared.as_ref(), ip, now, state_len, scan_limit);
assert!(
start < state_len,

View File

@@ -3,22 +3,16 @@ use std::collections::HashSet;
use std::net::{IpAddr, Ipv4Addr};
use std::time::{Duration, Instant};
fn auth_probe_test_guard() -> std::sync::MutexGuard<'static, ()> {
auth_probe_test_lock()
.lock()
.unwrap_or_else(|poisoned| poisoned.into_inner())
}
#[test]
fn positive_same_ip_moving_time_yields_diverse_scan_offsets() {
let _guard = auth_probe_test_guard();
let shared = ProxySharedState::new();
let ip = IpAddr::V4(Ipv4Addr::new(198, 51, 100, 77));
let base = Instant::now();
let mut uniq = HashSet::new();
for i in 0..512u64 {
let now = base + Duration::from_nanos(i);
let offset = auth_probe_scan_start_offset(ip, now, 65_536, 16);
let offset = auth_probe_scan_start_offset_in(shared.as_ref(), ip, now, 65_536, 16);
uniq.insert(offset);
}
@@ -31,7 +25,7 @@ fn positive_same_ip_moving_time_yields_diverse_scan_offsets() {
#[test]
fn adversarial_many_ips_same_time_spreads_offsets_without_bias_collapse() {
let _guard = auth_probe_test_guard();
let shared = ProxySharedState::new();
let now = Instant::now();
let mut uniq = HashSet::new();
@@ -42,7 +36,7 @@ fn adversarial_many_ips_same_time_spreads_offsets_without_bias_collapse() {
i as u8,
(255 - (i as u8)),
));
uniq.insert(auth_probe_scan_start_offset(ip, now, 65_536, 16));
uniq.insert(auth_probe_scan_start_offset_in(shared.as_ref(), ip, now, 65_536, 16));
}
assert!(
@@ -54,12 +48,13 @@ fn adversarial_many_ips_same_time_spreads_offsets_without_bias_collapse() {
#[tokio::test(flavor = "multi_thread", worker_threads = 4)]
async fn stress_parallel_failure_churn_under_saturation_remains_capped_and_live() {
let _guard = auth_probe_test_guard();
clear_auth_probe_state_for_testing();
let shared = ProxySharedState::new();
clear_auth_probe_state_for_testing_in_shared(shared.as_ref());
let start = Instant::now();
let mut workers = Vec::new();
for worker in 0..8u8 {
let shared = shared.clone();
workers.push(tokio::spawn(async move {
for i in 0..8192u32 {
let ip = IpAddr::V4(Ipv4Addr::new(
@@ -68,7 +63,7 @@ async fn stress_parallel_failure_churn_under_saturation_remains_capped_and_live(
((i >> 8) & 0xff) as u8,
(i & 0xff) as u8,
));
auth_probe_record_failure(ip, start + Duration::from_micros((i % 128) as u64));
auth_probe_record_failure_in(shared.as_ref(), ip, start + Duration::from_micros((i % 128) as u64));
}
}));
}
@@ -78,17 +73,17 @@ async fn stress_parallel_failure_churn_under_saturation_remains_capped_and_live(
}
assert!(
auth_probe_state_map().len() <= AUTH_PROBE_TRACK_MAX_ENTRIES,
auth_probe_state_for_testing_in_shared(shared.as_ref()).len() <= AUTH_PROBE_TRACK_MAX_ENTRIES,
"state must remain hard-capped under parallel saturation churn"
);
let probe = IpAddr::V4(Ipv4Addr::new(10, 4, 1, 1));
let _ = auth_probe_should_apply_preauth_throttle(probe, start + Duration::from_millis(1));
let _ = auth_probe_should_apply_preauth_throttle_in(shared.as_ref(), probe, start + Duration::from_millis(1));
}
#[test]
fn light_fuzz_scan_offset_stays_within_window_for_randomized_inputs() {
let _guard = auth_probe_test_guard();
let shared = ProxySharedState::new();
let mut seed = 0xA55A_1357_2468_9BDFu64;
let base = Instant::now();
@@ -107,7 +102,7 @@ fn light_fuzz_scan_offset_stays_within_window_for_randomized_inputs() {
let scan_limit = ((seed >> 40) as usize % 1024).saturating_add(1);
let now = base + Duration::from_nanos(seed & 0x1fff);
let offset = auth_probe_scan_start_offset(ip, now, state_len, scan_limit);
let offset = auth_probe_scan_start_offset_in(shared.as_ref(), ip, now, state_len, scan_limit);
assert!(
offset < state_len,
"scan offset must always remain inside state length"

View File

@@ -36,16 +36,10 @@ fn make_valid_tls_handshake(secret: &[u8], timestamp: u32) -> Vec<u8> {
handshake
}
fn test_lock_guard() -> std::sync::MutexGuard<'static, ()> {
auth_probe_test_lock()
.lock()
.unwrap_or_else(|poisoned| poisoned.into_inner())
}
#[tokio::test]
async fn handshake_baseline_probe_always_falls_back_to_masking() {
let _guard = test_lock_guard();
clear_auth_probe_state_for_testing();
let shared = ProxySharedState::new();
clear_auth_probe_state_for_testing_in_shared(shared.as_ref());
let cfg = test_config_with_secret_hex("11111111111111111111111111111111");
let replay_checker = ReplayChecker::new(64, Duration::from_secs(60));
@@ -70,8 +64,8 @@ async fn handshake_baseline_probe_always_falls_back_to_masking() {
#[tokio::test]
async fn handshake_baseline_invalid_secret_triggers_fallback_not_error_response() {
let _guard = test_lock_guard();
clear_auth_probe_state_for_testing();
let shared = ProxySharedState::new();
clear_auth_probe_state_for_testing_in_shared(shared.as_ref());
let good_secret = [0x22u8; 16];
let bad_cfg = test_config_with_secret_hex("33333333333333333333333333333333");
@@ -97,8 +91,8 @@ async fn handshake_baseline_invalid_secret_triggers_fallback_not_error_response(
#[tokio::test]
async fn handshake_baseline_auth_probe_streak_increments_per_ip() {
let _guard = test_lock_guard();
clear_auth_probe_state_for_testing();
let shared = ProxySharedState::new();
clear_auth_probe_state_for_testing_in_shared(shared.as_ref());
let cfg = test_config_with_secret_hex("44444444444444444444444444444444");
let replay_checker = ReplayChecker::new(64, Duration::from_secs(60));
@@ -109,7 +103,7 @@ async fn handshake_baseline_auth_probe_streak_increments_per_ip() {
let bad_probe = b"\x16\x03\x01\x00";
for expected in 1..=3 {
let res = handle_tls_handshake(
let res = handle_tls_handshake_with_shared(
bad_probe,
tokio::io::empty(),
tokio::io::sink(),
@@ -118,43 +112,44 @@ async fn handshake_baseline_auth_probe_streak_increments_per_ip() {
&replay_checker,
&rng,
None,
shared.as_ref(),
)
.await;
assert!(matches!(res, HandshakeResult::BadClient { .. }));
assert_eq!(auth_probe_fail_streak_for_testing(peer.ip()), Some(expected));
assert_eq!(auth_probe_fail_streak_for_testing(untouched_ip), None);
assert_eq!(auth_probe_fail_streak_for_testing_in_shared(shared.as_ref(), peer.ip()), Some(expected));
assert_eq!(auth_probe_fail_streak_for_testing_in_shared(shared.as_ref(), untouched_ip), None);
}
}
#[test]
fn handshake_baseline_saturation_fires_at_compile_time_threshold() {
let _guard = test_lock_guard();
clear_auth_probe_state_for_testing();
let shared = ProxySharedState::new();
clear_auth_probe_state_for_testing_in_shared(shared.as_ref());
let ip = IpAddr::V4(Ipv4Addr::new(198, 51, 100, 33));
let now = Instant::now();
for _ in 0..AUTH_PROBE_BACKOFF_START_FAILS.saturating_sub(1) {
auth_probe_record_failure(ip, now);
auth_probe_record_failure_in(shared.as_ref(), ip, now);
}
assert!(!auth_probe_is_throttled(ip, now));
assert!(!auth_probe_is_throttled_in(shared.as_ref(), ip, now));
auth_probe_record_failure(ip, now);
assert!(auth_probe_is_throttled(ip, now));
auth_probe_record_failure_in(shared.as_ref(), ip, now);
assert!(auth_probe_is_throttled_in(shared.as_ref(), ip, now));
}
#[test]
fn handshake_baseline_repeated_probes_streak_monotonic() {
let _guard = test_lock_guard();
clear_auth_probe_state_for_testing();
let shared = ProxySharedState::new();
clear_auth_probe_state_for_testing_in_shared(shared.as_ref());
let ip = IpAddr::V4(Ipv4Addr::new(203, 0, 113, 42));
let now = Instant::now();
let mut prev = 0u32;
for _ in 0..100 {
auth_probe_record_failure(ip, now);
let current = auth_probe_fail_streak_for_testing(ip).unwrap_or(0);
auth_probe_record_failure_in(shared.as_ref(), ip, now);
let current = auth_probe_fail_streak_for_testing_in_shared(shared.as_ref(), ip).unwrap_or(0);
assert!(current >= prev, "streak must be monotonic");
prev = current;
}
@@ -162,14 +157,14 @@ fn handshake_baseline_repeated_probes_streak_monotonic() {
#[test]
fn handshake_baseline_throttled_ip_incurs_backoff_delay() {
let _guard = test_lock_guard();
clear_auth_probe_state_for_testing();
let shared = ProxySharedState::new();
clear_auth_probe_state_for_testing_in_shared(shared.as_ref());
let ip = IpAddr::V4(Ipv4Addr::new(198, 51, 100, 44));
let now = Instant::now();
for _ in 0..AUTH_PROBE_BACKOFF_START_FAILS {
auth_probe_record_failure(ip, now);
auth_probe_record_failure_in(shared.as_ref(), ip, now);
}
let delay = auth_probe_backoff(AUTH_PROBE_BACKOFF_START_FAILS);
@@ -178,14 +173,14 @@ fn handshake_baseline_throttled_ip_incurs_backoff_delay() {
let before_expiry = now + delay.saturating_sub(Duration::from_millis(1));
let after_expiry = now + delay + Duration::from_millis(1);
assert!(auth_probe_is_throttled(ip, before_expiry));
assert!(!auth_probe_is_throttled(ip, after_expiry));
assert!(auth_probe_is_throttled_in(shared.as_ref(), ip, before_expiry));
assert!(!auth_probe_is_throttled_in(shared.as_ref(), ip, after_expiry));
}
#[tokio::test]
async fn handshake_baseline_malformed_probe_frames_fail_closed_to_masking() {
let _guard = test_lock_guard();
clear_auth_probe_state_for_testing();
let shared = ProxySharedState::new();
clear_auth_probe_state_for_testing_in_shared(shared.as_ref());
let cfg = test_config_with_secret_hex("55555555555555555555555555555555");
let replay_checker = ReplayChecker::new(64, Duration::from_secs(60));

View File

@@ -67,16 +67,10 @@ fn test_config_with_secret_hex(secret_hex: &str) -> ProxyConfig {
cfg
}
fn auth_probe_test_guard() -> MutexGuard<'static, ()> {
auth_probe_test_lock()
.lock()
.unwrap_or_else(|poisoned| poisoned.into_inner())
}
#[tokio::test]
async fn mtproto_handshake_duplicate_digest_is_replayed_on_second_attempt() {
let _guard = auth_probe_test_guard();
clear_auth_probe_state_for_testing();
let shared = ProxySharedState::new();
clear_auth_probe_state_for_testing_in_shared(shared.as_ref());
let secret_hex = "11223344556677889900aabbccddeeff";
let base = make_valid_mtproto_handshake(secret_hex, ProtoTag::Secure, 2);
@@ -110,13 +104,13 @@ async fn mtproto_handshake_duplicate_digest_is_replayed_on_second_attempt() {
.await;
assert!(matches!(second, HandshakeResult::BadClient { .. }));
clear_auth_probe_state_for_testing();
clear_auth_probe_state_for_testing_in_shared(shared.as_ref());
}
#[tokio::test]
async fn mtproto_handshake_fuzz_corpus_never_panics_and_stays_fail_closed() {
let _guard = auth_probe_test_guard();
clear_auth_probe_state_for_testing();
let shared = ProxySharedState::new();
clear_auth_probe_state_for_testing_in_shared(shared.as_ref());
let secret_hex = "00112233445566778899aabbccddeeff";
let base = make_valid_mtproto_handshake(secret_hex, ProtoTag::Secure, 1);
@@ -178,13 +172,13 @@ async fn mtproto_handshake_fuzz_corpus_never_panics_and_stays_fail_closed() {
);
}
clear_auth_probe_state_for_testing();
clear_auth_probe_state_for_testing_in_shared(shared.as_ref());
}
#[tokio::test]
async fn mtproto_handshake_mixed_corpus_never_panics_and_exact_duplicates_are_rejected() {
let _guard = auth_probe_test_guard();
clear_auth_probe_state_for_testing();
let shared = ProxySharedState::new();
clear_auth_probe_state_for_testing_in_shared(shared.as_ref());
let secret_hex = "99887766554433221100ffeeddccbbaa";
let base = make_valid_mtproto_handshake(secret_hex, ProtoTag::Secure, 4);
@@ -274,5 +268,5 @@ async fn mtproto_handshake_mixed_corpus_never_panics_and_exact_duplicates_are_re
);
}
clear_auth_probe_state_for_testing();
clear_auth_probe_state_for_testing_in_shared(shared.as_ref());
}

View File

@@ -11,12 +11,6 @@ use tokio::sync::Barrier;
// --- Helpers ---
fn auth_probe_test_guard() -> std::sync::MutexGuard<'static, ()> {
auth_probe_test_lock()
.lock()
.unwrap_or_else(|poisoned| poisoned.into_inner())
}
fn test_config_with_secret_hex(secret_hex: &str) -> ProxyConfig {
let mut cfg = ProxyConfig::default();
cfg.access.users.clear();
@@ -164,8 +158,8 @@ fn make_valid_tls_client_hello_with_sni_and_alpn(
#[tokio::test]
async fn server_hello_delay_bypassed_if_max_is_zero_despite_high_min() {
let _guard = auth_probe_test_guard();
clear_auth_probe_state_for_testing();
let shared = ProxySharedState::new();
clear_auth_probe_state_for_testing_in_shared(shared.as_ref());
let secret = [0x1Au8; 16];
let mut config = test_config_with_secret_hex("1a1a1a1a1a1a1a1a1a1a1a1a1a1a1a1a");
@@ -201,10 +195,10 @@ async fn server_hello_delay_bypassed_if_max_is_zero_despite_high_min() {
#[test]
fn auth_probe_backoff_extreme_fail_streak_clamps_safely() {
let _guard = auth_probe_test_guard();
clear_auth_probe_state_for_testing();
let shared = ProxySharedState::new();
clear_auth_probe_state_for_testing_in_shared(shared.as_ref());
let state = auth_probe_state_map();
let state = auth_probe_state_for_testing_in_shared(shared.as_ref());
let peer_ip = IpAddr::V4(Ipv4Addr::new(198, 51, 100, 99));
let now = Instant::now();
@@ -217,7 +211,7 @@ fn auth_probe_backoff_extreme_fail_streak_clamps_safely() {
},
);
auth_probe_record_failure_with_state(&state, peer_ip, now);
auth_probe_record_failure_with_state_in(shared.as_ref(), &state, peer_ip, now);
let updated = state.get(&peer_ip).unwrap();
assert_eq!(updated.fail_streak, u32::MAX);
@@ -270,8 +264,8 @@ fn generate_tg_nonce_cryptographic_uniqueness_and_entropy() {
#[tokio::test]
async fn mtproto_multi_user_decryption_isolation() {
let _guard = auth_probe_test_guard();
clear_auth_probe_state_for_testing();
let shared = ProxySharedState::new();
clear_auth_probe_state_for_testing_in_shared(shared.as_ref());
let mut config = ProxyConfig::default();
config.general.modes.secure = true;
@@ -323,10 +317,8 @@ async fn mtproto_multi_user_decryption_isolation() {
#[tokio::test(flavor = "multi_thread", worker_threads = 4)]
async fn invalid_secret_warning_lock_contention_and_bound() {
let _guard = warned_secrets_test_lock()
.lock()
.unwrap_or_else(|poisoned| poisoned.into_inner());
clear_warned_secrets_for_testing();
let shared = ProxySharedState::new();
clear_warned_secrets_for_testing_in_shared(shared.as_ref());
let tasks = 50;
let iterations_per_task = 100;
@@ -335,11 +327,12 @@ async fn invalid_secret_warning_lock_contention_and_bound() {
for t in 0..tasks {
let b = barrier.clone();
let shared = shared.clone();
handles.push(tokio::spawn(async move {
b.wait().await;
for i in 0..iterations_per_task {
let user_name = format!("contention_user_{}_{}", t, i);
warn_invalid_secret_once(&user_name, "invalid_hex", ACCESS_SECRET_BYTES, None);
warn_invalid_secret_once_in(shared.as_ref(), &user_name, "invalid_hex", ACCESS_SECRET_BYTES, None);
}
}));
}
@@ -348,7 +341,7 @@ async fn invalid_secret_warning_lock_contention_and_bound() {
handle.await.unwrap();
}
let warned = INVALID_SECRET_WARNED.get().unwrap();
let warned = warned_secrets_for_testing_in_shared(shared.as_ref());
let guard = warned
.lock()
.unwrap_or_else(|poisoned| poisoned.into_inner());
@@ -362,8 +355,8 @@ async fn invalid_secret_warning_lock_contention_and_bound() {
#[tokio::test(flavor = "multi_thread", worker_threads = 4)]
async fn mtproto_strict_concurrent_replay_race_condition() {
let _guard = auth_probe_test_guard();
clear_auth_probe_state_for_testing();
let shared = ProxySharedState::new();
clear_auth_probe_state_for_testing_in_shared(shared.as_ref());
let secret_hex = "4A4A4A4A4A4A4A4A4A4A4A4A4A4A4A4A";
let config = Arc::new(test_config_with_secret_hex(secret_hex));
@@ -428,8 +421,8 @@ async fn mtproto_strict_concurrent_replay_race_condition() {
#[tokio::test]
async fn tls_alpn_zero_length_protocol_handled_safely() {
let _guard = auth_probe_test_guard();
clear_auth_probe_state_for_testing();
let shared = ProxySharedState::new();
clear_auth_probe_state_for_testing_in_shared(shared.as_ref());
let secret = [0x5Bu8; 16];
let mut config = test_config_with_secret_hex("5b5b5b5b5b5b5b5b5b5b5b5b5b5b5b5b");
@@ -461,8 +454,8 @@ async fn tls_alpn_zero_length_protocol_handled_safely() {
#[tokio::test]
async fn tls_sni_massive_hostname_does_not_panic() {
let _guard = auth_probe_test_guard();
clear_auth_probe_state_for_testing();
let shared = ProxySharedState::new();
clear_auth_probe_state_for_testing_in_shared(shared.as_ref());
let secret = [0x6Cu8; 16];
let config = test_config_with_secret_hex("6c6c6c6c6c6c6c6c6c6c6c6c6c6c6c6c");
@@ -497,8 +490,8 @@ async fn tls_sni_massive_hostname_does_not_panic() {
#[tokio::test]
async fn tls_progressive_truncation_fuzzing_no_panics() {
let _guard = auth_probe_test_guard();
clear_auth_probe_state_for_testing();
let shared = ProxySharedState::new();
clear_auth_probe_state_for_testing_in_shared(shared.as_ref());
let secret = [0x7Du8; 16];
let config = test_config_with_secret_hex("7d7d7d7d7d7d7d7d7d7d7d7d7d7d7d7d");
@@ -535,8 +528,8 @@ async fn tls_progressive_truncation_fuzzing_no_panics() {
#[tokio::test]
async fn mtproto_pure_entropy_fuzzing_no_panics() {
let _guard = auth_probe_test_guard();
clear_auth_probe_state_for_testing();
let shared = ProxySharedState::new();
clear_auth_probe_state_for_testing_in_shared(shared.as_ref());
let config = test_config_with_secret_hex("8e8e8e8e8e8e8e8e8e8e8e8e8e8e8e8e");
let replay_checker = ReplayChecker::new(128, Duration::from_secs(60));
@@ -569,10 +562,8 @@ async fn mtproto_pure_entropy_fuzzing_no_panics() {
#[test]
fn decode_user_secret_odd_length_hex_rejection() {
let _guard = warned_secrets_test_lock()
.lock()
.unwrap_or_else(|poisoned| poisoned.into_inner());
clear_warned_secrets_for_testing();
let shared = ProxySharedState::new();
clear_warned_secrets_for_testing_in_shared(shared.as_ref());
let mut config = ProxyConfig::default();
config.access.users.clear();
@@ -581,7 +572,7 @@ fn decode_user_secret_odd_length_hex_rejection() {
"1234567890123456789012345678901".to_string(),
);
let decoded = decode_user_secrets(&config, None);
let decoded = decode_user_secrets_in(shared.as_ref(), &config, None);
assert!(
decoded.is_empty(),
"Odd-length hex string must be gracefully rejected by hex::decode without unwrapping"
@@ -590,10 +581,10 @@ fn decode_user_secret_odd_length_hex_rejection() {
#[test]
fn saturation_grace_pre_existing_high_fail_streak_immediate_throttle() {
let _guard = auth_probe_test_guard();
clear_auth_probe_state_for_testing();
let shared = ProxySharedState::new();
clear_auth_probe_state_for_testing_in_shared(shared.as_ref());
let state = auth_probe_state_map();
let state = auth_probe_state_for_testing_in_shared(shared.as_ref());
let peer_ip = IpAddr::V4(Ipv4Addr::new(198, 51, 100, 112));
let now = Instant::now();
@@ -608,7 +599,7 @@ fn saturation_grace_pre_existing_high_fail_streak_immediate_throttle() {
);
{
let mut guard = auth_probe_saturation_state_lock();
let mut guard = auth_probe_saturation_state_lock_for_testing_in_shared(shared.as_ref());
*guard = Some(AuthProbeSaturationState {
fail_streak: AUTH_PROBE_BACKOFF_START_FAILS,
blocked_until: now + Duration::from_secs(5),
@@ -616,7 +607,7 @@ fn saturation_grace_pre_existing_high_fail_streak_immediate_throttle() {
});
}
let is_throttled = auth_probe_should_apply_preauth_throttle(peer_ip, now);
let is_throttled = auth_probe_should_apply_preauth_throttle_in(shared.as_ref(), peer_ip, now);
assert!(
is_throttled,
"A peer with a pre-existing high fail streak must be immediately throttled when saturation begins, receiving no unearned grace period"
@@ -625,21 +616,21 @@ fn saturation_grace_pre_existing_high_fail_streak_immediate_throttle() {
#[test]
fn auth_probe_saturation_note_resets_retention_window() {
let _guard = auth_probe_test_guard();
clear_auth_probe_state_for_testing();
let shared = ProxySharedState::new();
clear_auth_probe_state_for_testing_in_shared(shared.as_ref());
let base_time = Instant::now();
auth_probe_note_saturation(base_time);
auth_probe_note_saturation_in(shared.as_ref(), base_time);
let later = base_time + Duration::from_secs(AUTH_PROBE_TRACK_RETENTION_SECS - 1);
auth_probe_note_saturation(later);
auth_probe_note_saturation_in(shared.as_ref(), later);
let check_time = base_time + Duration::from_secs(AUTH_PROBE_TRACK_RETENTION_SECS + 5);
// This call may return false if backoff has elapsed, but it must not clear
// the saturation state because `later` refreshed last_seen.
let _ = auth_probe_saturation_is_throttled_at_for_testing(check_time);
let guard = auth_probe_saturation_state_lock();
let _ = auth_probe_saturation_is_throttled_at_for_testing_in_shared(shared.as_ref(), check_time);
let guard = auth_probe_saturation_state_lock_for_testing_in_shared(shared.as_ref());
assert!(
guard.is_some(),
"Ongoing saturation notes must refresh last_seen so saturation state remains retained past the original window"

View File

@@ -6,12 +6,6 @@ use std::sync::Arc;
use std::time::{Duration, Instant};
use tokio::sync::Barrier;
fn auth_probe_test_guard() -> std::sync::MutexGuard<'static, ()> {
auth_probe_test_lock()
.lock()
.unwrap_or_else(|poisoned| poisoned.into_inner())
}
fn test_config_with_secret_hex(secret_hex: &str) -> ProxyConfig {
let mut cfg = ProxyConfig::default();
cfg.access.users.clear();
@@ -127,8 +121,8 @@ fn make_valid_mtproto_handshake(
#[tokio::test]
async fn tls_alpn_reject_does_not_pollute_replay_cache() {
let _guard = auth_probe_test_guard();
clear_auth_probe_state_for_testing();
let shared = ProxySharedState::new();
clear_auth_probe_state_for_testing_in_shared(shared.as_ref());
let secret = [0x11u8; 16];
let mut config = test_config_with_secret_hex("11111111111111111111111111111111");
@@ -164,8 +158,8 @@ async fn tls_alpn_reject_does_not_pollute_replay_cache() {
#[tokio::test]
async fn tls_truncated_session_id_len_fails_closed_without_panic() {
let _guard = auth_probe_test_guard();
clear_auth_probe_state_for_testing();
let shared = ProxySharedState::new();
clear_auth_probe_state_for_testing_in_shared(shared.as_ref());
let config = test_config_with_secret_hex("33333333333333333333333333333333");
let replay_checker = ReplayChecker::new(128, Duration::from_secs(60));
@@ -193,10 +187,10 @@ async fn tls_truncated_session_id_len_fails_closed_without_panic() {
#[test]
fn auth_probe_eviction_identical_timestamps_keeps_map_bounded() {
let _guard = auth_probe_test_guard();
clear_auth_probe_state_for_testing();
let shared = ProxySharedState::new();
clear_auth_probe_state_for_testing_in_shared(shared.as_ref());
let state = auth_probe_state_map();
let state = auth_probe_state_for_testing_in_shared(shared.as_ref());
let same = Instant::now();
for i in 0..AUTH_PROBE_TRACK_MAX_ENTRIES {
@@ -212,7 +206,7 @@ fn auth_probe_eviction_identical_timestamps_keeps_map_bounded() {
}
let new_ip = IpAddr::V4(Ipv4Addr::new(192, 168, 21, 21));
auth_probe_record_failure_with_state(state, new_ip, same + Duration::from_millis(1));
auth_probe_record_failure_with_state_in(shared.as_ref(), state, new_ip, same + Duration::from_millis(1));
assert_eq!(state.len(), AUTH_PROBE_TRACK_MAX_ENTRIES);
assert!(state.contains_key(&new_ip));
@@ -220,21 +214,21 @@ fn auth_probe_eviction_identical_timestamps_keeps_map_bounded() {
#[test]
fn clear_auth_probe_state_recovers_from_poisoned_saturation_lock() {
let _guard = auth_probe_test_guard();
clear_auth_probe_state_for_testing();
let shared = ProxySharedState::new();
clear_auth_probe_state_for_testing_in_shared(shared.as_ref());
let saturation = auth_probe_saturation_state();
let shared_for_poison = shared.clone();
let poison_thread = std::thread::spawn(move || {
let _hold = saturation
let _hold = auth_probe_saturation_state_for_testing_in_shared(shared_for_poison.as_ref())
.lock()
.unwrap_or_else(|poisoned| poisoned.into_inner());
panic!("intentional poison for regression coverage");
});
let _ = poison_thread.join();
clear_auth_probe_state_for_testing();
clear_auth_probe_state_for_testing_in_shared(shared.as_ref());
let guard = auth_probe_saturation_state()
let guard = auth_probe_saturation_state_for_testing_in_shared(shared.as_ref())
.lock()
.unwrap_or_else(|poisoned| poisoned.into_inner());
assert!(guard.is_none());
@@ -242,12 +236,9 @@ fn clear_auth_probe_state_recovers_from_poisoned_saturation_lock() {
#[tokio::test]
async fn mtproto_invalid_length_secret_is_ignored_and_valid_user_still_auths() {
let _probe_guard = auth_probe_test_guard();
let _warn_guard = warned_secrets_test_lock()
.lock()
.unwrap_or_else(|poisoned| poisoned.into_inner());
clear_auth_probe_state_for_testing();
clear_warned_secrets_for_testing();
let shared = ProxySharedState::new();
clear_auth_probe_state_for_testing_in_shared(shared.as_ref());
clear_warned_secrets_for_testing_in_shared(shared.as_ref());
let mut config = ProxyConfig::default();
config.general.modes.secure = true;
@@ -285,14 +276,14 @@ async fn mtproto_invalid_length_secret_is_ignored_and_valid_user_still_auths() {
#[tokio::test(flavor = "multi_thread", worker_threads = 4)]
async fn saturation_grace_exhaustion_under_concurrency_keeps_peer_throttled() {
let _guard = auth_probe_test_guard();
clear_auth_probe_state_for_testing();
let shared = ProxySharedState::new();
clear_auth_probe_state_for_testing_in_shared(shared.as_ref());
let peer_ip = IpAddr::V4(Ipv4Addr::new(198, 51, 100, 80));
let now = Instant::now();
{
let mut guard = auth_probe_saturation_state()
let mut guard = auth_probe_saturation_state_for_testing_in_shared(shared.as_ref())
.lock()
.unwrap_or_else(|poisoned| poisoned.into_inner());
*guard = Some(AuthProbeSaturationState {
@@ -302,7 +293,7 @@ async fn saturation_grace_exhaustion_under_concurrency_keeps_peer_throttled() {
});
}
let state = auth_probe_state_map();
let state = auth_probe_state_for_testing_in_shared(shared.as_ref());
state.insert(
peer_ip,
AuthProbeState {
@@ -318,9 +309,10 @@ async fn saturation_grace_exhaustion_under_concurrency_keeps_peer_throttled() {
for _ in 0..tasks {
let b = barrier.clone();
let shared = shared.clone();
handles.push(tokio::spawn(async move {
b.wait().await;
auth_probe_record_failure(peer_ip, Instant::now());
auth_probe_record_failure_in(shared.as_ref(), peer_ip, Instant::now());
}));
}
@@ -333,7 +325,7 @@ async fn saturation_grace_exhaustion_under_concurrency_keeps_peer_throttled() {
final_state.fail_streak
>= AUTH_PROBE_BACKOFF_START_FAILS + AUTH_PROBE_SATURATION_GRACE_FAILS
);
assert!(auth_probe_should_apply_preauth_throttle(
assert!(auth_probe_should_apply_preauth_throttle_in(shared.as_ref(),
peer_ip,
Instant::now()
));

View File

@@ -1,46 +1,39 @@
use super::*;
use std::time::{Duration, Instant};
fn auth_probe_test_guard() -> std::sync::MutexGuard<'static, ()> {
auth_probe_test_lock()
.lock()
.unwrap_or_else(|poisoned| poisoned.into_inner())
}
fn poison_saturation_mutex() {
let saturation = auth_probe_saturation_state();
let poison_thread = std::thread::spawn(move || {
fn poison_saturation_mutex(shared: &ProxySharedState) {
let saturation = auth_probe_saturation_state_for_testing_in_shared(shared);
let _ = std::panic::catch_unwind(std::panic::AssertUnwindSafe(|| {
let _guard = saturation
.lock()
.expect("saturation mutex must be lockable for poison setup");
panic!("intentional poison for saturation mutex resilience test");
});
let _ = poison_thread.join();
}));
}
#[test]
fn auth_probe_saturation_note_recovers_after_mutex_poison() {
let _guard = auth_probe_test_guard();
clear_auth_probe_state_for_testing();
poison_saturation_mutex();
let shared = ProxySharedState::new();
clear_auth_probe_state_for_testing_in_shared(shared.as_ref());
poison_saturation_mutex(shared.as_ref());
let now = Instant::now();
auth_probe_note_saturation(now);
auth_probe_note_saturation_in(shared.as_ref(), now);
assert!(
auth_probe_saturation_is_throttled_at_for_testing(now),
auth_probe_saturation_is_throttled_at_for_testing_in_shared(shared.as_ref(), now),
"poisoned saturation mutex must not disable saturation throttling"
);
}
#[test]
fn auth_probe_saturation_check_recovers_after_mutex_poison() {
let _guard = auth_probe_test_guard();
clear_auth_probe_state_for_testing();
poison_saturation_mutex();
let shared = ProxySharedState::new();
clear_auth_probe_state_for_testing_in_shared(shared.as_ref());
poison_saturation_mutex(shared.as_ref());
{
let mut guard = auth_probe_saturation_state_lock();
let mut guard = auth_probe_saturation_state_lock_for_testing_in_shared(shared.as_ref());
*guard = Some(AuthProbeSaturationState {
fail_streak: AUTH_PROBE_BACKOFF_START_FAILS,
blocked_until: Instant::now() + Duration::from_millis(10),
@@ -49,23 +42,23 @@ fn auth_probe_saturation_check_recovers_after_mutex_poison() {
}
assert!(
auth_probe_saturation_is_throttled_for_testing(),
auth_probe_saturation_is_throttled_for_testing_in_shared(shared.as_ref()),
"throttle check must recover poisoned saturation mutex and stay fail-closed"
);
}
#[test]
fn clear_auth_probe_state_clears_saturation_even_if_poisoned() {
let _guard = auth_probe_test_guard();
clear_auth_probe_state_for_testing();
poison_saturation_mutex();
let shared = ProxySharedState::new();
clear_auth_probe_state_for_testing_in_shared(shared.as_ref());
poison_saturation_mutex(shared.as_ref());
auth_probe_note_saturation(Instant::now());
assert!(auth_probe_saturation_is_throttled_for_testing());
auth_probe_note_saturation_in(shared.as_ref(), Instant::now());
assert!(auth_probe_saturation_is_throttled_for_testing_in_shared(shared.as_ref()));
clear_auth_probe_state_for_testing();
clear_auth_probe_state_for_testing_in_shared(shared.as_ref());
assert!(
!auth_probe_saturation_is_throttled_for_testing(),
!auth_probe_saturation_is_throttled_for_testing_in_shared(shared.as_ref()),
"clear helper must clear saturation state even after poison"
);
}

File diff suppressed because it is too large Load Diff

View File

@@ -4,12 +4,6 @@ use crate::protocol::constants::{ProtoTag, TLS_RECORD_HANDSHAKE, TLS_VERSION};
use std::net::SocketAddr;
use std::time::{Duration, Instant};
fn auth_probe_test_guard() -> std::sync::MutexGuard<'static, ()> {
auth_probe_test_lock()
.lock()
.unwrap_or_else(|poisoned| poisoned.into_inner())
}
fn make_valid_mtproto_handshake(
secret_hex: &str,
proto_tag: ProtoTag,
@@ -149,8 +143,8 @@ fn median_ns(samples: &mut [u128]) -> u128 {
#[tokio::test]
#[ignore = "manual benchmark: timing-sensitive and host-dependent"]
async fn mtproto_user_scan_timing_manual_benchmark() {
let _guard = auth_probe_test_guard();
clear_auth_probe_state_for_testing();
let shared = ProxySharedState::new();
clear_auth_probe_state_for_testing_in_shared(shared.as_ref());
const DECOY_USERS: usize = 8_000;
const ITERATIONS: usize = 250;
@@ -243,7 +237,7 @@ async fn mtproto_user_scan_timing_manual_benchmark() {
#[tokio::test]
#[ignore = "manual benchmark: timing-sensitive and host-dependent"]
async fn tls_sni_preferred_vs_no_sni_fallback_manual_benchmark() {
let _guard = auth_probe_test_guard();
let shared = ProxySharedState::new();
const DECOY_USERS: usize = 8_000;
const ITERATIONS: usize = 250;
@@ -281,7 +275,7 @@ async fn tls_sni_preferred_vs_no_sni_fallback_manual_benchmark() {
let no_sni = make_valid_tls_handshake(&target_secret, (i as u32).wrapping_add(10_000));
let started_sni = Instant::now();
let sni_secrets = decode_user_secrets(&config, Some(preferred_user));
let sni_secrets = decode_user_secrets_in(shared.as_ref(), &config, Some(preferred_user));
let sni_result = tls::validate_tls_handshake_with_replay_window(
&with_sni,
&sni_secrets,
@@ -292,7 +286,7 @@ async fn tls_sni_preferred_vs_no_sni_fallback_manual_benchmark() {
assert!(sni_result.is_some());
let started_no_sni = Instant::now();
let no_sni_secrets = decode_user_secrets(&config, None);
let no_sni_secrets = decode_user_secrets_in(shared.as_ref(), &config, None);
let no_sni_result = tls::validate_tls_handshake_with_replay_window(
&no_sni,
&no_sni_secrets,

View File

@@ -3,36 +3,39 @@ use std::time::{Duration, Instant};
#[test]
fn middle_relay_baseline_public_api_idle_roundtrip_contract() {
let _guard = relay_idle_pressure_test_scope();
clear_relay_idle_pressure_state_for_testing();
let shared = ProxySharedState::new();
clear_relay_idle_pressure_state_for_testing_in_shared(shared.as_ref());
assert!(mark_relay_idle_candidate(7001));
assert_eq!(oldest_relay_idle_candidate(), Some(7001));
assert!(mark_relay_idle_candidate_for_testing(shared.as_ref(), 7001));
assert_eq!(oldest_relay_idle_candidate_for_testing(shared.as_ref()), Some(7001));
clear_relay_idle_candidate(7001);
assert_ne!(oldest_relay_idle_candidate(), Some(7001));
clear_relay_idle_candidate_for_testing(shared.as_ref(), 7001);
assert_ne!(oldest_relay_idle_candidate_for_testing(shared.as_ref()), Some(7001));
assert!(mark_relay_idle_candidate(7001));
assert_eq!(oldest_relay_idle_candidate(), Some(7001));
assert!(mark_relay_idle_candidate_for_testing(shared.as_ref(), 7001));
assert_eq!(oldest_relay_idle_candidate_for_testing(shared.as_ref()), Some(7001));
clear_relay_idle_pressure_state_for_testing();
clear_relay_idle_pressure_state_for_testing_in_shared(shared.as_ref());
}
#[test]
fn middle_relay_baseline_public_api_desync_window_contract() {
let _guard = desync_dedup_test_lock()
.lock()
.unwrap_or_else(|poisoned| poisoned.into_inner());
clear_desync_dedup_for_testing();
let shared = ProxySharedState::new();
clear_desync_dedup_for_testing_in_shared(shared.as_ref());
let key = 0xDEAD_BEEF_0000_0001u64;
let t0 = Instant::now();
assert!(should_emit_full_desync(key, false, t0));
assert!(!should_emit_full_desync(key, false, t0 + Duration::from_secs(1)));
assert!(should_emit_full_desync_for_testing(shared.as_ref(), key, false, t0));
assert!(!should_emit_full_desync_for_testing(
shared.as_ref(),
key,
false,
t0 + Duration::from_secs(1)
));
let t1 = t0 + DESYNC_DEDUP_WINDOW + Duration::from_millis(10);
assert!(should_emit_full_desync(key, false, t1));
assert!(should_emit_full_desync_for_testing(shared.as_ref(), key, false, t1));
clear_desync_dedup_for_testing();
clear_desync_dedup_for_testing_in_shared(shared.as_ref());
}

View File

@@ -5,22 +5,20 @@ use std::thread;
#[test]
fn desync_all_full_bypass_does_not_initialize_or_grow_dedup_cache() {
let _guard = desync_dedup_test_lock()
.lock()
.expect("desync dedup test lock must be available");
clear_desync_dedup_for_testing();
let shared = ProxySharedState::new();
clear_desync_dedup_for_testing_in_shared(shared.as_ref());
let initial_len = DESYNC_DEDUP.get().map(|dedup| dedup.len()).unwrap_or(0);
let initial_len = desync_dedup_len_for_testing(shared.as_ref());
let now = Instant::now();
for i in 0..20_000u64 {
assert!(
should_emit_full_desync(0xD35E_D000_0000_0000u64 ^ i, true, now),
should_emit_full_desync_for_testing(shared.as_ref(), 0xD35E_D000_0000_0000u64 ^ i, true, now),
"desync_all_full path must always emit"
);
}
let after_len = DESYNC_DEDUP.get().map(|dedup| dedup.len()).unwrap_or(0);
let after_len = desync_dedup_len_for_testing(shared.as_ref());
assert_eq!(
after_len, initial_len,
"desync_all_full bypass must not allocate or accumulate dedup entries"
@@ -29,39 +27,34 @@ fn desync_all_full_bypass_does_not_initialize_or_grow_dedup_cache() {
#[test]
fn desync_all_full_bypass_keeps_existing_dedup_entries_unchanged() {
let _guard = desync_dedup_test_lock()
.lock()
.expect("desync dedup test lock must be available");
clear_desync_dedup_for_testing();
let shared = ProxySharedState::new();
clear_desync_dedup_for_testing_in_shared(shared.as_ref());
let dedup = DESYNC_DEDUP.get_or_init(DashMap::new);
let seed_time = Instant::now() - Duration::from_secs(7);
dedup.insert(0xAAAABBBBCCCCDDDD, seed_time);
dedup.insert(0x1111222233334444, seed_time);
desync_dedup_insert_for_testing(shared.as_ref(), 0xAAAABBBBCCCCDDDD, seed_time);
desync_dedup_insert_for_testing(shared.as_ref(), 0x1111222233334444, seed_time);
let now = Instant::now();
for i in 0..2048u64 {
assert!(
should_emit_full_desync(0xF011_F000_0000_0000u64 ^ i, true, now),
should_emit_full_desync_for_testing(shared.as_ref(), 0xF011_F000_0000_0000u64 ^ i, true, now),
"desync_all_full must bypass suppression and dedup refresh"
);
}
assert_eq!(
dedup.len(),
desync_dedup_len_for_testing(shared.as_ref()),
2,
"bypass path must not mutate dedup cardinality"
);
assert_eq!(
*dedup
.get(&0xAAAABBBBCCCCDDDD)
desync_dedup_get_for_testing(shared.as_ref(), 0xAAAABBBBCCCCDDDD)
.expect("seed key must remain"),
seed_time,
"bypass path must not refresh existing dedup timestamps"
);
assert_eq!(
*dedup
.get(&0x1111222233334444)
desync_dedup_get_for_testing(shared.as_ref(), 0x1111222233334444)
.expect("seed key must remain"),
seed_time,
"bypass path must not touch unrelated dedup entries"
@@ -70,14 +63,12 @@ fn desync_all_full_bypass_keeps_existing_dedup_entries_unchanged() {
#[test]
fn edge_all_full_burst_does_not_poison_later_false_path_tracking() {
let _guard = desync_dedup_test_lock()
.lock()
.expect("desync dedup test lock must be available");
clear_desync_dedup_for_testing();
let shared = ProxySharedState::new();
clear_desync_dedup_for_testing_in_shared(shared.as_ref());
let now = Instant::now();
for i in 0..8192u64 {
assert!(should_emit_full_desync(
assert!(should_emit_full_desync_for_testing(shared.as_ref(),
0xABCD_0000_0000_0000 ^ i,
true,
now
@@ -86,26 +77,20 @@ fn edge_all_full_burst_does_not_poison_later_false_path_tracking() {
let tracked_key = 0xDEAD_BEEF_0000_0001u64;
assert!(
should_emit_full_desync(tracked_key, false, now),
should_emit_full_desync_for_testing(shared.as_ref(), tracked_key, false, now),
"first false-path event after all_full burst must still be tracked and emitted"
);
let dedup = DESYNC_DEDUP
.get()
.expect("false path should initialize dedup");
assert!(dedup.get(&tracked_key).is_some());
assert!(desync_dedup_get_for_testing(shared.as_ref(), tracked_key).is_some());
}
#[test]
fn adversarial_mixed_sequence_true_steps_never_change_cache_len() {
let _guard = desync_dedup_test_lock()
.lock()
.expect("desync dedup test lock must be available");
clear_desync_dedup_for_testing();
let shared = ProxySharedState::new();
clear_desync_dedup_for_testing_in_shared(shared.as_ref());
let dedup = DESYNC_DEDUP.get_or_init(DashMap::new);
for i in 0..256u64 {
dedup.insert(0x1000_0000_0000_0000 ^ i, Instant::now());
desync_dedup_insert_for_testing(shared.as_ref(), 0x1000_0000_0000_0000 ^ i, Instant::now());
}
let mut seed = 0xC0DE_CAFE_BAAD_F00Du64;
@@ -116,9 +101,9 @@ fn adversarial_mixed_sequence_true_steps_never_change_cache_len() {
let flag_all_full = (seed & 0x1) == 1;
let key = 0x7000_0000_0000_0000u64 ^ i ^ seed;
let before = dedup.len();
let _ = should_emit_full_desync(key, flag_all_full, Instant::now());
let after = dedup.len();
let before = desync_dedup_len_for_testing(shared.as_ref());
let _ = should_emit_full_desync_for_testing(shared.as_ref(), key, flag_all_full, Instant::now());
let after = desync_dedup_len_for_testing(shared.as_ref());
if flag_all_full {
assert_eq!(after, before, "all_full step must not mutate dedup length");
@@ -128,50 +113,46 @@ fn adversarial_mixed_sequence_true_steps_never_change_cache_len() {
#[test]
fn light_fuzz_all_full_mode_always_emits_and_stays_bounded() {
let _guard = desync_dedup_test_lock()
.lock()
.expect("desync dedup test lock must be available");
clear_desync_dedup_for_testing();
let shared = ProxySharedState::new();
clear_desync_dedup_for_testing_in_shared(shared.as_ref());
let mut seed = 0x1234_5678_9ABC_DEF0u64;
let before = DESYNC_DEDUP.get().map(|d| d.len()).unwrap_or(0);
let before = desync_dedup_len_for_testing(shared.as_ref());
for _ in 0..20_000 {
seed ^= seed << 7;
seed ^= seed >> 9;
seed ^= seed << 8;
let key = seed ^ 0x55AA_55AA_55AA_55AAu64;
assert!(should_emit_full_desync(key, true, Instant::now()));
assert!(should_emit_full_desync_for_testing(shared.as_ref(), key, true, Instant::now()));
}
let after = DESYNC_DEDUP.get().map(|d| d.len()).unwrap_or(0);
let after = desync_dedup_len_for_testing(shared.as_ref());
assert_eq!(after, before);
assert!(after <= DESYNC_DEDUP_MAX_ENTRIES);
}
#[test]
fn stress_parallel_all_full_storm_does_not_grow_or_mutate_cache() {
let _guard = desync_dedup_test_lock()
.lock()
.expect("desync dedup test lock must be available");
clear_desync_dedup_for_testing();
let shared = ProxySharedState::new();
clear_desync_dedup_for_testing_in_shared(shared.as_ref());
let dedup = DESYNC_DEDUP.get_or_init(DashMap::new);
let seed_time = Instant::now() - Duration::from_secs(2);
for i in 0..1024u64 {
dedup.insert(0x8888_0000_0000_0000 ^ i, seed_time);
desync_dedup_insert_for_testing(shared.as_ref(), 0x8888_0000_0000_0000 ^ i, seed_time);
}
let before_len = dedup.len();
let before_len = desync_dedup_len_for_testing(shared.as_ref());
let emits = Arc::new(AtomicUsize::new(0));
let mut workers = Vec::new();
for worker in 0..16u64 {
let emits = Arc::clone(&emits);
let shared = shared.clone();
workers.push(thread::spawn(move || {
let now = Instant::now();
for i in 0..4096u64 {
let key = 0xFACE_0000_0000_0000u64 ^ (worker << 20) ^ i;
if should_emit_full_desync(key, true, now) {
if should_emit_full_desync_for_testing(shared.as_ref(), key, true, now) {
emits.fetch_add(1, Ordering::Relaxed);
}
}
@@ -184,7 +165,7 @@ fn stress_parallel_all_full_storm_does_not_grow_or_mutate_cache() {
assert_eq!(emits.load(Ordering::Relaxed), 16 * 4096);
assert_eq!(
dedup.len(),
desync_dedup_len_for_testing(shared.as_ref()),
before_len,
"parallel all_full storm must not mutate cache len"
);

View File

@@ -360,73 +360,73 @@ async fn stress_many_idle_sessions_fail_closed_without_hang() {
#[test]
fn pressure_evicts_oldest_idle_candidate_with_deterministic_ordering() {
let _guard = relay_idle_pressure_test_scope();
clear_relay_idle_pressure_state_for_testing();
let shared = ProxySharedState::new();
clear_relay_idle_pressure_state_for_testing_in_shared(shared.as_ref());
let stats = Stats::new();
assert!(mark_relay_idle_candidate(10));
assert!(mark_relay_idle_candidate(11));
assert_eq!(oldest_relay_idle_candidate(), Some(10));
assert!(mark_relay_idle_candidate_for_testing(shared.as_ref(), 10));
assert!(mark_relay_idle_candidate_for_testing(shared.as_ref(), 11));
assert_eq!(oldest_relay_idle_candidate_for_testing(shared.as_ref()), Some(10));
note_relay_pressure_event();
note_relay_pressure_event_for_testing(shared.as_ref());
let mut seen_for_newer = 0u64;
assert!(
!maybe_evict_idle_candidate_on_pressure(11, &mut seen_for_newer, &stats),
!maybe_evict_idle_candidate_on_pressure_for_testing(shared.as_ref(), 11, &mut seen_for_newer, &stats),
"newer idle candidate must not be evicted while older candidate exists"
);
assert_eq!(oldest_relay_idle_candidate(), Some(10));
assert_eq!(oldest_relay_idle_candidate_for_testing(shared.as_ref()), Some(10));
let mut seen_for_oldest = 0u64;
assert!(
maybe_evict_idle_candidate_on_pressure(10, &mut seen_for_oldest, &stats),
maybe_evict_idle_candidate_on_pressure_for_testing(shared.as_ref(), 10, &mut seen_for_oldest, &stats),
"oldest idle candidate must be evicted first under pressure"
);
assert_eq!(oldest_relay_idle_candidate(), Some(11));
assert_eq!(oldest_relay_idle_candidate_for_testing(shared.as_ref()), Some(11));
assert_eq!(stats.get_relay_pressure_evict_total(), 1);
clear_relay_idle_pressure_state_for_testing();
clear_relay_idle_pressure_state_for_testing_in_shared(shared.as_ref());
}
#[test]
fn pressure_does_not_evict_without_new_pressure_signal() {
let _guard = relay_idle_pressure_test_scope();
clear_relay_idle_pressure_state_for_testing();
let shared = ProxySharedState::new();
clear_relay_idle_pressure_state_for_testing_in_shared(shared.as_ref());
let stats = Stats::new();
assert!(mark_relay_idle_candidate(21));
let mut seen = relay_pressure_event_seq();
assert!(mark_relay_idle_candidate_for_testing(shared.as_ref(), 21));
let mut seen = relay_pressure_event_seq_for_testing(shared.as_ref());
assert!(
!maybe_evict_idle_candidate_on_pressure(21, &mut seen, &stats),
!maybe_evict_idle_candidate_on_pressure_for_testing(shared.as_ref(), 21, &mut seen, &stats),
"without new pressure signal, candidate must stay"
);
assert_eq!(stats.get_relay_pressure_evict_total(), 0);
assert_eq!(oldest_relay_idle_candidate(), Some(21));
assert_eq!(oldest_relay_idle_candidate_for_testing(shared.as_ref()), Some(21));
clear_relay_idle_pressure_state_for_testing();
clear_relay_idle_pressure_state_for_testing_in_shared(shared.as_ref());
}
#[test]
fn stress_pressure_eviction_preserves_fifo_across_many_candidates() {
let _guard = relay_idle_pressure_test_scope();
clear_relay_idle_pressure_state_for_testing();
let shared = ProxySharedState::new();
clear_relay_idle_pressure_state_for_testing_in_shared(shared.as_ref());
let stats = Stats::new();
let mut seen_per_conn = std::collections::HashMap::new();
for conn_id in 1000u64..1064u64 {
assert!(mark_relay_idle_candidate(conn_id));
assert!(mark_relay_idle_candidate_for_testing(shared.as_ref(), conn_id));
seen_per_conn.insert(conn_id, 0u64);
}
for expected in 1000u64..1064u64 {
note_relay_pressure_event();
note_relay_pressure_event_for_testing(shared.as_ref());
let mut seen = *seen_per_conn
.get(&expected)
.expect("per-conn pressure cursor must exist");
assert!(
maybe_evict_idle_candidate_on_pressure(expected, &mut seen, &stats),
maybe_evict_idle_candidate_on_pressure_for_testing(shared.as_ref(), expected, &mut seen, &stats),
"expected conn_id {expected} must be evicted next by deterministic FIFO ordering"
);
seen_per_conn.insert(expected, seen);
@@ -436,33 +436,33 @@ fn stress_pressure_eviction_preserves_fifo_across_many_candidates() {
} else {
Some(expected + 1)
};
assert_eq!(oldest_relay_idle_candidate(), next);
assert_eq!(oldest_relay_idle_candidate_for_testing(shared.as_ref()), next);
}
assert_eq!(stats.get_relay_pressure_evict_total(), 64);
clear_relay_idle_pressure_state_for_testing();
clear_relay_idle_pressure_state_for_testing_in_shared(shared.as_ref());
}
#[test]
fn blackhat_single_pressure_event_must_not_evict_more_than_one_candidate() {
let _guard = relay_idle_pressure_test_scope();
clear_relay_idle_pressure_state_for_testing();
let shared = ProxySharedState::new();
clear_relay_idle_pressure_state_for_testing_in_shared(shared.as_ref());
let stats = Stats::new();
assert!(mark_relay_idle_candidate(301));
assert!(mark_relay_idle_candidate(302));
assert!(mark_relay_idle_candidate(303));
assert!(mark_relay_idle_candidate_for_testing(shared.as_ref(), 301));
assert!(mark_relay_idle_candidate_for_testing(shared.as_ref(), 302));
assert!(mark_relay_idle_candidate_for_testing(shared.as_ref(), 303));
let mut seen_301 = 0u64;
let mut seen_302 = 0u64;
let mut seen_303 = 0u64;
// Single pressure event should authorize at most one eviction globally.
note_relay_pressure_event();
note_relay_pressure_event_for_testing(shared.as_ref());
let evicted_301 = maybe_evict_idle_candidate_on_pressure(301, &mut seen_301, &stats);
let evicted_302 = maybe_evict_idle_candidate_on_pressure(302, &mut seen_302, &stats);
let evicted_303 = maybe_evict_idle_candidate_on_pressure(303, &mut seen_303, &stats);
let evicted_301 = maybe_evict_idle_candidate_on_pressure_for_testing(shared.as_ref(), 301, &mut seen_301, &stats);
let evicted_302 = maybe_evict_idle_candidate_on_pressure_for_testing(shared.as_ref(), 302, &mut seen_302, &stats);
let evicted_303 = maybe_evict_idle_candidate_on_pressure_for_testing(shared.as_ref(), 303, &mut seen_303, &stats);
let evicted_total = [evicted_301, evicted_302, evicted_303]
.iter()
@@ -474,30 +474,30 @@ fn blackhat_single_pressure_event_must_not_evict_more_than_one_candidate() {
"single pressure event must not cascade-evict multiple idle candidates"
);
clear_relay_idle_pressure_state_for_testing();
clear_relay_idle_pressure_state_for_testing_in_shared(shared.as_ref());
}
#[test]
fn blackhat_pressure_counter_must_track_global_budget_not_per_session_cursor() {
let _guard = relay_idle_pressure_test_scope();
clear_relay_idle_pressure_state_for_testing();
let shared = ProxySharedState::new();
clear_relay_idle_pressure_state_for_testing_in_shared(shared.as_ref());
let stats = Stats::new();
assert!(mark_relay_idle_candidate(401));
assert!(mark_relay_idle_candidate(402));
assert!(mark_relay_idle_candidate_for_testing(shared.as_ref(), 401));
assert!(mark_relay_idle_candidate_for_testing(shared.as_ref(), 402));
let mut seen_oldest = 0u64;
let mut seen_next = 0u64;
note_relay_pressure_event();
note_relay_pressure_event_for_testing(shared.as_ref());
assert!(
maybe_evict_idle_candidate_on_pressure(401, &mut seen_oldest, &stats),
maybe_evict_idle_candidate_on_pressure_for_testing(shared.as_ref(), 401, &mut seen_oldest, &stats),
"oldest candidate must consume pressure budget first"
);
assert!(
!maybe_evict_idle_candidate_on_pressure(402, &mut seen_next, &stats),
!maybe_evict_idle_candidate_on_pressure_for_testing(shared.as_ref(), 402, &mut seen_next, &stats),
"next candidate must not consume the same pressure budget"
);
@@ -507,47 +507,47 @@ fn blackhat_pressure_counter_must_track_global_budget_not_per_session_cursor() {
"single pressure budget must produce exactly one eviction"
);
clear_relay_idle_pressure_state_for_testing();
clear_relay_idle_pressure_state_for_testing_in_shared(shared.as_ref());
}
#[test]
fn blackhat_stale_pressure_before_idle_mark_must_not_trigger_eviction() {
let _guard = relay_idle_pressure_test_scope();
clear_relay_idle_pressure_state_for_testing();
let shared = ProxySharedState::new();
clear_relay_idle_pressure_state_for_testing_in_shared(shared.as_ref());
let stats = Stats::new();
// Pressure happened before any idle candidate existed.
note_relay_pressure_event();
assert!(mark_relay_idle_candidate(501));
note_relay_pressure_event_for_testing(shared.as_ref());
assert!(mark_relay_idle_candidate_for_testing(shared.as_ref(), 501));
let mut seen = 0u64;
assert!(
!maybe_evict_idle_candidate_on_pressure(501, &mut seen, &stats),
!maybe_evict_idle_candidate_on_pressure_for_testing(shared.as_ref(), 501, &mut seen, &stats),
"stale pressure (before soft-idle mark) must not evict newly marked candidate"
);
clear_relay_idle_pressure_state_for_testing();
clear_relay_idle_pressure_state_for_testing_in_shared(shared.as_ref());
}
#[test]
fn blackhat_stale_pressure_must_not_evict_any_of_newly_marked_batch() {
let _guard = relay_idle_pressure_test_scope();
clear_relay_idle_pressure_state_for_testing();
let shared = ProxySharedState::new();
clear_relay_idle_pressure_state_for_testing_in_shared(shared.as_ref());
let stats = Stats::new();
note_relay_pressure_event();
assert!(mark_relay_idle_candidate(511));
assert!(mark_relay_idle_candidate(512));
assert!(mark_relay_idle_candidate(513));
note_relay_pressure_event_for_testing(shared.as_ref());
assert!(mark_relay_idle_candidate_for_testing(shared.as_ref(), 511));
assert!(mark_relay_idle_candidate_for_testing(shared.as_ref(), 512));
assert!(mark_relay_idle_candidate_for_testing(shared.as_ref(), 513));
let mut seen_511 = 0u64;
let mut seen_512 = 0u64;
let mut seen_513 = 0u64;
let evicted = [
maybe_evict_idle_candidate_on_pressure(511, &mut seen_511, &stats),
maybe_evict_idle_candidate_on_pressure(512, &mut seen_512, &stats),
maybe_evict_idle_candidate_on_pressure(513, &mut seen_513, &stats),
maybe_evict_idle_candidate_on_pressure_for_testing(shared.as_ref(), 511, &mut seen_511, &stats),
maybe_evict_idle_candidate_on_pressure_for_testing(shared.as_ref(), 512, &mut seen_512, &stats),
maybe_evict_idle_candidate_on_pressure_for_testing(shared.as_ref(), 513, &mut seen_513, &stats),
]
.iter()
.filter(|value| **value)
@@ -558,111 +558,103 @@ fn blackhat_stale_pressure_must_not_evict_any_of_newly_marked_batch() {
"stale pressure event must not evict any candidate from a newly marked batch"
);
clear_relay_idle_pressure_state_for_testing();
clear_relay_idle_pressure_state_for_testing_in_shared(shared.as_ref());
}
#[test]
fn blackhat_stale_pressure_seen_without_candidates_must_be_globally_invalidated() {
let _guard = relay_idle_pressure_test_scope();
clear_relay_idle_pressure_state_for_testing();
let shared = ProxySharedState::new();
clear_relay_idle_pressure_state_for_testing_in_shared(shared.as_ref());
let stats = Stats::new();
note_relay_pressure_event();
note_relay_pressure_event_for_testing(shared.as_ref());
// Session A observed pressure while there were no candidates.
let mut seen_a = 0u64;
assert!(
!maybe_evict_idle_candidate_on_pressure(999_001, &mut seen_a, &stats),
!maybe_evict_idle_candidate_on_pressure_for_testing(shared.as_ref(), 999_001, &mut seen_a, &stats),
"no candidate existed, so no eviction is possible"
);
// Candidate appears later; Session B must not be able to consume stale pressure.
assert!(mark_relay_idle_candidate(521));
assert!(mark_relay_idle_candidate_for_testing(shared.as_ref(), 521));
let mut seen_b = 0u64;
assert!(
!maybe_evict_idle_candidate_on_pressure(521, &mut seen_b, &stats),
!maybe_evict_idle_candidate_on_pressure_for_testing(shared.as_ref(), 521, &mut seen_b, &stats),
"once pressure is observed with empty candidate set, it must not be replayed later"
);
clear_relay_idle_pressure_state_for_testing();
clear_relay_idle_pressure_state_for_testing_in_shared(shared.as_ref());
}
#[test]
fn blackhat_stale_pressure_must_not_survive_candidate_churn() {
let _guard = relay_idle_pressure_test_scope();
clear_relay_idle_pressure_state_for_testing();
let shared = ProxySharedState::new();
clear_relay_idle_pressure_state_for_testing_in_shared(shared.as_ref());
let stats = Stats::new();
note_relay_pressure_event();
assert!(mark_relay_idle_candidate(531));
clear_relay_idle_candidate(531);
assert!(mark_relay_idle_candidate(532));
note_relay_pressure_event_for_testing(shared.as_ref());
assert!(mark_relay_idle_candidate_for_testing(shared.as_ref(), 531));
clear_relay_idle_candidate_for_testing(shared.as_ref(), 531);
assert!(mark_relay_idle_candidate_for_testing(shared.as_ref(), 532));
let mut seen = 0u64;
assert!(
!maybe_evict_idle_candidate_on_pressure(532, &mut seen, &stats),
!maybe_evict_idle_candidate_on_pressure_for_testing(shared.as_ref(), 532, &mut seen, &stats),
"stale pressure must not survive clear+remark churn cycles"
);
clear_relay_idle_pressure_state_for_testing();
clear_relay_idle_pressure_state_for_testing_in_shared(shared.as_ref());
}
#[test]
fn blackhat_pressure_seq_saturation_must_not_disable_future_pressure_accounting() {
let _guard = relay_idle_pressure_test_scope();
clear_relay_idle_pressure_state_for_testing();
let shared = ProxySharedState::new();
clear_relay_idle_pressure_state_for_testing_in_shared(shared.as_ref());
{
let mut guard = relay_idle_candidate_registry()
.lock()
.expect("registry lock must be available");
guard.pressure_event_seq = u64::MAX;
guard.pressure_consumed_seq = u64::MAX - 1;
set_relay_pressure_state_for_testing(shared.as_ref(), u64::MAX, u64::MAX - 1);
}
// A new pressure event should still be representable; saturating at MAX creates a permanent lockout.
note_relay_pressure_event();
let after = relay_pressure_event_seq();
note_relay_pressure_event_for_testing(shared.as_ref());
let after = relay_pressure_event_seq_for_testing(shared.as_ref());
assert_ne!(
after,
u64::MAX,
"pressure sequence saturation must not permanently freeze event progression"
);
clear_relay_idle_pressure_state_for_testing();
clear_relay_idle_pressure_state_for_testing_in_shared(shared.as_ref());
}
#[test]
fn blackhat_pressure_seq_saturation_must_not_break_multiple_distinct_events() {
let _guard = relay_idle_pressure_test_scope();
clear_relay_idle_pressure_state_for_testing();
let shared = ProxySharedState::new();
clear_relay_idle_pressure_state_for_testing_in_shared(shared.as_ref());
{
let mut guard = relay_idle_candidate_registry()
.lock()
.expect("registry lock must be available");
guard.pressure_event_seq = u64::MAX;
guard.pressure_consumed_seq = u64::MAX;
set_relay_pressure_state_for_testing(shared.as_ref(), u64::MAX, u64::MAX);
}
note_relay_pressure_event();
let first = relay_pressure_event_seq();
note_relay_pressure_event();
let second = relay_pressure_event_seq();
note_relay_pressure_event_for_testing(shared.as_ref());
let first = relay_pressure_event_seq_for_testing(shared.as_ref());
note_relay_pressure_event_for_testing(shared.as_ref());
let second = relay_pressure_event_seq_for_testing(shared.as_ref());
assert!(
second > first,
"distinct pressure events must remain distinguishable even at sequence boundary"
);
clear_relay_idle_pressure_state_for_testing();
clear_relay_idle_pressure_state_for_testing_in_shared(shared.as_ref());
}
#[tokio::test(flavor = "multi_thread", worker_threads = 4)]
async fn integration_race_single_pressure_event_allows_at_most_one_eviction_under_parallel_claims()
{
let _guard = relay_idle_pressure_test_scope();
clear_relay_idle_pressure_state_for_testing();
let shared = ProxySharedState::new();
clear_relay_idle_pressure_state_for_testing_in_shared(shared.as_ref());
let stats = Arc::new(Stats::new());
let sessions = 16usize;
@@ -671,20 +663,21 @@ async fn integration_race_single_pressure_event_allows_at_most_one_eviction_unde
let mut seen_per_session = vec![0u64; sessions];
for conn_id in &conn_ids {
assert!(mark_relay_idle_candidate(*conn_id));
assert!(mark_relay_idle_candidate_for_testing(shared.as_ref(), *conn_id));
}
for round in 0..rounds {
note_relay_pressure_event();
note_relay_pressure_event_for_testing(shared.as_ref());
let mut joins = Vec::with_capacity(sessions);
for (idx, conn_id) in conn_ids.iter().enumerate() {
let mut seen = seen_per_session[idx];
let conn_id = *conn_id;
let stats = stats.clone();
let shared = shared.clone();
joins.push(tokio::spawn(async move {
let evicted =
maybe_evict_idle_candidate_on_pressure(conn_id, &mut seen, stats.as_ref());
maybe_evict_idle_candidate_on_pressure_for_testing(shared.as_ref(), conn_id, &mut seen, stats.as_ref());
(idx, conn_id, seen, evicted)
}));
}
@@ -706,7 +699,7 @@ async fn integration_race_single_pressure_event_allows_at_most_one_eviction_unde
);
if let Some(conn) = evicted_conn {
assert!(
mark_relay_idle_candidate(conn),
mark_relay_idle_candidate_for_testing(shared.as_ref(), conn),
"round {round}: evicted conn must be re-markable as idle candidate"
);
}
@@ -721,13 +714,13 @@ async fn integration_race_single_pressure_event_allows_at_most_one_eviction_unde
"parallel race must still observe at least one successful eviction"
);
clear_relay_idle_pressure_state_for_testing();
clear_relay_idle_pressure_state_for_testing_in_shared(shared.as_ref());
}
#[tokio::test(flavor = "multi_thread", worker_threads = 4)]
async fn integration_race_burst_pressure_with_churn_preserves_empty_set_invalidation_and_budget() {
let _guard = relay_idle_pressure_test_scope();
clear_relay_idle_pressure_state_for_testing();
let shared = ProxySharedState::new();
clear_relay_idle_pressure_state_for_testing_in_shared(shared.as_ref());
let stats = Arc::new(Stats::new());
let sessions = 12usize;
@@ -736,7 +729,7 @@ async fn integration_race_burst_pressure_with_churn_preserves_empty_set_invalida
let mut seen_per_session = vec![0u64; sessions];
for conn_id in &conn_ids {
assert!(mark_relay_idle_candidate(*conn_id));
assert!(mark_relay_idle_candidate_for_testing(shared.as_ref(), *conn_id));
}
let mut expected_total_evictions = 0u64;
@@ -745,20 +738,21 @@ async fn integration_race_burst_pressure_with_churn_preserves_empty_set_invalida
let empty_phase = round % 5 == 0;
if empty_phase {
for conn_id in &conn_ids {
clear_relay_idle_candidate(*conn_id);
clear_relay_idle_candidate_for_testing(shared.as_ref(), *conn_id);
}
}
note_relay_pressure_event();
note_relay_pressure_event_for_testing(shared.as_ref());
let mut joins = Vec::with_capacity(sessions);
for (idx, conn_id) in conn_ids.iter().enumerate() {
let mut seen = seen_per_session[idx];
let conn_id = *conn_id;
let stats = stats.clone();
let shared = shared.clone();
joins.push(tokio::spawn(async move {
let evicted =
maybe_evict_idle_candidate_on_pressure(conn_id, &mut seen, stats.as_ref());
maybe_evict_idle_candidate_on_pressure_for_testing(shared.as_ref(), conn_id, &mut seen, stats.as_ref());
(idx, conn_id, seen, evicted)
}));
}
@@ -780,7 +774,7 @@ async fn integration_race_burst_pressure_with_churn_preserves_empty_set_invalida
"round {round}: empty candidate phase must not allow stale-pressure eviction"
);
for conn_id in &conn_ids {
assert!(mark_relay_idle_candidate(*conn_id));
assert!(mark_relay_idle_candidate_for_testing(shared.as_ref(), *conn_id));
}
} else {
assert!(
@@ -789,7 +783,7 @@ async fn integration_race_burst_pressure_with_churn_preserves_empty_set_invalida
);
if let Some(conn_id) = evicted_conn {
expected_total_evictions = expected_total_evictions.saturating_add(1);
assert!(mark_relay_idle_candidate(conn_id));
assert!(mark_relay_idle_candidate_for_testing(shared.as_ref(), conn_id));
}
}
}
@@ -800,5 +794,5 @@ async fn integration_race_burst_pressure_with_churn_preserves_empty_set_invalida
"global pressure eviction counter must match observed per-round successful consumes"
);
clear_relay_idle_pressure_state_for_testing();
clear_relay_idle_pressure_state_for_testing_in_shared(shared.as_ref());
}

View File

@@ -3,12 +3,13 @@ use std::panic::{AssertUnwindSafe, catch_unwind};
#[test]
fn blackhat_registry_poison_recovers_with_fail_closed_reset_and_pressure_accounting() {
let _guard = relay_idle_pressure_test_scope();
clear_relay_idle_pressure_state_for_testing();
let shared = ProxySharedState::new();
clear_relay_idle_pressure_state_for_testing_in_shared(shared.as_ref());
let _ = catch_unwind(AssertUnwindSafe(|| {
let registry = relay_idle_candidate_registry();
let mut guard = registry
let mut guard = shared
.middle_relay
.relay_idle_registry
.lock()
.expect("registry lock must be acquired before poison");
guard.by_conn_id.insert(
@@ -23,40 +24,41 @@ fn blackhat_registry_poison_recovers_with_fail_closed_reset_and_pressure_account
}));
// Helper lock must recover from poison, reset stale state, and continue.
assert!(mark_relay_idle_candidate(42));
assert_eq!(oldest_relay_idle_candidate(), Some(42));
assert!(mark_relay_idle_candidate_for_testing(shared.as_ref(), 42));
assert_eq!(oldest_relay_idle_candidate_for_testing(shared.as_ref()), Some(42));
let before = relay_pressure_event_seq();
note_relay_pressure_event();
let after = relay_pressure_event_seq();
let before = relay_pressure_event_seq_for_testing(shared.as_ref());
note_relay_pressure_event_for_testing(shared.as_ref());
let after = relay_pressure_event_seq_for_testing(shared.as_ref());
assert!(
after > before,
"pressure accounting must still advance after poison"
);
clear_relay_idle_pressure_state_for_testing();
clear_relay_idle_pressure_state_for_testing_in_shared(shared.as_ref());
}
#[test]
fn clear_state_helper_must_reset_poisoned_registry_for_deterministic_fifo_tests() {
let _guard = relay_idle_pressure_test_scope();
clear_relay_idle_pressure_state_for_testing();
let shared = ProxySharedState::new();
clear_relay_idle_pressure_state_for_testing_in_shared(shared.as_ref());
let _ = catch_unwind(AssertUnwindSafe(|| {
let registry = relay_idle_candidate_registry();
let _guard = registry
let _guard = shared
.middle_relay
.relay_idle_registry
.lock()
.expect("registry lock must be acquired before poison");
panic!("intentional poison while lock held");
}));
clear_relay_idle_pressure_state_for_testing();
clear_relay_idle_pressure_state_for_testing_in_shared(shared.as_ref());
assert_eq!(oldest_relay_idle_candidate(), None);
assert_eq!(relay_pressure_event_seq(), 0);
assert_eq!(oldest_relay_idle_candidate_for_testing(shared.as_ref()), None);
assert_eq!(relay_pressure_event_seq_for_testing(shared.as_ref()), 0);
assert!(mark_relay_idle_candidate(7));
assert_eq!(oldest_relay_idle_candidate(), Some(7));
assert!(mark_relay_idle_candidate_for_testing(shared.as_ref(), 7));
assert_eq!(oldest_relay_idle_candidate_for_testing(shared.as_ref()), Some(7));
clear_relay_idle_pressure_state_for_testing();
clear_relay_idle_pressure_state_for_testing_in_shared(shared.as_ref());
}

View File

@@ -1,7 +1,6 @@
use super::*;
use crate::stats::Stats;
use crate::stream::BufferPool;
use std::collections::HashSet;
use std::sync::Arc;
use tokio::time::{Duration as TokioDuration, timeout};
@@ -16,32 +15,30 @@ fn make_pooled_payload(data: &[u8]) -> PooledBuffer {
#[test]
#[ignore = "Tracking for M-04: Verify should_emit_full_desync returns true on first occurrence and false on duplicate within window"]
fn should_emit_full_desync_filters_duplicates() {
let _guard = desync_dedup_test_lock()
.lock()
.expect("desync dedup test lock must be available");
clear_desync_dedup_for_testing();
let shared = ProxySharedState::new();
clear_desync_dedup_for_testing_in_shared(shared.as_ref());
let key = 0x4D04_0000_0000_0001_u64;
let base = Instant::now();
assert!(
should_emit_full_desync(key, false, base),
should_emit_full_desync_for_testing(shared.as_ref(), key, false, base),
"first occurrence must emit full forensic record"
);
assert!(
!should_emit_full_desync(key, false, base),
!should_emit_full_desync_for_testing(shared.as_ref(), key, false, base),
"duplicate at same timestamp must be suppressed"
);
let within_window = base + DESYNC_DEDUP_WINDOW - TokioDuration::from_millis(1);
assert!(
!should_emit_full_desync(key, false, within_window),
!should_emit_full_desync_for_testing(shared.as_ref(), key, false, within_window),
"duplicate strictly inside dedup window must stay suppressed"
);
let on_window_edge = base + DESYNC_DEDUP_WINDOW;
assert!(
should_emit_full_desync(key, false, on_window_edge),
should_emit_full_desync_for_testing(shared.as_ref(), key, false, on_window_edge),
"duplicate at window boundary must re-emit and refresh"
);
}
@@ -49,39 +46,34 @@ fn should_emit_full_desync_filters_duplicates() {
#[test]
#[ignore = "Tracking for M-04: Verify desync dedup eviction behaves correctly under map-full condition"]
fn desync_dedup_eviction_under_map_full_condition() {
let _guard = desync_dedup_test_lock()
.lock()
.expect("desync dedup test lock must be available");
clear_desync_dedup_for_testing();
let shared = ProxySharedState::new();
clear_desync_dedup_for_testing_in_shared(shared.as_ref());
let base = Instant::now();
for key in 0..DESYNC_DEDUP_MAX_ENTRIES as u64 {
assert!(
should_emit_full_desync(key, false, base),
should_emit_full_desync_for_testing(shared.as_ref(), key, false, base),
"unique key should be inserted while warming dedup cache"
);
}
let dedup = DESYNC_DEDUP
.get()
.expect("dedup map must exist after warm-up insertions");
assert_eq!(
dedup.len(),
desync_dedup_len_for_testing(shared.as_ref()),
DESYNC_DEDUP_MAX_ENTRIES,
"cache warm-up must reach exact hard cap"
);
let before_keys: HashSet<u64> = dedup.iter().map(|entry| *entry.key()).collect();
let before_keys = desync_dedup_keys_for_testing(shared.as_ref());
let newcomer_key = 0x4D04_FFFF_FFFF_0001_u64;
assert!(
should_emit_full_desync(newcomer_key, false, base),
should_emit_full_desync_for_testing(shared.as_ref(), newcomer_key, false, base),
"first newcomer at map-full must emit under bounded full-cache gate"
);
let after_keys: HashSet<u64> = dedup.iter().map(|entry| *entry.key()).collect();
let after_keys = desync_dedup_keys_for_testing(shared.as_ref());
assert_eq!(
dedup.len(),
desync_dedup_len_for_testing(shared.as_ref()),
DESYNC_DEDUP_MAX_ENTRIES,
"map-full insertion must preserve hard capacity bound"
);
@@ -102,7 +94,7 @@ fn desync_dedup_eviction_under_map_full_condition() {
);
assert!(
!should_emit_full_desync(newcomer_key, false, base),
!should_emit_full_desync_for_testing(shared.as_ref(), newcomer_key, false, base),
"immediate duplicate newcomer must remain suppressed"
);
}

View File

@@ -0,0 +1,608 @@
use crate::proxy::handshake::{
auth_probe_fail_streak_for_testing_in_shared, auth_probe_is_throttled_for_testing_in_shared,
auth_probe_record_failure_for_testing, clear_auth_probe_state_for_testing_in_shared,
clear_unknown_sni_warn_state_for_testing_in_shared, clear_warned_secrets_for_testing_in_shared,
should_emit_unknown_sni_warn_for_testing_in_shared, warned_secrets_for_testing_in_shared,
};
use crate::proxy::client::handle_client_stream_with_shared;
use crate::proxy::middle_relay::{
clear_desync_dedup_for_testing_in_shared, clear_relay_idle_candidate_for_testing,
clear_relay_idle_pressure_state_for_testing_in_shared, mark_relay_idle_candidate_for_testing,
maybe_evict_idle_candidate_on_pressure_for_testing, note_relay_pressure_event_for_testing,
oldest_relay_idle_candidate_for_testing, relay_idle_mark_seq_for_testing,
relay_pressure_event_seq_for_testing, should_emit_full_desync_for_testing,
};
use crate::proxy::route_mode::{RelayRouteMode, RouteRuntimeController};
use crate::proxy::shared_state::ProxySharedState;
use crate::{
config::{ProxyConfig, UpstreamConfig, UpstreamType},
crypto::SecureRandom,
ip_tracker::UserIpTracker,
stats::{ReplayChecker, Stats, beobachten::BeobachtenStore},
stream::BufferPool,
transport::UpstreamManager,
};
use std::net::{IpAddr, Ipv4Addr};
use std::sync::Arc;
use std::time::{Duration, Instant};
use tokio::io::{AsyncWriteExt, duplex};
use tokio::sync::Barrier;
struct ClientHarness {
config: Arc<ProxyConfig>,
stats: Arc<Stats>,
upstream_manager: Arc<UpstreamManager>,
replay_checker: Arc<ReplayChecker>,
buffer_pool: Arc<BufferPool>,
rng: Arc<SecureRandom>,
route_runtime: Arc<RouteRuntimeController>,
ip_tracker: Arc<UserIpTracker>,
beobachten: Arc<BeobachtenStore>,
}
fn new_client_harness() -> ClientHarness {
let mut cfg = ProxyConfig::default();
cfg.censorship.mask = false;
cfg.general.modes.classic = true;
cfg.general.modes.secure = true;
let config = Arc::new(cfg);
let stats = Arc::new(Stats::new());
let upstream_manager = Arc::new(UpstreamManager::new(
vec![UpstreamConfig {
upstream_type: UpstreamType::Direct {
interface: None,
bind_addresses: None,
},
weight: 1,
enabled: true,
scopes: String::new(),
selected_scope: String::new(),
}],
1,
1,
1,
10,
1,
false,
stats.clone(),
));
ClientHarness {
config,
stats,
upstream_manager,
replay_checker: Arc::new(ReplayChecker::new(128, Duration::from_secs(60))),
buffer_pool: Arc::new(BufferPool::new()),
rng: Arc::new(SecureRandom::new()),
route_runtime: Arc::new(RouteRuntimeController::new(RelayRouteMode::Direct)),
ip_tracker: Arc::new(UserIpTracker::new()),
beobachten: Arc::new(BeobachtenStore::new()),
}
}
async fn drive_invalid_mtproto_handshake(shared: Arc<ProxySharedState>, peer: std::net::SocketAddr) {
let harness = new_client_harness();
let (server_side, mut client_side) = duplex(4096);
let invalid = [0u8; 64];
let task = tokio::spawn(handle_client_stream_with_shared(
server_side,
peer,
harness.config,
harness.stats,
harness.upstream_manager,
harness.replay_checker,
harness.buffer_pool,
harness.rng,
None,
harness.route_runtime,
None,
harness.ip_tracker,
harness.beobachten,
shared,
false,
));
client_side
.write_all(&invalid)
.await
.expect("failed to write invalid handshake");
client_side.shutdown().await.expect("failed to shutdown client");
let _ = tokio::time::timeout(Duration::from_secs(3), task)
.await
.expect("client task timed out")
.expect("client task join failed");
}
#[test]
fn proxy_shared_state_two_instances_do_not_share_auth_probe_state() {
    // A failure recorded in one instance must be invisible to the other.
    let first = ProxySharedState::new();
    let second = ProxySharedState::new();
    clear_auth_probe_state_for_testing_in_shared(first.as_ref());
    let probe_ip = IpAddr::V4(Ipv4Addr::new(198, 51, 100, 10));
    auth_probe_record_failure_for_testing(first.as_ref(), probe_ip, Instant::now());
    assert_eq!(
        auth_probe_fail_streak_for_testing_in_shared(first.as_ref(), probe_ip),
        Some(1)
    );
    assert_eq!(
        auth_probe_fail_streak_for_testing_in_shared(second.as_ref(), probe_ip),
        None
    );
}
#[test]
fn proxy_shared_state_two_instances_do_not_share_desync_dedup() {
    // Emitting a full-desync event for a key in one instance must not
    // suppress the same key in a sibling instance.
    let first = ProxySharedState::new();
    let second = ProxySharedState::new();
    clear_desync_dedup_for_testing_in_shared(first.as_ref());
    let stamp = Instant::now();
    let dedup_key = 0xA5A5_u64;
    assert!(should_emit_full_desync_for_testing(first.as_ref(), dedup_key, false, stamp));
    assert!(should_emit_full_desync_for_testing(second.as_ref(), dedup_key, false, stamp));
}
#[test]
fn proxy_shared_state_two_instances_do_not_share_idle_registry() {
    // An idle candidate registered in one instance must not surface in another.
    let first = ProxySharedState::new();
    let second = ProxySharedState::new();
    clear_relay_idle_pressure_state_for_testing_in_shared(first.as_ref());
    assert!(mark_relay_idle_candidate_for_testing(first.as_ref(), 111));
    assert_eq!(oldest_relay_idle_candidate_for_testing(first.as_ref()), Some(111));
    assert_eq!(oldest_relay_idle_candidate_for_testing(second.as_ref()), None);
}
#[test]
fn proxy_shared_state_reset_in_one_instance_does_not_affect_another() {
    // Clearing auth-probe state on one instance must leave the other intact.
    let first = ProxySharedState::new();
    let second = ProxySharedState::new();
    clear_auth_probe_state_for_testing_in_shared(first.as_ref());
    let first_ip = IpAddr::V4(Ipv4Addr::new(203, 0, 113, 1));
    let second_ip = IpAddr::V4(Ipv4Addr::new(203, 0, 113, 2));
    let stamp = Instant::now();
    auth_probe_record_failure_for_testing(first.as_ref(), first_ip, stamp);
    auth_probe_record_failure_for_testing(second.as_ref(), second_ip, stamp);
    clear_auth_probe_state_for_testing_in_shared(first.as_ref());
    assert_eq!(
        auth_probe_fail_streak_for_testing_in_shared(first.as_ref(), first_ip),
        None
    );
    assert_eq!(
        auth_probe_fail_streak_for_testing_in_shared(second.as_ref(), second_ip),
        Some(1)
    );
}
#[test]
fn proxy_shared_state_parallel_auth_probe_updates_stay_per_instance() {
    // Interleaved failure streams for the same IP are counted per instance.
    let first = ProxySharedState::new();
    let second = ProxySharedState::new();
    clear_auth_probe_state_for_testing_in_shared(first.as_ref());
    let probe_ip = IpAddr::V4(Ipv4Addr::new(192, 0, 2, 77));
    let stamp = Instant::now();
    (0..5).for_each(|_| auth_probe_record_failure_for_testing(first.as_ref(), probe_ip, stamp));
    let later = stamp + Duration::from_millis(1);
    (0..3).for_each(|_| auth_probe_record_failure_for_testing(second.as_ref(), probe_ip, later));
    assert_eq!(
        auth_probe_fail_streak_for_testing_in_shared(first.as_ref(), probe_ip),
        Some(5)
    );
    assert_eq!(
        auth_probe_fail_streak_for_testing_in_shared(second.as_ref(), probe_ip),
        Some(3)
    );
}
#[tokio::test]
async fn proxy_shared_state_client_pipeline_records_probe_failures_in_instance_state() {
    // One invalid handshake through the real pipeline must bump the failure
    // streak inside the injected shared-state instance.
    let shared = ProxySharedState::new();
    clear_auth_probe_state_for_testing_in_shared(shared.as_ref());
    let peer_ip = IpAddr::V4(Ipv4Addr::new(198, 51, 100, 200));
    drive_invalid_mtproto_handshake(shared.clone(), std::net::SocketAddr::new(peer_ip, 54001))
        .await;
    assert_eq!(
        auth_probe_fail_streak_for_testing_in_shared(shared.as_ref(), peer_ip),
        Some(1),
        "invalid handshake in client pipeline must update injected shared auth-probe state"
    );
}
#[tokio::test]
async fn proxy_shared_state_client_pipeline_keeps_auth_probe_isolated_between_instances() {
    // Each pipeline run must only touch the shared state it was handed.
    let shared_a = ProxySharedState::new();
    let shared_b = ProxySharedState::new();
    clear_auth_probe_state_for_testing_in_shared(shared_a.as_ref());
    clear_auth_probe_state_for_testing_in_shared(shared_b.as_ref());
    let ip_a = IpAddr::V4(Ipv4Addr::new(203, 0, 113, 210));
    let ip_b = IpAddr::V4(Ipv4Addr::new(203, 0, 113, 211));
    drive_invalid_mtproto_handshake(shared_a.clone(), std::net::SocketAddr::new(ip_a, 54110)).await;
    drive_invalid_mtproto_handshake(shared_b.clone(), std::net::SocketAddr::new(ip_b, 54111)).await;
    // Each instance saw its own peer...
    assert_eq!(
        auth_probe_fail_streak_for_testing_in_shared(shared_a.as_ref(), ip_a),
        Some(1)
    );
    assert_eq!(
        auth_probe_fail_streak_for_testing_in_shared(shared_b.as_ref(), ip_b),
        Some(1)
    );
    // ...and never the sibling's.
    assert_eq!(
        auth_probe_fail_streak_for_testing_in_shared(shared_a.as_ref(), ip_b),
        None
    );
    assert_eq!(
        auth_probe_fail_streak_for_testing_in_shared(shared_b.as_ref(), ip_a),
        None
    );
}
#[tokio::test(flavor = "multi_thread", worker_threads = 4)]
async fn proxy_shared_state_client_pipeline_high_contention_same_ip_stays_lossless_per_instance() {
    // 48 pipeline runs per instance, all against the same client IP, released
    // simultaneously by one barrier. Both instances must end with a non-zero
    // streak, and clearing one must not reset the other.
    let shared_a = ProxySharedState::new();
    let shared_b = ProxySharedState::new();
    clear_auth_probe_state_for_testing_in_shared(shared_a.as_ref());
    clear_auth_probe_state_for_testing_in_shared(shared_b.as_ref());
    let ip = IpAddr::V4(Ipv4Addr::new(198, 51, 100, 250));
    let workers = 48u16;
    // One barrier gates all A-tasks and B-tasks together (hence * 2) so both
    // instances are hammered at the same moment.
    let barrier = Arc::new(Barrier::new((workers as usize) * 2));
    let mut tasks = Vec::new();
    for i in 0..workers {
        let shared_a = shared_a.clone();
        let barrier_a = barrier.clone();
        // Distinct source ports (56000.. / 56100..) keep peers unique per task.
        let peer_a = std::net::SocketAddr::new(ip, 56000 + i);
        tasks.push(tokio::spawn(async move {
            barrier_a.wait().await;
            drive_invalid_mtproto_handshake(shared_a, peer_a).await;
        }));
        let shared_b = shared_b.clone();
        let barrier_b = barrier.clone();
        let peer_b = std::net::SocketAddr::new(ip, 56100 + i);
        tasks.push(tokio::spawn(async move {
            barrier_b.wait().await;
            drive_invalid_mtproto_handshake(shared_b, peer_b).await;
        }));
    }
    for task in tasks {
        task.await.expect("pipeline task join failed");
    }
    // NOTE(review): only ">0" is asserted here, not an exact count — the
    // pipeline may coalesce/throttle some failures under contention.
    let streak_a = auth_probe_fail_streak_for_testing_in_shared(shared_a.as_ref(), ip)
        .expect("instance A must track probe failures");
    let streak_b = auth_probe_fail_streak_for_testing_in_shared(shared_b.as_ref(), ip)
        .expect("instance B must track probe failures");
    assert!(streak_a > 0);
    assert!(streak_b > 0);
    clear_auth_probe_state_for_testing_in_shared(shared_a.as_ref());
    assert_eq!(
        auth_probe_fail_streak_for_testing_in_shared(shared_a.as_ref(), ip),
        None,
        "clearing one instance must reset only that instance"
    );
    assert!(
        auth_probe_fail_streak_for_testing_in_shared(shared_b.as_ref(), ip).is_some(),
        "clearing one instance must not clear the other instance"
    );
}
#[test]
fn proxy_shared_state_auth_saturation_does_not_bleed_across_instances() {
    // Saturating the throttle in one instance must not throttle the sibling.
    let first = ProxySharedState::new();
    let second = ProxySharedState::new();
    clear_auth_probe_state_for_testing_in_shared(first.as_ref());
    clear_auth_probe_state_for_testing_in_shared(second.as_ref());
    let probe_ip = IpAddr::V4(Ipv4Addr::new(203, 0, 113, 77));
    // Stamp the failures slightly in the future so they are unambiguously fresh.
    let future_now = Instant::now() + Duration::from_secs(1);
    (0..8).for_each(|_| {
        auth_probe_record_failure_for_testing(first.as_ref(), probe_ip, future_now)
    });
    assert!(auth_probe_is_throttled_for_testing_in_shared(first.as_ref(), probe_ip));
    assert!(!auth_probe_is_throttled_for_testing_in_shared(second.as_ref(), probe_ip));
}
#[test]
fn proxy_shared_state_poison_clear_in_one_instance_does_not_affect_other_instance() {
    // Regression coverage: deliberately poison instance A's auth-probe mutex,
    // then verify that poison recovery + clear on A leaves B's state untouched.
    let a = ProxySharedState::new();
    let b = ProxySharedState::new();
    clear_auth_probe_state_for_testing_in_shared(a.as_ref());
    clear_auth_probe_state_for_testing_in_shared(b.as_ref());
    let ip_a = IpAddr::V4(Ipv4Addr::new(203, 0, 113, 31));
    let ip_b = IpAddr::V4(Ipv4Addr::new(203, 0, 113, 32));
    let now = Instant::now();
    auth_probe_record_failure_for_testing(a.as_ref(), ip_a, now);
    auth_probe_record_failure_for_testing(b.as_ref(), ip_b, now);
    let a_for_poison = a.clone();
    // Panic on a helper thread while holding the lock: this poisons the mutex.
    // The join result is discarded on purpose — the panic is the point.
    let _ = std::thread::spawn(move || {
        let _hold = a_for_poison
            .handshake
            .auth_probe_saturation
            .lock()
            .unwrap_or_else(|poisoned| poisoned.into_inner());
        panic!("intentional poison for per-instance isolation regression coverage");
    })
    .join();
    // The clear must recover from the poisoned lock rather than panic.
    clear_auth_probe_state_for_testing_in_shared(a.as_ref());
    assert_eq!(auth_probe_fail_streak_for_testing_in_shared(a.as_ref(), ip_a), None);
    assert_eq!(
        auth_probe_fail_streak_for_testing_in_shared(b.as_ref(), ip_b),
        Some(1),
        "poison recovery and clear in one instance must not touch other instance state"
    );
}
#[test]
fn proxy_shared_state_unknown_sni_cooldown_does_not_bleed_across_instances() {
    // Both fresh instances must be allowed to warn at the same instant —
    // the cooldown is per instance, not process-global.
    let first = ProxySharedState::new();
    let second = ProxySharedState::new();
    clear_unknown_sni_warn_state_for_testing_in_shared(first.as_ref());
    clear_unknown_sni_warn_state_for_testing_in_shared(second.as_ref());
    let stamp = Instant::now();
    assert!(should_emit_unknown_sni_warn_for_testing_in_shared(first.as_ref(), stamp));
    assert!(should_emit_unknown_sni_warn_for_testing_in_shared(second.as_ref(), stamp));
}
#[test]
fn proxy_shared_state_warned_secret_cache_does_not_bleed_across_instances() {
    // Inserting a warned-secret key into one instance's cache must not make
    // it visible through the sibling instance.
    let first = ProxySharedState::new();
    let second = ProxySharedState::new();
    clear_warned_secrets_for_testing_in_shared(first.as_ref());
    clear_warned_secrets_for_testing_in_shared(second.as_ref());
    let key = ("isolation-user".to_string(), "invalid_hex".to_string());
    // Lock with poison recovery, mirroring production access to this cache.
    let contains = |state: &ProxySharedState| {
        let warned = warned_secrets_for_testing_in_shared(state);
        let guard = warned
            .lock()
            .unwrap_or_else(|poisoned| poisoned.into_inner());
        guard.contains(&key)
    };
    {
        let warned = warned_secrets_for_testing_in_shared(first.as_ref());
        warned
            .lock()
            .unwrap_or_else(|poisoned| poisoned.into_inner())
            .insert(key.clone());
    }
    assert!(contains(first.as_ref()));
    assert!(!contains(second.as_ref()));
}
#[test]
fn proxy_shared_state_idle_mark_seq_is_per_instance() {
    // Marking an idle candidate bumps only that instance's mark sequence.
    let first = ProxySharedState::new();
    let second = ProxySharedState::new();
    clear_relay_idle_pressure_state_for_testing_in_shared(first.as_ref());
    clear_relay_idle_pressure_state_for_testing_in_shared(second.as_ref());
    // Both counters start at zero after a clear.
    assert_eq!(relay_idle_mark_seq_for_testing(first.as_ref()), 0);
    assert_eq!(relay_idle_mark_seq_for_testing(second.as_ref()), 0);
    // First instance marks: only its counter moves.
    assert!(mark_relay_idle_candidate_for_testing(first.as_ref(), 9001));
    assert_eq!(relay_idle_mark_seq_for_testing(first.as_ref()), 1);
    assert_eq!(relay_idle_mark_seq_for_testing(second.as_ref()), 0);
    // Second instance marks: first counter stays put.
    assert!(mark_relay_idle_candidate_for_testing(second.as_ref(), 9002));
    assert_eq!(relay_idle_mark_seq_for_testing(first.as_ref()), 1);
    assert_eq!(relay_idle_mark_seq_for_testing(second.as_ref()), 1);
}
#[test]
fn proxy_shared_state_unknown_sni_clear_in_one_instance_does_not_reset_other() {
    // After both instances consume their warn budget, clearing only the first
    // re-arms the first while the second stays in cooldown.
    let first = ProxySharedState::new();
    let second = ProxySharedState::new();
    clear_unknown_sni_warn_state_for_testing_in_shared(first.as_ref());
    clear_unknown_sni_warn_state_for_testing_in_shared(second.as_ref());
    let stamp = Instant::now();
    assert!(should_emit_unknown_sni_warn_for_testing_in_shared(first.as_ref(), stamp));
    assert!(should_emit_unknown_sni_warn_for_testing_in_shared(second.as_ref(), stamp));
    clear_unknown_sni_warn_state_for_testing_in_shared(first.as_ref());
    let later = stamp + Duration::from_millis(1);
    assert!(should_emit_unknown_sni_warn_for_testing_in_shared(first.as_ref(), later));
    assert!(!should_emit_unknown_sni_warn_for_testing_in_shared(second.as_ref(), later));
}
#[test]
fn proxy_shared_state_warned_secret_clear_in_one_instance_does_not_clear_other() {
    // Seed both caches with the same key, clear one, and verify the other
    // still remembers it.
    let first = ProxySharedState::new();
    let second = ProxySharedState::new();
    clear_warned_secrets_for_testing_in_shared(first.as_ref());
    clear_warned_secrets_for_testing_in_shared(second.as_ref());
    let key = ("clear-isolation-user".to_string(), "invalid_length".to_string());
    // Lock with poison recovery, mirroring production access to this cache.
    let insert = |state: &ProxySharedState| {
        let warned = warned_secrets_for_testing_in_shared(state);
        warned
            .lock()
            .unwrap_or_else(|poisoned| poisoned.into_inner())
            .insert(key.clone());
    };
    let contains = |state: &ProxySharedState| {
        let warned = warned_secrets_for_testing_in_shared(state);
        let guard = warned
            .lock()
            .unwrap_or_else(|poisoned| poisoned.into_inner());
        guard.contains(&key)
    };
    insert(first.as_ref());
    insert(second.as_ref());
    clear_warned_secrets_for_testing_in_shared(first.as_ref());
    assert!(!contains(first.as_ref()));
    assert!(contains(second.as_ref()));
}
#[test]
fn proxy_shared_state_desync_duplicate_suppression_is_instance_scoped() {
    // A repeated key is suppressed within one instance, but a sibling
    // instance must still emit its own first occurrence.
    let first = ProxySharedState::new();
    let second = ProxySharedState::new();
    clear_desync_dedup_for_testing_in_shared(first.as_ref());
    clear_desync_dedup_for_testing_in_shared(second.as_ref());
    let stamp = Instant::now();
    let dedup_key = 0xBEEF_0000_0000_0001u64;
    assert!(should_emit_full_desync_for_testing(first.as_ref(), dedup_key, false, stamp));
    assert!(!should_emit_full_desync_for_testing(
        first.as_ref(),
        dedup_key,
        false,
        stamp + Duration::from_millis(1)
    ));
    assert!(should_emit_full_desync_for_testing(second.as_ref(), dedup_key, false, stamp));
}
#[test]
fn proxy_shared_state_desync_clear_in_one_instance_does_not_clear_other() {
    // Both instances consume the key once; clearing dedup on the first must
    // re-arm only the first — the second stays suppressed.
    let first = ProxySharedState::new();
    let second = ProxySharedState::new();
    clear_desync_dedup_for_testing_in_shared(first.as_ref());
    clear_desync_dedup_for_testing_in_shared(second.as_ref());
    let stamp = Instant::now();
    let dedup_key = 0xCAFE_0000_0000_0001u64;
    assert!(should_emit_full_desync_for_testing(first.as_ref(), dedup_key, false, stamp));
    assert!(should_emit_full_desync_for_testing(second.as_ref(), dedup_key, false, stamp));
    clear_desync_dedup_for_testing_in_shared(first.as_ref());
    let later = stamp + Duration::from_millis(2);
    assert!(should_emit_full_desync_for_testing(
        first.as_ref(),
        dedup_key,
        false,
        later
    ));
    assert!(!should_emit_full_desync_for_testing(
        second.as_ref(),
        dedup_key,
        false,
        later
    ));
}
#[test]
fn proxy_shared_state_idle_candidate_clear_in_one_instance_does_not_affect_other() {
    // Removing a candidate from one registry leaves the sibling registry intact.
    let first = ProxySharedState::new();
    let second = ProxySharedState::new();
    clear_relay_idle_pressure_state_for_testing_in_shared(first.as_ref());
    clear_relay_idle_pressure_state_for_testing_in_shared(second.as_ref());
    assert!(mark_relay_idle_candidate_for_testing(first.as_ref(), 1001));
    assert!(mark_relay_idle_candidate_for_testing(second.as_ref(), 2002));
    clear_relay_idle_candidate_for_testing(first.as_ref(), 1001);
    assert_eq!(oldest_relay_idle_candidate_for_testing(first.as_ref()), None);
    assert_eq!(oldest_relay_idle_candidate_for_testing(second.as_ref()), Some(2002));
}
#[test]
fn proxy_shared_state_pressure_seq_increments_are_instance_scoped() {
    // Pressure events bump the sequence only on the instance they hit.
    let first = ProxySharedState::new();
    let second = ProxySharedState::new();
    clear_relay_idle_pressure_state_for_testing_in_shared(first.as_ref());
    clear_relay_idle_pressure_state_for_testing_in_shared(second.as_ref());
    assert_eq!(relay_pressure_event_seq_for_testing(first.as_ref()), 0);
    assert_eq!(relay_pressure_event_seq_for_testing(second.as_ref()), 0);
    for _ in 0..2 {
        note_relay_pressure_event_for_testing(first.as_ref());
    }
    assert_eq!(relay_pressure_event_seq_for_testing(first.as_ref()), 2);
    assert_eq!(relay_pressure_event_seq_for_testing(second.as_ref()), 0);
}
#[test]
fn proxy_shared_state_pressure_consumption_does_not_cross_instances() {
    // A pressure event noted on one instance must only permit eviction there,
    // even when both instances track the same connection id.
    let first = ProxySharedState::new();
    let second = ProxySharedState::new();
    clear_relay_idle_pressure_state_for_testing_in_shared(first.as_ref());
    clear_relay_idle_pressure_state_for_testing_in_shared(second.as_ref());
    assert!(mark_relay_idle_candidate_for_testing(first.as_ref(), 7001));
    assert!(mark_relay_idle_candidate_for_testing(second.as_ref(), 7001));
    note_relay_pressure_event_for_testing(first.as_ref());
    let stats = Stats::new();
    let mut seen_first = 0u64;
    let mut seen_second = 0u64;
    assert!(maybe_evict_idle_candidate_on_pressure_for_testing(
        first.as_ref(),
        7001,
        &mut seen_first,
        &stats
    ));
    assert!(!maybe_evict_idle_candidate_on_pressure_for_testing(
        second.as_ref(),
        7001,
        &mut seen_second,
        &stats
    ));
}

View File

@@ -0,0 +1,255 @@
use crate::proxy::handshake::{
auth_probe_fail_streak_for_testing_in_shared, auth_probe_record_failure_for_testing,
clear_auth_probe_state_for_testing_in_shared, clear_unknown_sni_warn_state_for_testing_in_shared,
should_emit_unknown_sni_warn_for_testing_in_shared,
};
use crate::proxy::middle_relay::{
clear_desync_dedup_for_testing_in_shared, clear_relay_idle_pressure_state_for_testing_in_shared,
mark_relay_idle_candidate_for_testing, oldest_relay_idle_candidate_for_testing,
should_emit_full_desync_for_testing,
};
use crate::proxy::shared_state::ProxySharedState;
use rand::SeedableRng;
use rand::RngExt;
use rand::rngs::StdRng;
use std::net::{IpAddr, Ipv4Addr};
use std::sync::Arc;
use std::time::Instant;
use tokio::sync::Barrier;
#[tokio::test(flavor = "multi_thread", worker_threads = 4)]
async fn proxy_shared_state_50_concurrent_instances_no_counter_bleed() {
    // Fifty independent instances, each recording one failure for the same
    // IP, must each report a streak of exactly one.
    let tasks: Vec<_> = (0..50_u8)
        .map(|_| {
            tokio::spawn(async move {
                let shared = ProxySharedState::new();
                clear_auth_probe_state_for_testing_in_shared(shared.as_ref());
                let probe_ip = IpAddr::V4(Ipv4Addr::new(198, 51, 100, 200));
                auth_probe_record_failure_for_testing(shared.as_ref(), probe_ip, Instant::now());
                auth_probe_fail_streak_for_testing_in_shared(shared.as_ref(), probe_ip)
            })
        })
        .collect();
    for task in tasks {
        assert_eq!(task.await.expect("task join failed"), Some(1));
    }
}
#[tokio::test(flavor = "multi_thread", worker_threads = 4)]
async fn proxy_shared_state_desync_rotation_concurrent_20_instances() {
    // Twenty fresh instances evaluating the same key concurrently must all
    // be allowed to emit — dedup state is never shared between them.
    let stamp = Instant::now();
    let dedup_key = 0xD35E_D35E_u64;
    let tasks: Vec<_> = (0..20_u64)
        .map(|_| {
            tokio::spawn(async move {
                let shared = ProxySharedState::new();
                clear_desync_dedup_for_testing_in_shared(shared.as_ref());
                should_emit_full_desync_for_testing(shared.as_ref(), dedup_key, false, stamp)
            })
        })
        .collect();
    for task in tasks {
        assert!(task.await.expect("task join failed"));
    }
}
#[tokio::test(flavor = "multi_thread", worker_threads = 4)]
async fn proxy_shared_state_idle_registry_concurrent_10_instances() {
    // Ten instances marking the same connection id concurrently must each
    // succeed in their own registry and report that id as the oldest entry.
    let conn_id = 42_u64;
    let tasks: Vec<_> = (1..=10_u64)
        .map(|_| {
            tokio::spawn(async move {
                let shared = ProxySharedState::new();
                clear_relay_idle_pressure_state_for_testing_in_shared(shared.as_ref());
                let marked = mark_relay_idle_candidate_for_testing(shared.as_ref(), conn_id);
                (marked, oldest_relay_idle_candidate_for_testing(shared.as_ref()))
            })
        })
        .collect();
    for (idx, task) in tasks.into_iter().enumerate() {
        let (marked, oldest) = task.await.expect("task join failed");
        assert!(marked, "instance {} failed to mark", idx);
        assert_eq!(oldest, Some(conn_id));
    }
}
#[tokio::test(flavor = "multi_thread", worker_threads = 4)]
async fn proxy_shared_state_dual_instance_same_ip_high_contention_no_counter_bleed() {
    // 64 tasks each record one failure per instance for the same IP; both
    // instances must land on exactly 64 with no cross-instance bleed.
    let first = ProxySharedState::new();
    let second = ProxySharedState::new();
    clear_auth_probe_state_for_testing_in_shared(first.as_ref());
    clear_auth_probe_state_for_testing_in_shared(second.as_ref());
    let probe_ip = IpAddr::V4(Ipv4Addr::new(203, 0, 113, 200));
    let tasks: Vec<_> = (0..64)
        .map(|_| {
            let first = first.clone();
            let second = second.clone();
            tokio::spawn(async move {
                auth_probe_record_failure_for_testing(first.as_ref(), probe_ip, Instant::now());
                auth_probe_record_failure_for_testing(second.as_ref(), probe_ip, Instant::now());
            })
        })
        .collect();
    for task in tasks {
        task.await.expect("task join failed");
    }
    assert_eq!(
        auth_probe_fail_streak_for_testing_in_shared(first.as_ref(), probe_ip),
        Some(64)
    );
    assert_eq!(
        auth_probe_fail_streak_for_testing_in_shared(second.as_ref(), probe_ip),
        Some(64)
    );
}
#[tokio::test(flavor = "multi_thread", worker_threads = 4)]
async fn proxy_shared_state_unknown_sni_parallel_instances_no_cross_cooldown() {
    // Every fresh instance has its own warn budget: the first emit passes and
    // the immediate follow-up is throttled, regardless of sibling instances.
    let stamp = Instant::now();
    let tasks: Vec<_> = (0..32)
        .map(|_| {
            tokio::spawn(async move {
                let shared = ProxySharedState::new();
                clear_unknown_sni_warn_state_for_testing_in_shared(shared.as_ref());
                let first =
                    should_emit_unknown_sni_warn_for_testing_in_shared(shared.as_ref(), stamp);
                let second = should_emit_unknown_sni_warn_for_testing_in_shared(
                    shared.as_ref(),
                    stamp + std::time::Duration::from_millis(1),
                );
                (first, second)
            })
        })
        .collect();
    for task in tasks {
        let (first, second) = task.await.expect("task join failed");
        assert!(first);
        assert!(!second);
    }
}
#[tokio::test(flavor = "multi_thread", worker_threads = 4)]
async fn proxy_shared_state_auth_probe_high_contention_increments_are_lossless() {
    // 128 workers x 20 rounds, each round released simultaneously by a
    // barrier, must accumulate to exactly workers * rounds — no lost updates.
    let shared = ProxySharedState::new();
    clear_auth_probe_state_for_testing_in_shared(shared.as_ref());
    let probe_ip = IpAddr::V4(Ipv4Addr::new(198, 51, 100, 33));
    let workers = 128usize;
    let rounds = 20usize;
    for _ in 0..rounds {
        let gate = Arc::new(Barrier::new(workers));
        let round_tasks: Vec<_> = (0..workers)
            .map(|_| {
                let shared = shared.clone();
                let gate = gate.clone();
                tokio::spawn(async move {
                    gate.wait().await;
                    auth_probe_record_failure_for_testing(shared.as_ref(), probe_ip, Instant::now());
                })
            })
            .collect();
        // Join every worker before starting the next round so rounds don't overlap.
        for task in round_tasks {
            task.await.expect("task join failed");
        }
    }
    assert_eq!(
        auth_probe_fail_streak_for_testing_in_shared(shared.as_ref(), probe_ip),
        Some((workers * rounds) as u32),
        "auth probe fail streak must account for every concurrent update"
    );
}
#[tokio::test(flavor = "multi_thread", worker_threads = 4)]
async fn proxy_shared_state_seed_matrix_concurrency_isolation_no_counter_bleed() {
    // Deterministic randomized matrix: for each seed, two instances receive
    // rng-chosen numbers of failure recordings for the same IP under barrier
    // contention; final streaks must exactly match the expected tallies.
    // NOTE: the order of `random_range` draws below determines the expected
    // values — do not reorder rng calls.
    let seeds: [u64; 8] = [
        0x0000_0000_0000_0001,
        0x1111_1111_1111_1111,
        0xA5A5_A5A5_A5A5_A5A5,
        0xDEAD_BEEF_CAFE_BABE,
        0x0123_4567_89AB_CDEF,
        0xFEDC_BA98_7654_3210,
        0x0F0F_F0F0_55AA_AA55,
        0x1357_9BDF_2468_ACE0,
    ];
    for seed in seeds {
        let mut rng = StdRng::seed_from_u64(seed);
        let shared_a = ProxySharedState::new();
        let shared_b = ProxySharedState::new();
        clear_auth_probe_state_for_testing_in_shared(shared_a.as_ref());
        clear_auth_probe_state_for_testing_in_shared(shared_b.as_ref());
        // Random last octet keeps seeds from all hammering one bucket.
        let ip = IpAddr::V4(Ipv4Addr::new(
            198,
            51,
            100,
            rng.random_range(1_u8..=250_u8),
        ));
        let workers = rng.random_range(16_usize..=48_usize);
        let rounds = rng.random_range(4_usize..=10_usize);
        // Expected per-instance totals, tallied as the per-worker op counts
        // are drawn (saturating_add guards against overflow, not expected here).
        let mut expected_a: u32 = 0;
        let mut expected_b: u32 = 0;
        for _ in 0..rounds {
            // Barrier sized for both instances' workers so A and B contend
            // at the same instant.
            let start = Arc::new(Barrier::new(workers * 2));
            let mut handles = Vec::with_capacity(workers * 2);
            for _ in 0..workers {
                let a_ops = rng.random_range(1_u32..=3_u32);
                let b_ops = rng.random_range(1_u32..=3_u32);
                expected_a = expected_a.saturating_add(a_ops);
                expected_b = expected_b.saturating_add(b_ops);
                let shared_a = shared_a.clone();
                let start_a = start.clone();
                handles.push(tokio::spawn(async move {
                    start_a.wait().await;
                    for _ in 0..a_ops {
                        auth_probe_record_failure_for_testing(shared_a.as_ref(), ip, Instant::now());
                    }
                }));
                let shared_b = shared_b.clone();
                let start_b = start.clone();
                handles.push(tokio::spawn(async move {
                    start_b.wait().await;
                    for _ in 0..b_ops {
                        auth_probe_record_failure_for_testing(shared_b.as_ref(), ip, Instant::now());
                    }
                }));
            }
            for handle in handles {
                handle.await.expect("task join failed");
            }
        }
        assert_eq!(
            auth_probe_fail_streak_for_testing_in_shared(shared_a.as_ref(), ip),
            Some(expected_a),
            "seed {seed:#x}: instance A streak mismatch"
        );
        assert_eq!(
            auth_probe_fail_streak_for_testing_in_shared(shared_b.as_ref(), ip),
            Some(expected_b),
            "seed {seed:#x}: instance B streak mismatch"
        );
        clear_auth_probe_state_for_testing_in_shared(shared_a.as_ref());
        assert_eq!(
            auth_probe_fail_streak_for_testing_in_shared(shared_a.as_ref(), ip),
            None,
            "seed {seed:#x}: clearing A must reset only A"
        );
        assert_eq!(
            auth_probe_fail_streak_for_testing_in_shared(shared_b.as_ref(), ip),
            Some(expected_b),
            "seed {seed:#x}: clearing A must not mutate B"
        );
    }
}